# File: eggs/raven-3.1.8-py2.7.egg/raven/contrib/celery/__init__.py | repo: bopopescu/phyG | license: CC-BY-3.0
"""
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import logging
try:
from celery.task import task
except ImportError:
from celery.decorators import task # NOQA
from celery.signals import after_setup_logger, task_failure
from raven.base import Client
from raven.handlers.logging import SentryHandler
class CeleryMixin(object):
def send_encoded(self, *args, **kwargs):
"Errors through celery"
self.send_raw.delay(*args, **kwargs)
@task(routing_key='sentry')
def send_raw(self, *args, **kwargs):
return super(CeleryMixin, self).send_encoded(*args, **kwargs)
class CeleryClient(CeleryMixin, Client):
pass
class CeleryFilter(logging.Filter):
def filter(self, record):
return record.funcName not in ('_log_error',)
def register_signal(client):
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
task_failure.connect(process_failure_signal, weak=False)
def register_logger_signal(client, logger=None):
filter_ = CeleryFilter()
if logger is None:
logger = logging.getLogger()
handler = SentryHandler(client)
handler.setLevel(logging.ERROR)
handler.addFilter(filter_)
def process_logger_event(sender, logger, loglevel, logfile, format,
colorize, **kw):
# Attempt to find an existing SentryHandler, and if it exists ensure
# that the CeleryFilter is installed.
# If one is found, we do not attempt to install another one.
for h in logger.handlers:
if type(h) == SentryHandler:
if not any(type(f) == CeleryFilter for f in h.filters):
h.addFilter(filter_)
return False
logger.addHandler(handler)
after_setup_logger.connect(process_logger_event, weak=False)
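# A minimal usage sketch for the helpers above, assuming a Celery worker process and a
# valid Sentry DSN (both placeholders, not part of this file):
#
#     from raven.contrib.celery import CeleryClient, register_signal, register_logger_signal
#
#     client = CeleryClient(dsn="https://public:secret@sentry.example.com/1")
#     register_logger_signal(client)  # attach a SentryHandler when Celery sets up logging
#     register_signal(client)         # capture exceptions raised via the task_failure signal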

# File: maelstrom/dataClasses/campaign.py | repo: Matt-Crow/Maelstrom | license: MIT
"""
this module will eventually include a Campaign class - a collection of Areas
"""
from maelstrom.util.stringUtil import entab
from maelstrom.util.serialize import AbstractJsonSerialable
class Area(AbstractJsonSerialable):
"""
a collection of Levels and Locations
"""
def __init__(self, **kwargs):
"""
required kwargs:
- name: str
- description: str
- locations: List<Location>. Defaults to []
- levels: List<Level>. Defaults to []
"""
super().__init__(**dict(kwargs, type="Area"))
self.name = kwargs["name"]
self.description = kwargs["description"]
self.locations = kwargs.get("locations", [])
self.levels = kwargs.get("levels", [])
self.addSerializedAttributes("name", "description", "locations", "levels")
def __str__(self)->str:
return f'Area: {self.name}'
def getDisplayData(self)->str:
lines = [
f'Area: {self.name}',
entab(self.description)
]
if len(self.locations) > 0:
lines.append("Locations:")
for location in self.locations:
lines.append(entab(location.getDisplayData()))
if len(self.levels) > 0:
lines.append("Levels:")
for level in self.levels:
lines.append(entab(level.getDisplayData()))
return "\n".join(lines)
class Location(AbstractJsonSerialable):
"""
Locations provide flavor text for an Area
"""
def __init__(self, **kwargs):
"""
required kwargs:
- name: str
- description: str
"""
super().__init__(**dict(kwargs, type="Location"))
self.name = kwargs["name"]
self.description = kwargs["description"]
self.addSerializedAttributes("name", "description")
def __str__(self)->str:
return f'Location: {self.name}'
def getDisplayData(self)->str:
"""
gets data to output
"""
return f'{self.name}: "{self.description}"'
class Level(AbstractJsonSerialable):
"""
A level pits two teams against each other in an Encounter
"""
def __init__(self, **kwargs):
"""
required kwargs:
- name: str
- description: str
- prescript: str
- postscript: str
- enemyNames: List<str>
- enemyLevel: int
"""
super().__init__(**dict(kwargs, type="Level"))
self.name = kwargs["name"]
self.description = kwargs["description"]
self.prescript = kwargs["prescript"]
self.postscript = kwargs["postscript"]
self.enemyNames = kwargs["enemyNames"]
self.enemyLevel = kwargs["enemyLevel"]
self.addSerializedAttributes(
"name",
"description",
"prescript",
"postscript",
"enemyNames",
"enemyLevel"
)
def __str__(self)->str:
return f'Level: {self.name}'
def getDisplayData(self)->str:
lines = [
f'Level: {self.name}',
entab(f'"{self.description}"')
]
for name in self.enemyNames:
lines.append(entab(f'* {name} Lv. {self.enemyLevel}'))
return "\n".join(lines)

# File: example/KoalaRemoteTestApp_example.py | repo: jejmule/pyKoalaRemote | license: MIT
from pyKoalaRemote import client
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
#create instance to class pyRemote
remote = client.pyKoalaRemoteClient()
#Connect and Login
remote.ConnectAndLoginDialog()
#Open a 2 wavelengths configuration.
remote.OpenConfigDialog()
#Open main display windows
remote.OpenPhaseWin();
remote.OpenIntensityWin();
remote.OpenHoloWin();
#This block records a hologram so that you can later work offline with the rest of the script.
#Set logical source 0 (the 1st source of the current configuration) to ON
remote.SetSourceState(0, True, True)
#Set logical source 1 (the 2nd source of the current configuration) to ON
remote.SetSourceState(1, True, True)
#Acquire one hologram
remote.Acquisition2L()
remote.ResetGrab()
#Save holo to file
#path = Path(r'c:\tmp')
path = Path.cwd()/'data'
remote.SaveImageToFile(1, str(path/'holo.tiff'))
#Load previously recorded hologram
remote.LoadHolo(str(path/'holo.tiff'), 2);
#Display lambda 1 image in phase window
remote.SelectDisplayWL(8192);
#Save hologram image
remote.SaveImageToFile(1, str(path/'holo.tiff'));
#Save intensity image
remote.SaveImageToFile(2, str(path/'intensity.tiff'));
#Save phase image (as displayed, which means phase lambda 1)
remote.SaveImageToFile(4, str(path/'phase.tif'));
#//Save intensity float in bin
remote.SaveImageFloatToFile(2, str(path/'intensity.bin'), True);
#//Save phase float in bin
remote.SaveImageFloatToFile(4, str(path/'phase.bin'), True);
print('files saved in',str(path))
#//This block only works for 2 wavelengths configurations
#//Display lambda 2 image in intensity window
remote.SelectDisplayWL(4096);
#//Display lambda 2 image in phase window
remote.SelectDisplayWL(16384);
#//Save intensity image (as displayed, which means intensity lambda 2)
remote.SaveImageToFile(2, str(path/'intensity2.tiff'));
#//Save phase image (as displayed, which means phase lambda 2)
remote.SaveImageToFile(4, str(path/'phase2.tif'));
remote.SaveImageFloatToFile(2, str(path/'intensity2.bin'), True);
remote.SaveImageFloatToFile(4, str(path/'phase2.bin'), True);
#//Gets the current reconstruction distance
recDist = remote.GetRecDistCM();
#//Set a new reconstruction distance
remote.SetRecDistCM(recDist * 1.1);
#Do a reconstruction with this new distance
remote.OnDistanceChange();
#//Get phase image for computation
phase = remote.GetPhase32fImage();
#plot phase numpy
plt.imshow(phase,cmap="gray")
#//Extract a profile
remote.SetPhaseProfileState(True)
remote.ExtractPhaseProfile(100, 100, 200, 200)
#Get a profile
profile = remote.GetPhaseProfile()
#Get xy values to plot calibrated profile
distance = np.arange(remote.GetPhaseProfileLength()) * remote.GetPxSizeUm()
plt.figure()
plt.plot(distance,profile)
plt.xlabel('dist [um]')
plt.ylabel('OPL [nm]')
plt.show()
#//Reset phase correction segments
remote.ResetCorrSegment();
#Add new phase profile correction
remote.AddCorrSegment(100, 100, 500, 1);
remote.AddCorrSegment(200, 200, 600, 0);
#//Compute 1D phase correction using tilt method
remote.ComputePhaseCorrection(0, 1);
#Logout
remote.Logout()

# File: rotor.py | repo: generic-user1/py-enigma | license: MIT
#!/usr/bin/env python3
from letterswitcher import LetterSwitcher, LettermapException
from enum import Enum
#define an enumeration for the
#different types of rotors supported
#TODO: support more rotors
class RotorType(Enum):
I = 0
II = 1
III = 2
IV = 3
V = 4
#class for an engima rotor
#details on the workings of the real-life enigma rotors can be found at
# http://users.telenet.be/d.rijmenants/en/enigmatech.htm
#if you are curious
class Rotor(LetterSwitcher):
#define letter maps for different rotor types
#rotorLetterMaps is accessed by index,
#you can see which index translates to which
#rotor type using the RotorType enum
__rotorLetterMaps = (
{'a': 'e', 'b': 'k', 'c': 'm', 'd': 'f', 'e': 'l', 'f': 'g', 'g': 'd', 'h': 'q', 'i': 'v', 'j': 'z', 'k': 'n', 'l': 't', 'm': 'o', 'n': 'w', 'o': 'y', 'p': 'h', 'q': 'x', 'r': 'u', 's': 's', 't': 'p', 'u': 'a', 'v': 'i', 'w': 'b', 'x': 'r', 'y': 'c', 'z': 'j'},
{'a': 'a', 'b': 'j', 'c': 'd', 'd': 'k', 'e': 's', 'f': 'i', 'g': 'r', 'h': 'u', 'i': 'x', 'j': 'b', 'k': 'l', 'l': 'h', 'm': 'w', 'n': 't', 'o': 'm', 'p': 'c', 'q': 'q', 'r': 'g', 's': 'z', 't': 'n', 'u': 'p', 'v': 'y', 'w': 'f', 'x': 'v', 'y': 'o', 'z': 'e'},
{'a': 'b', 'b': 'd', 'c': 'f', 'd': 'h', 'e': 'j', 'f': 'l', 'g': 'c', 'h': 'p', 'i': 'r', 'j': 't', 'k': 'x', 'l': 'v', 'm': 'z', 'n': 'n', 'o': 'y', 'p': 'e', 'q': 'i', 'r': 'w', 's': 'g', 't': 'a', 'u': 'k', 'v': 'm', 'w': 'u', 'x': 's', 'y': 'q', 'z': 'o'},
{'a': 'e', 'b': 's', 'c': 'o', 'd': 'v', 'e': 'p', 'f': 'z', 'g': 'j', 'h': 'a', 'i': 'y', 'j': 'q', 'k': 'u', 'l': 'i', 'm': 'r', 'n': 'h', 'o': 'x', 'p': 'l', 'q': 'n', 'r': 'f', 's': 't', 't': 'g', 'u': 'k', 'v': 'd', 'w': 'c', 'x': 'm', 'y': 'w', 'z': 'b'},
{'a': 'v', 'b': 'z', 'c': 'b', 'd': 'r', 'e': 'g', 'f': 'i', 'g': 't', 'h': 'y', 'i': 'u', 'j': 'p', 'k': 's', 'l': 'd', 'm': 'n', 'n': 'h', 'o': 'l', 'p': 'x', 'q': 'a', 'r': 'w', 's': 'm', 't': 'j', 'u': 'q', 'v': 'o', 'w': 'f', 'x': 'e', 'y': 'c', 'z': 'k'}
)
#define the rotor notch positions for each rotor
#each of these is defined as the index of the letter
#that displays in the window when the notch is
#lined up to rotate the next rotor with the next keypress
#(note: this is not actually where the physical notch is on a real Engima rotor
# storing notch positions this way is just easier to implement
# however, the output for a given rotor configuration should still be the same)
__rotorNotchPositions = (
16,
4,
21,
9,
25
)
#validate a rotorType and return it as an integer
#raise an exception if input is invalid
@staticmethod
def validateRotorType(rotorType):
if isinstance(rotorType, RotorType):
rotorType = rotorType.value
else:
if not (isinstance(rotorType, int)
and rotorType <= 4
and rotorType >= 0):
#raise an exception if rotor type is out of bounds
raise ValueError("rotorType must be of type RotorType or an integer from 0 to 4")
return rotorType
#returns the pre-defined rotor lettermaps for the different supported rotors
@classmethod
def getRotorLettermap(cls, rotorType):
#get rotor type as an integer (and validate)
rotorType = cls.validateRotorType(rotorType)
return cls.__rotorLetterMaps[rotorType]
#returns the pre-defined rotor notch positions for the
#different supported rotors
@classmethod
def getRotorNotchPos(cls, rotorType):
#get rotor type as an integer (and validate)
rotorType = cls.validateRotorType(rotorType)
return cls.__rotorNotchPositions[rotorType]
#lettermaps for rotors must contain exactly 26 entries,
#one for each letter
@classmethod
def lettermapIsValid(cls, lettermap):
if len(lettermap) != 26:
return False
else:
#the remaining checks can be fulfilled by the
#validator from LetterSwitcher
return super().lettermapIsValid(lettermap)
def __init__(self, rotorType):
#useful for debugging
self.rotorType = rotorType
#set the lettermap var to be passed to the
#super constructor method
#calling getRotorLettermap will throw a ValueError
#if rotorType is invalid
lettermap = self.getRotorLettermap(rotorType)
#set the notch position of this rotor to the predefined notch position
#for this particular rotor type
self.notchPosition = self.getRotorNotchPos(rotorType)
#note: notch position being stored as a single value disallows
#the use of rotors with multiple notches
#multi-notched rotors were created, but are not supported at this time
#TODO: support multi-notched rotors
#used to keep track of the rotor's rotation,
#rotorPosition maps 0 - 25 to the letters a to z
#that you would see through the viewing window on an actual Enigma
self.rotorPosition = 0
#this will be used as an offset to access the lettermap
#initialize ring setting (sometimes called ringstellung)
#this affects the
self.ringSetting = 0
#run super constructor (this sets the lettermap)
super().__init__(lettermap)
#given a letter, uses the rotorPosition instance var
#to return which letter the specified
#letter will enter the 'rotor' (lettermap) as
def getRotatedLetter(self, letter):
#raise exception if letter is not valid
self.validateLetter(letter)
indexInAlphabet = self.alphabet.index(letter)
#'rotate' by rotorPosition by adding it to the
#letter index and modulo the result by 26
#to guarentee a value between 0 and 25
rotatedIndex = (indexInAlphabet + self.rotorPosition) % 26
return self.alphabet[rotatedIndex]
#rotate the output of this rotor using rotorPosition
#this is done to account for the entire rotor rotating
#getRotatedLetter is used to account for the rotation of input,
#whereas this is used to account for the rotation of output
def _getOutputLetter(self, letter):
letterIndex = self.alphabet.index(letter)
outputIndex = (letterIndex - (self.rotorPosition + self.ringSetting)) % 26
return self.alphabet[outputIndex]
#increment rotorPosition
#keeps it within the range 0 - 25 inclusive
#this is technically unneeded as getRotatedLetter uses a modulo
#but I figured it would make a bit more sense
def incrementRotor(self):
self.rotorPosition += 1
#Normally I would use >= 26, but doing it this way
#ensures that if someone manually sets the rotor position to something
#outside this range, the rotor will continue incrementing as normal instead of
#mysteriously "incrementing" from 252 to 0
if self.rotorPosition == 26:
self.rotorPosition = 0
#like incrementRotor, but the value goes down
#keeps it within the range 0 - 25 inclusive (see above)
def decementRotor(self):
self.rotorPosition -= 1
if self.rotorPosition == -1:
self.rotorPosition = 25
#returns the letter you would see through the viewer
#if you looked at this current position
#that is, the rotor's position (accounting for ring setting)
def getRotorPosition(self):
letterIndex = (self.rotorPosition + self.ringSetting) % 26
return self.alphabet[letterIndex]
#validates a ring setting input and returns it as an integer
#raises a ValueError if input is invalid
@classmethod
def validateRingSetting(cls, ringSetting):
#if ring setting is an integer, verify it is within bounds (0-25)
#and then return it
if isinstance(ringSetting, int) and ringSetting >= 0 and ringSetting <= 25:
return ringSetting
#if ring setting is a string, verify it is a single letter, then convert it to
#an integer and return the result
elif isinstance(ringSetting, str) and len(ringSetting) == 1 and ringSetting in cls.alphabet:
return cls.alphabet.index(ringSetting)
else:
raise ValueError("Ring setting must be a single lowercase letter or integer from 0 to 25 (inclusive)")
#alias of validateRingSetting
#these inputs are validated using the same procedure;
#this method exists in case that ever changes in the future
@classmethod
def validateRotorPosition(cls, rotorPosition):
try:
return cls.validateRingSetting(rotorPosition)
except ValueError:
#use a custom error message if invalid
raise ValueError("Rotor position must be a single lowercase letter or integer from 0 to 25 inclusive")
#sets the rotor's rotation such that the specified letter appears in the window
#better than setting rotorPosition directly because it accounts for ring setting
def setRotorPosition(self, rotorPosition):
#validate rotorPosition and get it as an integer
rotorPositionIndex = self.validateRotorPosition(rotorPosition)
self.rotorPosition = (rotorPositionIndex - self.ringSetting) % 26
#set the ring setting to the specified value
#raises a ValueError if the input is invalid
#also adjusts the rotor position to keep the window letter constant
def setRingSetting(self, ringSetting):
#note rotor position
rotorPos = self.rotorPosition
#set the ring setting after validating it
self.ringSetting = self.validateRingSetting(ringSetting)
#set the rotor position such that the window letter is kept the same
#this is because on real enigma machines, the rotor's position was set based on the
#window letter, not the actual rotation of the rotor
self.setRotorPosition(rotorPos)
#returns true if the notch is in position
#to increment the next rotor (the rotor to the left) on this rotor's next turn
#the notch is attached to the alphabet ring and thus the ring setting influences
#its position
def notchInPosition(self):
return (self.rotorPosition + self.ringSetting) % 26 == self.notchPosition
#returns true if the notch is in a position one click AFTER it would've incremented the next rotor
#useful for decrementing rotors
def notchInReversePosition(self):
return (self.rotorPosition + self.ringSetting) % 26 == (self.notchPosition + 1)
#applies ring setting to a letter
#given the letter coming out of the rotor's internal wiring,
#returns the letter of the output pin accounting for shifting due to ring setting
def applyRing(self, letter):
#convert the letter to an integer
letterIndex = self.alphabet.index(letter)
#apply the ring setting
letterIndex = (letterIndex + self.ringSetting) % 26
#return the resulting letter
return self.alphabet[letterIndex]
#override switchLetter to include rotation
def switchLetter(self, letter):
#apply rotation to input
letter = self.getRotatedLetter(letter)
#swap letter according to internal wiring
letter = super().switchLetter(letter)
#apply ring setting rotation to output
letter = self.applyRing(letter)
#apply full rotor rotation to output and return
return self._getOutputLetter(letter)
#override switchLetterReverse to include rotation
#this is important as signals travel through all 3 rotors forward AND back
#on each keypress
def switchLetterReverse(self, letter):
letter = self.getRotatedLetter(letter)
letter = super().switchLetterReverse(letter)
letter = self._getOutputLetter(letter)
letter = self.applyRing(letter)
return letter
#disable the switchSeqence method for rotors,
# as it shouldn't be used due to an inability
# to check when the next rotor should turn
#TODO: fix Rotor.switchSequence so that it can be used
def switchSequence(self):
raise NotImplementedError("Rotor.switchSequence is not implemented")
if __name__ == '__main__':
#test Rotor
encode = Rotor(RotorType.I)
print('switchLetter (expected e):',encode.switchLetter('a'))
print('switchLetterReverse (expected a):',encode.switchLetterReverse('e'))
encode.setRingSetting(1)
encode.setRotorPosition('a')
print("setting ring to B-02")
print("setting rotor to A")
switchedLetter = encode.switchLetter('a')
print('switch letter (expected k):', switchedLetter)
switchedLetter = encode.switchLetterReverse('k')
print('switch letter reverse (expected a):', switchedLetter)
encode.setRingSetting(5)
encode.setRotorPosition('y')
print("setting ring to F-06")
print("setting rotor to Y")
switchedLetter = encode.switchLetter('a')
print('switch letter (expected w):', switchedLetter)
switchedLetter = encode.switchLetterReverse('w')
print('switch letter reverse (expected a):', switchedLetter)

# File: tests/test_external.py | repo: applio/disco | license: BSD-3-Clause
from disco.test import TestCase, TestJob
from disco.util import external
from subprocess import check_call, STDOUT
from os import uname, path
class ExternalJob(TestJob):
ext_params = {"test1": "1,2,3", "one two three": "dim\ndam\n", "dummy": "value"}
sort = False
class ExternalTestCase(TestCase):
inputs = ['ape', 'cat', 'dog']
def serve(self, path):
return 'test_%s\n' % path
def setUp(self):
super(ExternalTestCase, self).setUp()
if uname()[0] == 'Darwin':
self.skipTest('Cannot build static test_external on OS X')
else:
home = self.settings['DISCO_HOME']
self.binary = path.join(home, 'tests', 'test_external')
check_call(['gcc', '-g', '-O3', '-static', '-Wall',
'-I', path.join(home, 'ext'),
'-o', self.binary,
path.join(home, 'ext', 'disco.c'),
path.join(home, 'tests', 'test_external.c'),
'-l', 'Judy'],
stderr=STDOUT)
def test_extmap(self):
def reduce(iter, params):
for k, v in iter:
yield "red_" + k, "red_" + v
self.job = ExternalJob().run(input=self.test_server.urls(self.inputs),
map=external([self.binary]),
reduce=reduce)
results = sorted((v, k) for k, v in self.results(self.job))
for n, (v, k) in enumerate(results):
self.assertEquals(k, 'red_dkey')
self.assertEquals(v, 'red_test_%s\n' % self.inputs[n / 3])
self.assertEquals(len(results), 9)
def test_extreduce(self):
self.job = ExternalJob().run(input=self.test_server.urls(self.inputs),
map=lambda e, params: [('', e)],
reduce=external([self.binary]))
ans = str(sum(map(ord, ''.join('test_%s\n' % i for i in self.inputs))))
self.assertEquals([(ans, ans)] * 10, list(self.results(self.job)))

# File: addons/odoo/addons/base/tests/__init__.py | repo: apadanagroup/parOdoo | license: Apache-2.0
# -*- coding: utf-8 -*-
from . import test_acl
from . import test_api
from . import test_base
from . import test_basecase
from . import test_cache
from . import test_db_cursor
from . import test_expression
from . import test_float
from . import test_func
from . import test_image
from . import test_ir_actions
from . import test_ir_attachment
from . import test_ir_http
from . import test_ir_filters
from . import test_ir_model
from . import test_ir_sequence
from . import test_ir_sequence_date_range
from . import test_ir_default
from . import test_mail
from . import test_menu
from . import test_mimetypes
from . import test_misc
from . import test_orm
from . import test_ormcache
from . import test_osv
from . import test_qweb
from . import test_res_config
from . import test_res_lang
from . import test_search
from . import test_translate
#import test_uninstall # loop
from . import test_user_has_group
from . import test_view_validation
from . import test_views
from . import test_xmlrpc
from . import test_res_partner
from . import test_res_partner_bank
from . import test_res_users
from . import test_reports
from . import test_tests_tags
from . import test_base_document_layout
from . import test_form_create

# File: cairis/test/test_ImpliedCharacteristic.py | repo: RAJANAGORI/cairis | license: Apache-2.0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.InternalDocumentParameters import InternalDocumentParameters
from cairis.core.CodeParameters import CodeParameters
from cairis.core.RoleParameters import RoleParameters
from cairis.core.EnvironmentParameters import EnvironmentParameters
from cairis.core.PersonaParameters import PersonaParameters
from cairis.core.PersonaEnvironmentProperties import PersonaEnvironmentProperties
from cairis.core.ImpliedCharacteristicParameters import ImpliedCharacteristicParameters
import sys
class ImpliedCharacteristicTest(unittest.TestCase):
def setUp(self):
call([os.environ['CAIRIS_CFG_DIR'] + "/initdb.sh"])
cairis.core.BorgFactory.initialise()
f = open(os.environ['CAIRIS_SRC'] + '/test/processes.json')
d = json.load(f)
f.close()
b = Borg()
iIntDocs = d['internaldocuments']
i = InternalDocumentParameters(iIntDocs[0]["theName"],iIntDocs[0]["theDescription"], iIntDocs[0]["theContent"], [],[])
b.dbProxy.addInternalDocument(i)
iCodes = d['codes']
i = CodeParameters(iCodes[0]["theName"], iCodes[0]["theType"],iCodes[0]["theDescription"], iCodes[0]["theInclusionCriteria"], iCodes[0]["theExample"])
b.dbProxy.addCode(i)
i = CodeParameters(iCodes[1]["theName"], iCodes[1]["theType"],iCodes[1]["theDescription"], iCodes[1]["theInclusionCriteria"], iCodes[1]["theExample"])
b.dbProxy.addCode(i)
iQs = d['quotations']
i = (iQs[0]["theType"],iQs[0]["theCode"],iQs[0]["theArtifactType"],iQs[0]["theArtifactName"],iQs[0]["theEnvironment"],iQs[0]["theSection"],iQs[0]["theStartIdx"],iQs[0]["theEndIdx"],iQs[0]["theLabel"],iQs[0]["theSynopsis"])
b.dbProxy.addQuotation(i)
i = (iQs[2]['theType'],iQs[2]["theCode"],iQs[2]["theArtifactType"],iQs[2]["theArtifactName"],iQs[2]["theEnvironment"],iQs[2]["theSection"],iQs[2]["theStartIdx"],iQs[2]["theEndIdx"],iQs[2]["theLabel"],iQs[2]["theSynopsis"])
b.dbProxy.addQuotation(i)
iEnvironments = d['environments']
iep1 = EnvironmentParameters(iEnvironments[0]["theName"],iEnvironments[0]["theShortCode"],iEnvironments[0]["theDescription"])
b.dbProxy.addEnvironment(iep1)
iRoles = d['roles']
irp = RoleParameters(iRoles[0]["theName"], iRoles[0]["theType"], iRoles[0]["theShortCode"], iRoles[0]["theDescription"],[])
b.dbProxy.addRole(irp)
iPersonas = d['personas']
ipp = PersonaParameters(iPersonas[0]["theName"],iPersonas[0]["theActivities"],iPersonas[0]["theAttitudes"],iPersonas[0]["theAptitudes"],iPersonas[0]["theMotivations"],iPersonas[0]["theSkills"],iPersonas[0]["theIntrinsic"],iPersonas[0]["theContextual"],"","0",iPersonas[0]["thePersonaType"],[],[PersonaEnvironmentProperties(iPersonas[0]["theEnvironmentProperties"][0]["theName"],(iPersonas[0]["theEnvironmentProperties"][0]["theDirectFlag"] == "True"),iPersonas[0]["theEnvironmentProperties"][0]["theNarrative"],iPersonas[0]["theEnvironmentProperties"][0]["theRole"])],[])
b.dbProxy.addPersona(ipp)
self.iCN = d['code_networks']
b.dbProxy.addCodeRelationship(self.iCN[0]["thePersonaName"],self.iCN[0]["theFromCode"],self.iCN[0]["theToCode"],self.iCN[0]["theRshipType"])
def testAddUpdateImpliedCharacteristic(self):
b = Borg()
p = ImpliedCharacteristicParameters(self.iCN[0]["thePersonaName"],self.iCN[0]["theFromCode"],self.iCN[0]["theToCode"],self.iCN[0]["theRshipType"],self.iCN[0]["theImpliedCharacteristic"]["theName"],self.iCN[0]["theImpliedCharacteristic"]["theQualifier"],[(self.iCN[0]["theImpliedCharacteristic"]["theFromLabel"],self.iCN[0]["theImpliedCharacteristic"]["theFromReferenceType"])],[(self.iCN[0]["theImpliedCharacteristic"]["theToLabel"],self.iCN[0]["theImpliedCharacteristic"]["theToReferenceType"])],self.iCN[0]["theImpliedCharacteristic"]["theType"])
b.dbProxy.addImpliedCharacteristic(p)
b.dbProxy.addIntention((self.iCN[0]["theImpliedCharacteristic"]["theName"],"implied_characteristic",self.iCN[0]["theImpliedCharacteristic"]["theIntentionName"],self.iCN[0]["theImpliedCharacteristic"]["theIntentionType"]))
# b.dbProxy.addContribution((self.iCN[0]["theImpliedCharacteristic"]["theName"],self.iCN[0]["theImpliedCharacteristic"]["theFromLabel"],self.iCN[0]["theImpliedCharacteristic"]["theFromLabelContribution"],self.iCN[0]["theImpliedCharacteristic"]["theFromLabelValue"]))
# b.dbProxy.addContribution((self.iCN[0]["theImpliedCharacteristic"]["theName"],self.iCN[0]["theImpliedCharacteristicName"]["theToLabel"],self.iCN[0]["theImpliedCharacteristic"]["theToLabelContribution"],self.iCN[0]["theImpliedCharacteristic"]["theToLabelValue"]))
p.setIntention(self.iCN[0]["theImpliedCharacteristic"]["theIntentionName"])
p.setIntentionType(self.iCN[0]["theImpliedCharacteristic"]["theIntentionType"])
o = b.dbProxy.impliedCharacteristic(self.iCN[0]["thePersonaName"],self.iCN[0]["theFromCode"],self.iCN[0]["theToCode"],self.iCN[0]["theRshipType"])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theName"], o[0])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theQualifier"], o[1])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theType"], o[2])
b.dbProxy.updateImpliedCharacteristic(p)
o = b.dbProxy.impliedCharacteristic(self.iCN[0]["thePersonaName"],self.iCN[0]["theFromCode"],self.iCN[0]["theToCode"],self.iCN[0]["theRshipType"])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theName"], o[0])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theQualifier"], o[1])
self.assertEqual(self.iCN[0]["theImpliedCharacteristic"]["theType"], o[2])
def tearDown(self):
b = Borg()
b.dbProxy.close()
call([os.environ['CAIRIS_CFG_DIR'] + "/dropdb.sh"])
if __name__ == '__main__':
unittest.main()

# File: scripts/optimize_bboxes.py | repo: raahii/yolact | license: MIT
"""
Instead of clustering bbox widths and heights, this script
directly optimizes average IoU across the training set given
the specified number of anchor boxes.
Run this script from the Yolact root directory.
"""
import pickle
import random
from itertools import product
from math import sqrt
import numpy as np
import torch
from scipy.optimize import minimize
dump_file = "weights/bboxes.pkl"
aug_file = "weights/bboxes_aug.pkl"
use_augmented_boxes = True
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(
box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2),
)
min_xy = torch.max(
box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2),
)
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b, iscrowd=False):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
inter = intersect(box_a, box_b)
area_a = (
((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]))
.unsqueeze(1)
.expand_as(inter)
) # [A,B]
area_b = (
((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]))
.unsqueeze(0)
.expand_as(inter)
) # [A,B]
union = area_a + area_b - inter
if iscrowd:
return inter / area_a
else:
return inter / union # [A,B]
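# Worked example of the IoU formula from the docstring (values are illustrative):
# box_a = [[0., 0., 2., 2.]] and box_b = [[1., 1., 3., 3.]] overlap in a 1x1 square,
# so inter = 1, union = 4 + 4 - 1 = 7, and jaccard(box_a, box_b) is roughly 0.143.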
# Also convert to point form
def to_relative(bboxes):
return np.concatenate(
(
bboxes[:, 2:4] / bboxes[:, :2],
(bboxes[:, 2:4] + bboxes[:, 4:]) / bboxes[:, :2],
),
axis=1,
)
def make_priors(conv_size, scales, aspect_ratios):
prior_data = []
conv_h = conv_size[0]
conv_w = conv_size[1]
# Iteration order is important (it has to sync up with the convout)
for j, i in product(range(conv_h), range(conv_w)):
x = (i + 0.5) / conv_w
y = (j + 0.5) / conv_h
for scale, ars in zip(scales, aspect_ratios):
for ar in ars:
w = scale * ar / conv_w
h = scale / ar / conv_h
# Point form
prior_data += [x - w / 2, y - h / 2, x + w / 2, y + h / 2]
return torch.Tensor(prior_data).view(-1, 4).cuda()
scales = [
[1.68, 2.91],
[2.95, 2.22, 0.84],
[2.17, 2.22, 3.22],
[0.76, 2.06, 2.81],
[5.33, 2.79],
[13.69],
]
aspect_ratios = [
[[0.72, 0.96], [0.68, 1.17]],
[[1.30, 0.66], [0.63, 1.23], [0.87, 1.41]],
[[1.96, 1.23], [0.58, 0.84], [0.61, 1.15]],
[[19.79, 2.21], [0.47, 1.76], [1.38, 0.79]],
[[4.79, 17.96], [1.04]],
[[14.82]],
]
conv_sizes = [(35, 35), (18, 18), (9, 9), (5, 5), (3, 3), (2, 2)]
optimize_scales = False
batch_idx = 0
def compute_hits(bboxes, anchors, iou_threshold=0.5):
ious = jaccard(bboxes, anchors)
perGTAnchorMax, _ = torch.max(ious, dim=1)
return perGTAnchorMax > iou_threshold
def compute_recall(hits, base_hits):
hits = (hits | base_hits).float()
return torch.sum(hits) / hits.size(0)
def step(x, x_func, bboxes, base_hits, optim_idx):
# This should set the scale and aspect ratio
x_func(x, scales[optim_idx], aspect_ratios[optim_idx])
anchors = make_priors(
conv_sizes[optim_idx], scales[optim_idx], aspect_ratios[optim_idx]
)
return -float(compute_recall(compute_hits(bboxes, anchors), base_hits).cpu())
def optimize(full_bboxes, optim_idx, batch_size=5000):
global batch_idx, scales, aspect_ratios, conv_sizes
start = batch_idx * batch_size
end = min((batch_idx + 1) * batch_size, full_bboxes.size(0))
if batch_idx > (full_bboxes.size(0) // batch_size):
batch_idx = 0
bboxes = full_bboxes[start:end, :]
anchor_base = [
make_priors(conv_sizes[idx], scales[idx], aspect_ratios[idx])
for idx in range(len(conv_sizes))
if idx != optim_idx
]
base_hits = compute_hits(bboxes, torch.cat(anchor_base, dim=0))
def set_x(x, scales, aspect_ratios):
if optimize_scales:
for i in range(len(scales)):
scales[i] = max(x[i], 0)
else:
k = 0
for i in range(len(aspect_ratios)):
for j in range(len(aspect_ratios[i])):
aspect_ratios[i][j] = x[k]
k += 1
res = minimize(
step,
x0=scales[optim_idx] if optimize_scales else sum(aspect_ratios[optim_idx], []),
method="Powell",
args=(set_x, bboxes, base_hits, optim_idx),
)
def pretty_str(x: list):
if isinstance(x, list):
return "[" + ", ".join([pretty_str(y) for y in x]) + "]"
elif isinstance(x, np.ndarray):
return pretty_str(list(x))
else:
return "%.2f" % x
if __name__ == "__main__":
if use_augmented_boxes:
with open(aug_file, "rb") as f:
bboxes = pickle.load(f)
else:
# Load widths and heights from a dump file. Obtain this with
# python3 scripts/save_bboxes.py
with open(dump_file, "rb") as f:
bboxes = pickle.load(f)
bboxes = np.array(bboxes)
bboxes = to_relative(bboxes)
with torch.no_grad():
bboxes = torch.Tensor(bboxes).cuda()
def print_out():
if optimize_scales:
print("Scales: " + pretty_str(scales))
else:
print("Aspect Ratios: " + pretty_str(aspect_ratios))
for p in range(10):
print("(Sub Iteration) ", end="")
for i in range(len(conv_sizes)):
print("%d " % i, end="", flush=True)
optimize(bboxes, i)
print("Done", end="\r")
print("(Iteration %d) " % p, end="")
print_out()
print()
optimize_scales = not optimize_scales
print("scales = " + pretty_str(scales))
print("aspect_ratios = " + pretty_str(aspect_ratios))

# File: basic_accounting/basic_accounting/doctype/journal_entry/journal_entry.py | repo: EPIsumeet/Accounting-App | license: MIT
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Sherlock Holmes and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class JournalEntry(Document):
pass

# File: test/discovery_failure_test.py | repo: slingamn/Testify | license: Apache-2.0
import os
import tempfile
from testify.test_logger import _log
from testify import TestCase, assert_in, run, setup, teardown, test_discovery
from testify.test_discovery import DiscoveryError
class BrokenImportTestCase(TestCase):
__test__ = False
@setup
def create_broken_import_file(self):
"""Write out a test file containing a bad import. This way, a broken
test isn't lying around to be discovered while running other tests.
Write the file in the directory containing this test file; otherwise,
Testify will refuse to import it."""
here = os.path.dirname(os.path.abspath(__file__))
(unused_filehandle, self.broken_import_file_path) = tempfile.mkstemp(
prefix='fake_broken_import',
suffix='.py',
dir=here,
)
with open(self.broken_import_file_path, 'w') as broken_import_file:
broken_import_file.write('import non_existent_module')
self.broken_import_module = 'test.%s' % os.path.splitext(os.path.basename(self.broken_import_file_path))[0]
@teardown
def delete_broken_import_file(self):
files = [
self.broken_import_file_path,
# Also remove the .pyc that was created if the file was imported.
self.broken_import_file_path + 'c',
]
for f in files:
try:
os.remove(f)
except OSError, exc:
_log.error("Could not remove broken import file %s: %r" % (f, exc))
class DiscoveryFailureTestCase(BrokenImportTestCase):
def test_discover_test_with_broken_import(self):
"""Insure that DiscoveryError is raised when a test which imports a
non-existent module is discovered."""
try:
discovered_tests = test_discovery.discover(self.broken_import_module)
discovered_tests.next()
except DiscoveryError, exc:
assert_in('No module named non_existent_module', str(exc))
else:
assert False, 'Expected DiscoveryError.'
if __name__ == '__main__':
run()
# vim: set ts=4 sts=4 sw=4 et:

# File: tests/test_kraken_segment.py | repo: wrznr/ocrd_kraken | license: Apache-2.0
# pylint: disable=import-error
import os
import shutil
from tests.base import TestCase, assets, main
from ocrd.resolver import Resolver
from ocrd_kraken.segment import KrakenSegment
PARAM_JSON = assets.url_of('param-segment.json')
WORKSPACE_DIR = '/tmp/ocrd-ocropy-segment-test'
class TestKrakenSegment(TestCase):
def setUp(self):
if os.path.exists(WORKSPACE_DIR):
shutil.rmtree(WORKSPACE_DIR)
os.makedirs(WORKSPACE_DIR)
def test_run1(self):
resolver = Resolver()
workspace = resolver.workspace_from_url(assets.url_of('kant_aufklaerung_1784-binarized/data/mets.xml'), dst_dir=WORKSPACE_DIR)
proc = KrakenSegment(
workspace,
input_file_grp="OCR-D-IMG-BIN",
output_file_grp="OCR-D-SEG-LINE-KRAKEN",
parameter={'level-of-operation': 'line'}
)
proc.process()
workspace.save_mets()
if __name__ == "__main__":
main()

# File: src/validate_plist_xml/validate_plist_xml.py | repo: jgstew/validate_plist_xml | license: MIT
#!/usr/local/python
"""
# pylint: disable=line-too-long
- https://stackoverflow.com/questions/15798/how-do-i-validate-xml-against-a-dtd-file-in-python
"""
import io
import os
import sys
try:
import lxml.etree # pylint: disable=import-error
except ImportError:
import lxml
# pylint: disable=line-too-long
# https://docs.python.org/3/library/io.html
# https://www.apple.com/DTDs/PropertyList-1.0.dtd
DTD_PLIST = lxml.etree.DTD(
io.StringIO(
"""<!ENTITY % plistObject "(array | data | date | dict | real | integer | string | true | false )" >
<!ELEMENT plist %plistObject;>
<!ATTLIST plist version CDATA "1.0" >
<!-- Collections -->
<!ELEMENT array (%plistObject;)*>
<!ELEMENT dict (key, %plistObject;)*>
<!ELEMENT key (#PCDATA)>
<!--- Primitive types -->
<!ELEMENT string (#PCDATA)>
<!ELEMENT data (#PCDATA)> <!-- Contents interpreted as Base-64 encoded -->
<!ELEMENT date (#PCDATA)> <!-- Contents should conform to a subset of ISO 8601 (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with a loss of precision) -->
<!-- Numerical primitives -->
<!ELEMENT true EMPTY> <!-- Boolean constant true -->
<!ELEMENT false EMPTY> <!-- Boolean constant false -->
<!ELEMENT real (#PCDATA)> <!-- Contents should represent a floating point number matching ("+" | "-")? d+ ("."d*)? ("E" ("+" | "-") d+)? where d is a digit 0-9. -->
<!ELEMENT integer (#PCDATA)> <!-- Contents should represent a (possibly signed) integer number in base 10 -->"""
)
)
def validate_plist(file_pathname):
"""This will validate a single plist XML file against the DTD"""
# parse xml
try:
doc = lxml.etree.parse(file_pathname)
# print('XML well formed, syntax ok.')
# check for file IO error
except IOError:
print("Invalid File: %s" % file_pathname)
return False
# check for XML syntax errors
except lxml.etree.XMLSyntaxError as err:
print("XML Syntax Error in: %s" % file_pathname)
print(err)
return False
# all other errors
except Exception as err: # pylint: disable=broad-except
print(err)
return False
# check if xml is valid to DTD spec
try:
DTD_PLIST.assertValid(doc)
except lxml.etree.DocumentInvalid as err:
print("Failed DTD Validation: %s" % file_pathname)
print(err)
return False
# all other errors
except Exception as err: # pylint: disable=broad-except
print(err)
return False
return True
def validate_plist_files(
folder_path=".", file_extensions=(".recipe", ".plist", ".profile")
):
"""Validate all plist files in a folder and subfolders"""
# https://stackoverflow.com/questions/3964681/find-all-files-in-a-directory-with-extension-txt-in-python
count_errors = 0
count_files = 0
for root, dirs, files in os.walk(folder_path): # pylint: disable=unused-variable
for file in files:
# do not scan within .git folder
if not root.startswith((".git", "./.git")):
# process all files ending with `file_extensions`
if file.lower().endswith(file_extensions):
count_files = count_files + 1
file_path = os.path.join(root, file)
result = validate_plist(file_path)
if not result:
count_errors = count_errors + 1
print("%d errors found in %d plist xml files" % (count_errors, count_files))
return count_errors
def main(folder_path=".", file_extensions=(".recipe", ".plist", ".profile")):
"""Run this function by default"""
# run the validation, get the number of errors
count_errors = validate_plist_files(folder_path, file_extensions)
# return the number of errors as the exit code
sys.exit(count_errors)
if __name__ == "__main__":
main()
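# Library-style usage sketch (the folder name and extensions are illustrative): calling
# validate_plist_files() directly returns the error count instead of exiting.
#
#     n_errors = validate_plist_files("recipes", (".recipe", ".plist"))
#     print("plist validation errors:", n_errors)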

# File: share/ttkwidgets/frames/balloon.py | repo: Marusoftware/Marutools | license: MIT
"""
Author: RedFantom
License: GNU GPLv3
Source: This repository
"""
try:
import Tkinter as tk
import ttk
except ImportError:
import tkinter.ttk as ttk
import tkinter as tk
from PIL import Image, ImageTk
import os
from ttkwidgets.utilities import get_assets_directory
class Balloon(ttk.Frame):
"""Simple help hover balloon."""
def __init__(self, master=None, headertext="Help", text="Some great help is displayed here.", width=200, timeout=1,
background="#fef9cd", **kwargs):
"""
Create a Balloon.
:param master: widget to bind the Balloon to
:type master: widget
:param headertext: text to show in window header
:type headertext: str
:param text: text to show as help text
:type text: str
:param width: width of the window
:type width: int
:param timeout: timeout in seconds to wait until the Balloon is shown
:type timeout: float
:param background: background color of the Balloon
:type background: str
:param kwargs: keyword arguments passed on to the :class:`ttk.Frame` initializer
"""
ttk.Frame.__init__(self, master, **kwargs)
self._toplevel = None
self._canvas = None
self.header_label = None
self.text_label = None
# The image was found here:
# https://www.iconfinder.com/icons/26486/balloon_help_information_icon#size=16
# Under CC Attribution License
self._image = Image.open(os.path.join(get_assets_directory(), "balloon.png"))
self._photo_image = ImageTk.PhotoImage(self._image, master=self)
self.__background = background
self.__headertext = headertext
self.__text = text
self.__width = width
self.master = master
self._id = None
self._timeout = timeout
self.master.bind("<Enter>", self._on_enter)
self.master.bind("<Leave>", self._on_leave)
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def _grid_widgets(self):
"""Place the widgets in the Toplevel."""
self._canvas.grid(sticky="nswe")
self.header_label.grid(row=1, column=1, sticky="nswe", pady=5, padx=5)
self.text_label.grid(row=3, column=1, sticky="nswe", pady=6, padx=5)
def _on_enter(self, event):
"""Creates a delayed callback for the :obj:`<Enter>` event."""
self._id = self.master.after(int(self._timeout * 1000), func=self.show)
def _on_leave(self, event):
"""Callback for the :obj:`<Leave>` event to destroy the Toplevel."""
if self._toplevel:
self._toplevel.destroy()
self._toplevel = None
if self._id:
self.master.after_cancel(self._id)
self._id = None
def show(self):
"""
Create the Toplevel widget and its child widgets to show in the spot of the cursor.
This is the callback for the delayed :obj:`<Enter>` event (see :meth:`~Balloon._on_enter`).
"""
self._toplevel = tk.Toplevel(self.master)
self._canvas = tk.Canvas(self._toplevel, background=self.__background)
self.header_label = ttk.Label(self._canvas, text=self.__headertext, background=self.__background,
image=self._photo_image, compound=tk.LEFT)
self.text_label = ttk.Label(self._canvas, text=self.__text, wraplength=self.__width,
background=self.__background)
self._toplevel.attributes("-topmost", True)
self._toplevel.overrideredirect(True)
self._grid_widgets()
x, y = self.master.winfo_pointerxy()
self._canvas.update()
# Update the Geometry of the Toplevel to update its position and size
self._toplevel.geometry("{0}x{1}+{2}+{3}".format(self._canvas.winfo_width(), self._canvas.winfo_height(),
x + 2, y + 2))
def cget(self, key):
"""
Query widget option.
:param key: option name
:type key: str
:return: value of the option
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
"""
if key == "headertext":
return self.__headertext
elif key == "text":
return self.__text
elif key == "width":
return self.__width
elif key == "timeout":
return self._timeout
elif key == "background":
return self.__background
else:
return ttk.Frame.cget(self, key)
def config(self, **kwargs):
"""
Configure resources of the widget.
To get the list of options for this widget, call the method :meth:`~Balloon.keys`.
See :meth:`~Balloon.__init__` for a description of the widget specific option.
"""
self.__headertext = kwargs.pop("headertext", self.__headertext)
self.__text = kwargs.pop("text", self.__text)
self.__width = kwargs.pop("width", self.__width)
self._timeout = kwargs.pop("timeout", self._timeout)
self.__background = kwargs.pop("background", self.__background)
if self._toplevel:
self._on_leave(None)
self.show()
ttk.Frame.config(self, **kwargs)
configure = config
def keys(self):
keys = ttk.Frame.keys(self)
keys.extend(["headertext", "text", "width", "timeout", "background"])
return keys
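# A minimal usage sketch, assuming a running Tk interpreter; the widget names and texts
# are illustrative only:
#
#     root = tk.Tk()
#     button = ttk.Button(root, text="Click me")
#     button.pack()
#     Balloon(button, headertext="Hint", text="Hover help appears after the timeout.",
#             timeout=0.5)
#     root.mainloop()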

# File: python/src/main/python/pygw/base/data_type_adapter.py | repo: e2000y/geowave | license: Apache-2.0
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
from .geowave_object import GeoWaveObject
class DataTypeAdapter(GeoWaveObject):
"""
Base class for data type adapters.
"""
def get_type_name(self):
"""
Returns:
The type name of the data adapter.
"""
return self._java_ref.getTypeName()

# File: mypythontools/build.py | repo: Malachov/mypythontools | license: MIT
"""
This module builds the app via PyInstaller.
It has presets to build applications build with eel.
There is one main function `build_app`. Check its help for how to use
it (it should be very simple).
Note:
You can run the build for example from VS Code tasks: create a folder utils,
create build_script.py inside, and add
>>> import mypythontools
...
>>> if __name__ == "__main__":
... mypythontools.build.build_app() # With all the params you need.
Then just add this task to global tasks.json::
{
"label": "Build app",
"type": "shell",
"command": "python",
"args": ["${workspaceFolder}/utils/build_script.py"],
"presentation": {
"reveal": "always",
"panel": "new"
}
},
"""
from __future__ import annotations
import subprocess
import shutil
from pathlib import Path
import mylogging
import sys
from typing_extensions import Literal
# Lazy imports
# import EelForkExcludeFiles
from . import paths
from . import venvs
from .paths import PROJECT_PATHS
def build_app(
root_path: str | Path = "infer",
main_file: str | Path = "app.py",
preset: Literal["eel", None] = None,
web_path: str | Path | None = "infer",
build_web: bool | str = "preset",
use_virutalenv: bool = True,
remove_last_build: bool = False,
console: bool = True,
debug: bool = False,
icon: str | Path | None = None,
hidden_imports: list[str] = [],
ignored_packages: list[str] = [],
datas: tuple[tuple[str, str], ...] = (),
name: str = None,
env_vars: dict = {},
cleanit: bool = True,
) -> None:
"""One script to build .exe app from source code.
    This script automatically generates the .spec file, builds node web files and adds environment variables during build.
    This script assumes a certain structure of the app (yours may differ though). You can use project-starter from the same repository
    if you are starting a new application.
Args:
root_path (str | Path, optional): Path of root folder where build and dist folders will be placed. Defaults to "infer".
main_file (str, optional): Main file path or name with extension. Main file is found automatically
            and doesn't have to be in the root. Defaults to 'app.py'.
preset (Literal['eel', None], optional): Edit other params for specific use cases (append to hidden_imports, datas etc.).
Defaults to None.
web_path (str | Path | None, optional): Folder with index.html. Defaults to 'infer'.
        build_web (bool | str, optional): If the application contains a package.json, build the node application. If 'preset', build automatically
            depending on the preset. Defaults to 'preset'.
        use_virutalenv (bool, optional): Whether to run a new virtualenv and install all libraries from requirements.txt. Defaults to True.
        remove_last_build (bool, optional): If there are problems, it is possible to delete the build and dist folders first. Defaults to False.
        console (bool, optional): If True, a terminal window appears when the app runs (good for debugging). Defaults to True.
debug (bool, optional): If no console, then dialog window with traceback appears. Defaults to False.
icon (str | Path | None, optional): Path or name with extension to .ico file (!no png!). Defaults to None.
        hidden_imports (list, optional): If the app is not working, it can be because some library was not built in. Add such
libraries into this list. Defaults to [].
ignored_packages (list, optional): Libraries take space even if not necessary. Defaults to [].
datas (tuple[tuple[str, str], ...], optional): Add static files to build. Example: [('my_source_path, 'destination_path')]. Defaults to [].
name (str, optional): If name of app is different than main py file. Defaults to None.
        env_vars (dict, optional): Add some env vars during build. Mostly to tell the main script that it's production (not development) mode.
Defaults to {}.
cleanit (bool, optional): Remove spec file and var env py hook. Defaults to True.
Note:
        Build the pyinstaller bootloader on your PC, otherwise antivirus software may
        scan the file for a while on first run and may even raise a false positive alert.
        Download it from GitHub, cd to the bootloader folder and run::
            python ./waf all
        Then go back to the pyinstaller folder and run `python setup.py`
"""
if sys.version_info.major == 3 and sys.version_info.minor >= 10:
raise RuntimeError(mylogging.return_str("Python version >=3.10 not supported yet."))
root_path = PROJECT_PATHS.ROOT_PATH if root_path == "infer" else paths.validate_path(root_path)
# Try to recognize the structure of app
build_path = root_path / "build"
if not build_path.exists():
build_path.mkdir(parents=True, exist_ok=True)
# Remove last dist manually to avoid permission error if opened in some application
dist_path = root_path / "dist"
if dist_path.exists():
try:
shutil.rmtree(dist_path, ignore_errors=False)
except (PermissionError, OSError):
raise PermissionError(
mylogging.return_str(
"App is opened (May be in another app(terminal, explorer...)). Close it first."
)
)
# May be just name - not absolute
main_file_path = Path(main_file)
if not main_file_path.exists():
# Iter paths and find the one
main_file_path = paths.find_path(main_file_path.name,)
if not main_file_path.exists():
raise KeyError("Main file not found, not inferred and must be configured in params...")
main_file_path = main_file_path.resolve()
if not name:
name = main_file_path.stem
main_folder_path = main_file_path.parent
if icon:
icon_path = Path(icon)
if not icon_path.exists():
# Iter paths and find the one
icon_path = paths.find_path(icon_path.name, exclude_names=["node_modules", "build"],)
if not icon_path.exists():
raise KeyError("Icon not found, not inferred check path or name...")
else:
icon_path = None
generated_warning = """
#########################
### File is generated ###
#########################
# Do not edit this file, edit build_script
"""
if remove_last_build:
try:
shutil.rmtree("build", ignore_errors=True)
except Exception:
pass
# Build JS to static asset
if build_web is True or (build_web == "preset" and preset in ["eel"]):
gui_path = paths.find_path("package.json").parent
try:
builded = subprocess.run("npm run build", check=True, cwd=gui_path.as_posix(), shell=True)
if builded.returncode != 0:
raise RuntimeError()
except Exception:
mylogging.traceback(f"Build of web files failed. Try \n\nnpm run build\n\n in folder {gui_path}.")
raise
if build_web or preset == "eel":
if web_path == "infer":
web_path = paths.find_path(
"index.html", exclude_names=["public", "node_modules", "build",],
).parent
else:
web_path = Path(web_path)
if not web_path.exists():
raise KeyError("Build web assets not found, not inferred and must be configured in params...")
datas = (
*datas,
(web_path.as_posix(), "gui"),
)
if preset == "eel":
import EelForkExcludeFiles
hidden_imports = [
*hidden_imports,
"EelForkExcludeFiles",
"bottle_websocket",
]
datas = (
*datas,
(EelForkExcludeFiles._eel_js_file, "EelForkExcludeFiles",),
)
env_vars = {
**env_vars,
"MY_PYTHON_VUE_ENVIRONMENT": "production",
}
if env_vars:
env_vars_template = f"""
{generated_warning}
import os
for i, j in {env_vars}.items():
os.environ[i] = j
"""
env_path = build_path / "env_vars.py"
with open(env_path, "w") as env_vars_py:
env_vars_py.write(env_vars_template)
runtime_hooks = [env_path.as_posix()]
else:
runtime_hooks = None
spec_template = f"""
{generated_warning}
import sys
from pathlib import Path
import os
sys.setrecursionlimit(5000)
block_cipher = None
a = Analysis(['{main_file_path.as_posix()}'],
pathex=['{main_folder_path.as_posix()}'],
binaries=[],
datas={datas},
hiddenimports={hidden_imports},
hookspath=[],
runtime_hooks={runtime_hooks},
excludes={ignored_packages},
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='{name}',
debug={debug},
bootloader_ignore_signals=False,
strip=False,
upx=True,
console={console},
icon={f"'{icon_path.as_posix()}'" if icon else None})
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='{name}')
"""
spec_path = build_path / "app.spec"
with open(spec_path, "w") as spec_file:
spec_file.write(spec_template)
# Build py to exe
command_list = ["pyinstaller", "-y", spec_path.as_posix()]
if use_virutalenv:
my_venv = venvs.MyVenv(PROJECT_PATHS.ROOT_PATH / "venv")
my_venv.create()
my_venv.sync_requirements()
command_list = [*my_venv.activate_command.split(), " && ", *command_list]
try:
subprocess.run(" ".join(command_list), check=True, cwd=PROJECT_PATHS.ROOT_PATH.as_posix(), shell=True)
except (Exception,):
mylogging.traceback(
"Build with pyinstaller failed. First, check if `pyinstaller` is installed. Check it with pip list in used python interpreter. "
f" Try (if windows, use cmd) \n\n\t{' '.join(command_list)}\n\n in folder `{PROJECT_PATHS.ROOT_PATH.as_posix()}`.\n\n"
"Troubleshooting: If there are still errors, try to install newset pyinstaller locally with `python setup.py install`, "
"update setuptools, delete `build` and `dist` folder and try again."
)
raise
if cleanit:
try:
spec_path.unlink()
env_path.unlink()
except Exception:
pass
| 34.264151 | 151 | 0.611509 |
f42a447475f7919dbca43e42d65b77907bbbcb2c | 295 | py | Python | manage.py | RoganMurley/rogan-murley-website | ada12e77bafee77d7c69fb99899ddf95dd5c5d80 | [
"Apache-2.0"
] | null | null | null | manage.py | RoganMurley/rogan-murley-website | ada12e77bafee77d7c69fb99899ddf95dd5c5d80 | [
"Apache-2.0"
] | null | null | null | manage.py | RoganMurley/rogan-murley-website | ada12e77bafee77d7c69fb99899ddf95dd5c5d80 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
from core.boot import fix_path
fix_path()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.dev")
from djangae.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 21.071429 | 72 | 0.772881 |
ef5a7e55c9274586d637c7b352950c5712dd8d56 | 778 | py | Python | bricks/setup.py | sixaphone/lego_server | f2f42b2047162882d330c868035570cc62df9d8e | [
"MIT"
] | 1 | 2020-09-17T17:54:10.000Z | 2020-09-17T17:54:10.000Z | bricks/setup.py | sixaphone/lego_server | f2f42b2047162882d330c868035570cc62df9d8e | [
"MIT"
] | 2 | 2020-09-22T12:31:05.000Z | 2020-09-24T10:19:40.000Z | bricks/setup.py | sixaphone/lego_server | f2f42b2047162882d330c868035570cc62df9d8e | [
"MIT"
] | null | null | null | from bricks.brick import Brick
from utils.path_resolver import brick_path
from os import listdir
from settings import REMOTE_CONF_DIR, REMOTE_SCRIPT_DIR
class Setup(Brick):
_commands = [
f"rm -rf {REMOTE_CONF_DIR} && mkdir {REMOTE_CONF_DIR}",
f"rm -rf {REMOTE_SCRIPT_DIR} && mkdir {REMOTE_SCRIPT_DIR}",
]
def __init__(self, name, description, cli):
super(Setup, self).__init__(name, description, cli)
def run(self):
super(Setup, self).run()
self.put_recursive("config", REMOTE_CONF_DIR)
self.put_recursive("scripts", REMOTE_SCRIPT_DIR)
def put_recursive(self, source, destination):
for file in listdir(brick_path(source)):
self.cli.connection.put(brick_path(source, file), destination) | 33.826087 | 74 | 0.694087 |
12bab35d6d65a88cd701b143714e4d56f9a4646f | 6,864 | py | Python | src/backend/opsbot/natural_language.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | 11 | 2021-05-27T11:45:02.000Z | 2022-03-29T15:03:28.000Z | src/backend/opsbot/natural_language.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | 2 | 2021-08-16T03:59:19.000Z | 2021-09-29T09:31:39.000Z | src/backend/opsbot/natural_language.py | xiashuqin89/bk-chatbot | d3f95363032f699cbc7e6617060642e0763443a6 | [
"MIT"
] | 12 | 2021-05-27T11:59:18.000Z | 2022-03-17T07:21:53.000Z | """
TencentBlueKing is pleased to support the open source community by making
BlueKing PaaS Platform Community Edition (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import importlib
from typing import Iterable, Optional, Callable, Union, NamedTuple
import asyncio
from . import permission as perm
from .adapter import Bot
from .command import call_command
from .log import logger
from .session import BaseSession
from .self_typing import Context_T, CommandName_T, CommandArgs_T
_nl_processors = set()
class NLProcessor:
__slots__ = ('func', 'keywords', 'permission',
'only_to_me', 'only_short_message',
'allow_empty_message')
def __init__(self, *, func: Callable, keywords: Optional[Iterable],
permission: int, only_to_me: bool, only_short_message: bool,
allow_empty_message: bool):
self.func = func
self.keywords = keywords
self.permission = permission
self.only_to_me = only_to_me
self.only_short_message = only_short_message
self.allow_empty_message = allow_empty_message
def on_natural_language(keywords: Union[Optional[Iterable], str, Callable] = None,
*, permission: int = perm.EVERYBODY,
only_to_me: bool = True,
only_short_message: bool = True,
allow_empty_message: bool = False) -> Callable:
"""
Decorator to register a function as a natural language processor.
:param keywords: keywords to respond to, if None, respond to all messages
:param permission: permission required by the processor
:param only_to_me: only handle messages to me
:param only_short_message: only handle short messages
:param allow_empty_message: handle empty messages
"""
def deco(func: Callable) -> Callable:
nl_processor = NLProcessor(func=func, keywords=keywords,
permission=permission,
only_to_me=only_to_me,
only_short_message=only_short_message,
allow_empty_message=allow_empty_message)
_nl_processors.add(nl_processor)
return func
if isinstance(keywords, Callable):
# here "keywords" is the function to be decorated
return on_natural_language()(keywords)
else:
if isinstance(keywords, str):
keywords = (keywords,)
return deco
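# Illustrative usage sketch (not part of the original module): a plugin typically
# decorates an async processor and maps free text to a registered command; the
# "weather" keyword/command below are hypothetical examples, not real plugins.
#
#     @on_natural_language(keywords={"weather"}, only_to_me=False)
#     async def _(session: NLPSession):
#         return IntentCommand(90.0, "weather", current_arg=session.msg_text)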
class NLPSession(BaseSession):
__slots__ = ('msg', 'msg_text', 'msg_images')
def __init__(self, bot: Bot, ctx: Context_T, msg: str):
super().__init__(bot, ctx)
self.msg = msg
protocol = importlib.import_module(f'protocol.{self.bot.type}')
tmp_msg = protocol.Message(msg)
self.msg_text = tmp_msg.extract_plain_text()
self.msg_images = [s.data['url'] for s in tmp_msg
if s.type == 'image' and 'url' in s.data]
class NLPResult(NamedTuple):
"""
Deprecated.
Use class IntentCommand instead.
"""
confidence: float
cmd_name: Union[str, CommandName_T]
cmd_args: Optional[CommandArgs_T] = None
def to_intent_command(self):
return IntentCommand(confidence=self.confidence,
name=self.cmd_name,
args=self.cmd_args)
class IntentCommand(NamedTuple):
"""
To represent a command that we think the user may be intended to call.
"""
confidence: float
name: Union[str, CommandName_T]
args: Optional[CommandArgs_T] = None
current_arg: str = ''
async def handle_natural_language(bot: Bot, ctx: Context_T) -> bool:
"""
Handle a message as natural language.
This function is typically called by "handle_message".
:param bot: Bot instance
:param ctx: message context
:return: the message is handled as natural language
"""
session = NLPSession(bot, ctx, str(ctx['message']))
# use msg_text here because CQ code "share" may be very long,
# at the same time some plugins may want to handle it
msg_text_length = len(session.msg_text)
futures = []
for p in _nl_processors:
if not p.allow_empty_message and not session.msg:
# don't allow empty msg, but it is one, so skip to next
continue
if p.only_short_message and \
msg_text_length > bot.config.SHORT_MESSAGE_MAX_LENGTH:
continue
if p.only_to_me and not ctx['to_me']:
continue
should_run = await bot.check_permission(ctx, p.permission)
if should_run and p.keywords:
for kw in p.keywords:
if kw in session.msg_text:
break
else:
# no keyword matches
should_run = False
if should_run:
futures.append(asyncio.ensure_future(p.func(session)))
if futures:
# wait for intent commands, and sort them by confidence
intent_commands = []
for fut in futures:
try:
res = await fut
if isinstance(res, NLPResult):
intent_commands.append(res.to_intent_command())
elif isinstance(res, IntentCommand):
intent_commands.append(res)
except Exception as e:
logger.error('An exception occurred while running '
'some natural language processor:')
logger.exception(e)
intent_commands.sort(key=lambda ic: ic.confidence, reverse=True)
logger.debug(f'Intent commands: {intent_commands}')
if intent_commands and intent_commands[0].confidence >= bot.config.NLP_CONFIDENCE:
# choose the intent command with highest confidence
chosen_cmd = intent_commands[0]
logger.debug(
f'Intent command with highest confidence: {chosen_cmd}')
return await call_command(
bot, ctx, chosen_cmd.name,
args=chosen_cmd.args,
current_arg=chosen_cmd.current_arg,
check_perm=False
)
else:
logger.debug('No intent command has enough confidence')
return False
| 35.937173 | 90 | 0.633741 |
7c1742d2cc93c233946e25e0ecbfc8a0148c343f | 9,566 | py | Python | noxfile.py | renovate-bot/python-deploy | f0c866d525fec05b6eb569573e7d368bda76f527 | [
"Apache-2.0"
] | 3 | 2021-11-04T07:19:01.000Z | 2021-11-08T10:23:33.000Z | noxfile.py | renovate-bot/python-deploy | f0c866d525fec05b6eb569573e7d368bda76f527 | [
"Apache-2.0"
] | 14 | 2021-10-20T23:49:25.000Z | 2022-03-07T21:57:05.000Z | noxfile.py | renovate-bot/python-deploy | f0c866d525fec05b6eb569573e7d368bda76f527 | [
"Apache-2.0"
] | 2 | 2021-11-12T22:00:24.000Z | 2022-01-29T08:11:17.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import warnings
import nox
BLACK_VERSION = "black==22.3.0"
ISORT_VERSION = "isort==5.10.1"
LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
UNIT_TEST_STANDARD_DEPENDENCIES = [
"mock",
"asyncmock",
"pytest",
"pytest-cov",
"pytest-asyncio",
]
UNIT_TEST_EXTERNAL_DEPENDENCIES = []
UNIT_TEST_LOCAL_DEPENDENCIES = []
UNIT_TEST_DEPENDENCIES = []
UNIT_TEST_EXTRAS = []
UNIT_TEST_EXTRAS_BY_PYTHON = {}
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
SYSTEM_TEST_STANDARD_DEPENDENCIES = [
"mock",
"pytest",
"google-cloud-testutils",
]
SYSTEM_TEST_EXTERNAL_DEPENDENCIES = []
SYSTEM_TEST_LOCAL_DEPENDENCIES = []
SYSTEM_TEST_DEPENDENCIES = []
SYSTEM_TEST_EXTRAS = []
SYSTEM_TEST_EXTRAS_BY_PYTHON = {}
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black",
"--check",
*LINT_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black",
*LINT_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def format(session):
"""
Run isort to sort imports. Then run black
to format code to uniform standard.
"""
session.install(BLACK_VERSION, ISORT_VERSION)
# Use the --fss option to sort imports using strict alphabetical order.
# See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections
session.run(
"isort",
"--fss",
*LINT_PATHS,
)
session.run(
"black",
*LINT_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def install_unittest_dependencies(session, *constraints):
standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES
session.install(*standard_deps, *constraints)
if UNIT_TEST_EXTERNAL_DEPENDENCIES:
warnings.warn(
"'unit_test_external_dependencies' is deprecated. Instead, please "
"use 'unit_test_dependencies' or 'unit_test_local_dependencies'.",
DeprecationWarning,
)
session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints)
if UNIT_TEST_LOCAL_DEPENDENCIES:
session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints)
if UNIT_TEST_EXTRAS_BY_PYTHON:
extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
elif UNIT_TEST_EXTRAS:
extras = UNIT_TEST_EXTRAS
else:
extras = []
if extras:
session.install("-e", f".[{','.join(extras)}]", *constraints)
else:
session.install("-e", ".", *constraints)
def default(session):
# Install all test dependencies, then install this package in-place.
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
install_unittest_dependencies(session, "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=google",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
def install_systemtest_dependencies(session, *constraints):
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints)
if SYSTEM_TEST_EXTERNAL_DEPENDENCIES:
session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints)
if SYSTEM_TEST_LOCAL_DEPENDENCIES:
session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints)
if SYSTEM_TEST_DEPENDENCIES:
session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints)
if SYSTEM_TEST_EXTRAS_BY_PYTHON:
extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, [])
elif SYSTEM_TEST_EXTRAS:
extras = SYSTEM_TEST_EXTRAS
else:
extras = []
if extras:
session.install("-e", f".[{','.join(extras)}]", *constraints)
else:
session.install("-e", ".", *constraints)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
install_systemtest_dependencies(session, "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_path,
*session.posargs,
)
if system_test_folder_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_folder_path,
*session.posargs,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install(
"sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| 29.164634 | 98 | 0.649279 |
f803cacdbd8125fc66e9a403d018513efae394d7 | 2,943 | py | Python | src/compas/geometry/transformations/reflection.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/geometry/transformations/reflection.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/geometry/transformations/reflection.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | """
This library for transformations partly derived and was re-implemented from the
following online resources:
* http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
* http://www.euclideanspace.com/maths/geometry/rotations/
* http://code.activestate.com/recipes/578108-determinant-of-matrix-of-any-order/
* http://blog.acipo.com/matrix-inversion-in-javascript/
Many thanks to Christoph Gohlke, Martin John Baker, Sachin Joglekar and Andrew
Ippoliti for providing code and documentation.
"""
from compas.geometry import dot_vectors
from compas.geometry import cross_vectors
from compas.geometry import normalize_vector
from compas.geometry.transformations import identity_matrix
from compas.geometry.transformations import Transformation
class Reflection(Transformation):
"""Class representing a reflection transformation.
A reflection transformation mirrors points at a plane.
Parameters
----------
matrix : list[list[float]], optional
A 4x4 matrix (or similar) representing a reflection.
Examples
--------
>>> point = [1, 1, 1]
>>> normal = [0, 0, 1]
>>> R1 = Reflection.from_plane((point, normal))
>>> R2 = Transformation([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 2], [0, 0, 0, 1]])
>>> R1 == R2
True
"""
def __init__(self, matrix=None):
if matrix:
pass
super(Reflection, self).__init__(matrix=matrix)
def __repr__(self):
return "Reflection({0!r})".format(self.matrix)
@classmethod
def from_plane(cls, plane):
"""Construct a reflection transformation that mirrors wrt the given plane.
Parameters
----------
plane : [point, vector] | :class:`~compas.geometry.Plane`
The reflection plane.
Returns
-------
:class:`~compas.geometry.Reflection`
The reflection transformation.
"""
point, normal = plane
normal = normalize_vector((list(normal)))
matrix = identity_matrix(4)
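        # Householder-style construction: the linear 3x3 block becomes I - 2 * n * n^T
        # for the unit normal n, and the translation column is 2 * (point . n) * n,
        # so points lying on the reflection plane map to themselves.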
for i in range(3):
for j in range(3):
matrix[i][j] -= 2.0 * normal[i] * normal[j]
for i in range(3):
matrix[i][3] = 2 * dot_vectors(point, normal) * normal[i]
R = cls()
R.matrix = matrix
return R
@classmethod
def from_frame(cls, frame):
"""Construct a reflection transformation that mirrors wrt the given frame.
Parameters
----------
frame : [point, vector, vector] | :class:`~compas.geometry.Frame`
Returns
-------
:class:`~compas.geometry.Reflection`
The reflection transformation.
"""
if isinstance(frame, (tuple, list)):
point = frame[0]
zaxis = cross_vectors(frame[1], frame[2])
else:
point = frame.point
zaxis = frame.zaxis
return cls.from_plane((point, zaxis))
| 29.727273 | 86 | 0.609242 |
2fc798f5f10a270e3b5125bf6404e06e995a5a4b | 879 | py | Python | functions/matrix_addition.py | Rohan1904/all-calc | 063cb7cf4e64a884013b94857cf5ceae757ec1b2 | [
"MIT"
] | 2 | 2022-03-23T10:21:33.000Z | 2022-03-27T09:14:41.000Z | functions/matrix_addition.py | Rohan1904/all-calc | 063cb7cf4e64a884013b94857cf5ceae757ec1b2 | [
"MIT"
] | 83 | 2020-10-03T03:57:35.000Z | 2020-10-24T10:01:32.000Z | functions/matrix_addition.py | Rohan1904/all-calc | 063cb7cf4e64a884013b94857cf5ceae757ec1b2 | [
"MIT"
] | 53 | 2020-10-02T17:33:30.000Z | 2020-11-14T11:42:29.000Z | import numpy as np
def calc(should_print=False):
print("""
Name: Matrix Addition
Operation : Addition of 2 matrices
Inputs : a->int/float , b->int/float
Outputs: c=>a+b ->np.array
Author : varunpusarla
\n
""")
# number of elements
n = int(input("Enter number of elements : "))
# Below line read inputs from user using map() function
a_temp = list(map(int,input("\nEnter the numbers for a : ").strip().split()))[:n]
b_temp = list(map(int,input("\nEnter the numbers for b : ").strip().split()))[:n]
print("\n Matrix A - ", a_temp)
print("\n Matrix B - ", b_temp)
a= np.array(a_temp)
b= np.array(b_temp)
result = {}
result['inputs'] = [a, b]
result['outputs'] = np.add(a,b)
if should_print:
print(f"Solution {result['outputs'][0]}")
else:
print(result)
| 21.975 | 86 | 0.575654 |
bf2104d26d65b2e4dd29bce8454f4ee8c8c4698d | 890 | py | Python | django_analyses/migrations/0002_auto_20200712_1144.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 1 | 2020-12-30T12:43:34.000Z | 2020-12-30T12:43:34.000Z | django_analyses/migrations/0002_auto_20200712_1144.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 59 | 2019-12-25T13:14:56.000Z | 2021-07-22T12:24:46.000Z | django_analyses/migrations/0002_auto_20200712_1144.py | TheLabbingProject/django_analyses | 08cac40a32754a265b37524f08ec6160c69ebea8 | [
"Apache-2.0"
] | 2 | 2020-05-24T06:44:27.000Z | 2020-07-09T15:47:31.000Z | # Generated by Django 3.0.8 on 2020-07-12 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_analyses', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='inputdefinition',
name='value_method',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='directoryinput',
name='value',
field=models.FilePathField(allow_files=False, allow_folders=True, max_length=1000, verbose_name='/media/veracrypt1/media'),
),
migrations.AlterField(
model_name='fileoutput',
name='value',
field=models.FilePathField(blank=True, max_length=1000, null=True, verbose_name='/media/veracrypt1/media'),
),
]
| 30.689655 | 135 | 0.619101 |
4011c0d72876527eff5aa41db1e7af846301a45e | 6,964 | py | Python | bird_classify.py | bozzltron/project-everett | cfcd65e86780deb647d8e2c9d99b8c88258e360a | [
"Apache-2.0"
] | null | null | null | bird_classify.py | bozzltron/project-everett | cfcd65e86780deb647d8e2c9d99b8c88258e360a | [
"Apache-2.0"
] | null | null | null | bird_classify.py | bozzltron/project-everett | cfcd65e86780deb647d8e2c9d99b8c88258e360a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python3
"""
Coral Smart Bird Feeder
Uses ClassificationEngine from the EdgeTPU API to analyze animals in
camera frames. Sounds a deterrent if a squirrel is detected.
Users define model, labels file, storage path, deterrent sound, and
optionally can set this to training mode for collecting images for a custom
model.
"""
import sys
import argparse
import time
import re
import imp
import logging
import gstreamer
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
from playsound import playsound
from picamera import PiCamera
from time import sleep
from twython import Twython
# generate your own auth.py file with credentials
from auth import (
app_key,
app_key_secret,
oauth_token,
oauth_token_secret
)
twitter = Twython(app_key, app_key_secret,
oauth_token, oauth_token_secret)
#camera = PiCamera()
def take_a_picture(path, ext='png'):
print('Take a picture!')
tag = '%010d' % int(time.monotonic()*1000)
name = '%s/hiRes-img-%s.%s' %(path,tag,ext)
camera.capture(name)
return name
def tweet(status, filename):
imageFile = open(filename, 'rb')
response = twitter.upload_media(media=imageFile)
media_id = [response['media_id']]
logging.info('media id : %s', response['media_id'])
twitter.update_status(status=status, media_ids=media_id)
def save_data(image,results,path,ext='png'):
"""Saves camera frame and model inference results
to user-defined storage directory."""
tag = '%010d' % int(time.monotonic()*1000)
name = '%s/img-%s.%s' %(path,tag,ext)
image.save(name)
print('Frame saved as: %s' %name)
logging.info('Image: %s Results: %s', tag,results)
return name
def load_labels(path):
"""Parses provided label file for use in model inference."""
p = re.compile(r'\s*(\d+)(.+)')
with open(path, 'r', encoding='utf-8') as f:
lines = (p.match(line).groups() for line in f.readlines())
return {int(num): text.strip() for num, text in lines}
def print_results(start_time, last_time, end_time, results):
"""Print results to terminal for debugging."""
inference_rate = ((end_time - start_time) * 1000)
fps = (1.0/(end_time - last_time))
print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
for label, score in results:
print(' %s, score=%.2f' %(label, score))
def do_training(results,last_results,top_k):
"""Compares current model results to previous results and returns
true if at least one label difference is detected. Used to collect
images for training a custom model."""
new_labels = [label[0] for label in results]
old_labels = [label[0] for label in last_results]
shared_labels = set(new_labels).intersection(old_labels)
if len(shared_labels) < top_k:
print('Difference detected')
return True
def user_selections():
parser = argparse.ArgumentParser()
parser.add_argument('--model', required=True,
help='.tflite model path')
parser.add_argument('--labels', required=True,
help='label file path')
parser.add_argument('--top_k', type=int, default=3,
help='number of classes with highest score to display')
parser.add_argument('--threshold', type=float, default=0.1,
help='class score threshold')
parser.add_argument('--storage', required=True,
help='File path to store images and results')
parser.add_argument('--sound', required=True,
help='File path to deterrent sound')
parser.add_argument('--print', default=False, required=False,
help='Print inference results to terminal')
parser.add_argument('--training', default=False, required=False,
help='Training mode for image collection')
args = parser.parse_args()
return args
def main():
"""Creates camera pipeline, and pushes pipeline through ClassificationEngine
model. Logs results to user-defined storage. Runs either in training mode to
gather images for custom model creation or in deterrent mode that sounds an
'alarm' if a defined label is detected."""
args = user_selections()
print("Loading %s with %s labels."%(args.model, args.labels))
engine = ClassificationEngine(args.model)
labels = load_labels(args.labels)
storage_dir = args.storage
#Initialize logging files
logging.basicConfig(filename='%s/results.log'%storage_dir,
format='%(asctime)s-%(message)s',
level=logging.DEBUG)
last_time = time.monotonic()
last_results = [('label', 0)]
last_tweet = None
def user_callback(image,svg_canvas):
nonlocal last_time
nonlocal last_results
nonlocal last_tweet
start_time = time.monotonic()
results = engine.classify_with_image(image, threshold=args.threshold, top_k=args.top_k)
end_time = time.monotonic()
results = [(labels[i], score) for i, score in results]
if args.print:
print_results(start_time,last_time, end_time, results)
if args.training:
print("training mode")
if do_training(results,last_results,args.top_k):
save_data(image,results, storage_dir)
else:
print("looking for birds")
# Custom model mode:
# Save the images if the label is one of the targets and its probability is relatively high
if results[0][1] >= 0.8:
filename = save_data(image, results, storage_dir)
if (last_tweet is None) or ((time.time() - last_tweet > 300 ) and results[0][1] >= 0.9):
try:
#imageFile = take_a_picture(storage_dir)
status = "I'm %d percent sure this is a %s. #ai"%(results[0][1] * 100, results[0][0])
logging.info('Trying to tweet : %s', status)
logging.info('Reading file %s', filename)
tweet(status, filename)
last_tweet = time.time()
except:
logging.exception('Failed to send tweet')
last_tweet = None
last_results=results
last_time = end_time
result = gstreamer.run_pipeline(user_callback)
if __name__ == '__main__':
main()
| 36.846561 | 101 | 0.657094 |
076cba950b5399ca49a2133f65b56f98f356bc96 | 5,254 | py | Python | PVPolyfit/preprocessing.py | Swixx/PVPolyfit | 3a711d4fc887d44d7dee249cc634c50acbfeed04 | [
"MIT"
] | null | null | null | PVPolyfit/preprocessing.py | Swixx/PVPolyfit | 3a711d4fc887d44d7dee249cc634c50acbfeed04 | [
"MIT"
] | null | null | null | PVPolyfit/preprocessing.py | Swixx/PVPolyfit | 3a711d4fc887d44d7dee249cc634c50acbfeed04 | [
"MIT"
] | null | null | null | from numpy import linalg, zeros, ones, hstack, asarray, vstack, array, mean, std
import itertools
import matplotlib.pyplot as plt
from datetime import datetime
import pandas as pd
import numpy as np
import scipy
import matplotlib.dates as mdates
from sklearn.metrics import mean_squared_error
from math import sqrt
import pvlib
from pvlib import clearsky, atmosphere, solarposition
from pvlib.location import Location
from pvlib.iotools import read_tmy3
from pvlib.irradiance import get_extra_radiation
import warnings
import time
warnings.filterwarnings("ignore")
def classify_weather_day_GM_Tina(df, clearsky_ghi_tag, meas_ghi_tag):
X_csm = array(df[clearsky_ghi_tag].tolist())
X_meas = array(df[meas_ghi_tag].tolist())
# Calculate index of serenity, k
# a k is calculated for every index
k = abs(X_meas - X_csm) / X_csm
k = array(k)
#print("final vals k: ", k, "min k: ", k.min(), "max k: ", k.max())
#TODO:
# 1. CHANGE FREQUENCY TO 1 min/10 min
# 2. For every Δt (10 minutes/1 hour), divide it into 3 splits
# 3. For each split, iterate
# Moving average
# MA is calculated for a larger interval, i assume
Nm = 3 # 5
MA = []
for i in range(len(k)):
sumk = 0
if i < Nm:
for j in range(i+1):
sumk += k[j]
else:
for iter in range(Nm):
sumk += k[i - iter]
MA.append(sumk)
MA = array(MA) * (1 / Nm)
#print("CHECK: make sure lengths are equal (k and MA): ", len(k), len(MA))
# Moving function
MF = []
for i in range(len(k)):
sumMF = 0
if i < Nm:
for j in range(i+1):
sumMF += abs(MA[i] - k[i-iter])
else:
for iter in range(Nm):
# MA does not iter
sumMF += abs(MA[i] - k[i-iter])
MF.append(sumMF)
MF = array(MF)
# Classification logic
classification = []
# input k and MF
for i in range(len(k)):
if(MF[i] > 0.05):
# Variable
classification.append(1)
# k[i] = 0.4
elif (k[i] > 0.7):
# Cloudy
classification.append(2)
elif (k[i] > 0.2 or MF[i] > 0.02):
# Slightly Cloudy
classification.append(3)
else:
# Clear
classification.append(4)
return classification, k, MF
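# Usage sketch (illustrative; the column names are placeholders for whatever the
# caller's DataFrame actually uses):
#
#   labels, k, MF = classify_weather_day_GM_Tina(df, "ghi_clearsky", "ghi_measured")
#   df["sky_class"] = labels  # 1 = variable, 2 = cloudy, 3 = slightly cloudy, 4 = clear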
def data_preprocessing(df, xs, Y_tag, I_tag, cs_tag, Y_high_filter, print_info, include_preprocess):
# data processing
df.dropna(inplace = True)
#df = df[df[xs[0]] > 20]
# drop where ghi_clearsky is equal to 0 because we will be dividing by that later
df = df[df[Y_tag] > 0]
df = df[df[Y_tag] < Y_high_filter]
# irradiance and temperature sensor verification
# find all points outside of 3 sigma of Isc / Irradiance
# replacing Isc with DC Current because no IV trace at inverter level
if include_preprocess:
if len(cs_tag) != 0:
df = df[df[cs_tag] != 0]
#if True:
# OUTLIER REMOVAL
old_num_rows = len(df.index)
I_vs_Irr = array(df[I_tag].tolist()) / array(df[xs[0]].tolist())
avg = mean(I_vs_Irr, axis = 0)
sd = std(I_vs_Irr, axis = 0)
sigmas = 3
outliers = [(True if ((x < avg - sigmas * sd) or (x > avg + sigmas * sd)) else False) for x in I_vs_Irr]
df['outlier_bool'] = outliers
df = df[df['outlier_bool'] == False]
df.drop(columns = ['outlier_bool'])
if print_info:
new_num_rows = len(df.index)
print("Dropped {} of {} rows with I/Irr filter.".format((old_num_rows - new_num_rows), old_num_rows))
return df
def add_ghi_to_df(df, start, end, freq, dropped_days, xs, ghi_tag, cs_tag, type_ = None):
if type_ == 'NIST':
# multiply GHI by 10^3 because it is in milli
if len(ghi_tag) != 0:
df[ghi_tag] = df[ghi_tag].apply(lambda x: x * 10**2)
#TODO: Classify and group days/or/hours (check regression_notes.txt on Desktop)
if type_ == '8157UCF' or 'PVLifetime' or 'VCAD':
cocoa = Location(28.387566, -80.755984, tz='US/Eastern', altitude = 10.97)
elif type_ == 'NIST':
cocoa = Location(39.994425, -105.265645, tz='US/Mountain', altitude = 1623.974)
else:
print('No valid input.')
times = pd.DatetimeIndex(start=datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').strftime('%Y-%m-%d'),
end=datetime.strptime(end, '%m/%d/%Y %I:%M:%S %p').strftime('%Y-%m-%d'),
freq=freq+'in', tz=cocoa.tz)
cs = cocoa.get_clearsky(times)
df = df[df[xs[0]] > 20]
cs = cs[cs['ghi'] > 20]
cs = cs.iloc[1:]
cs_ghi = pd.DataFrame()
if len(cs_tag) != 0:
cs_ghi[cs_tag] = cs['ghi']
cs_ghi.index = pd.to_datetime(cs.index, format = '%m/%d/%Y %I:%M:%S %p').strftime('%m/%d/%Y %I:%M:%S %p')
cs = pd.merge(df, cs_ghi, how='inner', left_index = True, right_index = True)
df = cs
print(df.index)
if len(df.index)!= 0:
if str(type(df.index[0])) != "<class 'str'>":
df_index = [i.strftime('%m/%d/%Y %I:%M:%S %p') for i in df.index]
df.index = df_index
return df
| 30.546512 | 113 | 0.577274 |
ca8f55f1c00a74012d52a9b6b8c55f47dc6348bc | 807 | py | Python | project/main/services/logic/log_messages.py | Jabba-The-Gut/PREN_2 | 332b46144f771a96e7dbaaa605a82230aaafa948 | [
"MIT"
] | null | null | null | project/main/services/logic/log_messages.py | Jabba-The-Gut/PREN_2 | 332b46144f771a96e7dbaaa605a82230aaafa948 | [
"MIT"
] | null | null | null | project/main/services/logic/log_messages.py | Jabba-The-Gut/PREN_2 | 332b46144f771a96e7dbaaa605a82230aaafa948 | [
"MIT"
] | null | null | null | import sys
import asyncio
from aio_pika import connect, Message, DeliveryMode, ExchangeType
from project.main.const import const
async def main(loop):
# Perform connection
connection = await connect("amqp://guest:guest@localhost/", loop=loop)
# Creating a channel
channel = await connection.channel()
logs_exchange = await channel.declare_exchange("main", ExchangeType.TOPIC)
message_body = b" ".join(sys.argv[1:]) or b"Wrong!"
message = Message(message_body, delivery_mode=DeliveryMode.PERSISTENT)
# Sending the message
await logs_exchange.publish(message, routing_key=const.LOG_BINDING_KEY)
print(" [x] Sent %r" % message)
await connection.close()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop)) | 26.9 | 78 | 0.727385 |
82448c8979d27b96a971685160c12b18b181dba8 | 4,382 | py | Python | service/upload.py | Scarygami/mirror-api | 497783f6d721b24b793c1fcd8c71d0c7d11956d4 | [
"Apache-2.0"
] | 35 | 2015-02-24T14:40:30.000Z | 2022-01-21T23:36:39.000Z | service/upload.py | openube/mirror-api | 702c80b176d62500494779fb0fa0a595cb9320c1 | [
"Apache-2.0"
] | 1 | 2018-03-22T22:36:50.000Z | 2018-03-22T22:36:50.000Z | service/upload.py | openube/mirror-api | 702c80b176d62500494779fb0fa0a595cb9320c1 | [
"Apache-2.0"
] | 10 | 2015-02-18T05:12:20.000Z | 2020-02-07T05:52:12.000Z | #!/usr/bin/python
# Copyright (C) 2013 Gerwin Sturm, FoldedSoft e.U.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to upload mediacontent to cards"""
__author__ = '[email protected] (Gerwin Sturm)'
from utils import base_url
import io
import json
import logging
from apiclient import errors
from apiclient.http import MediaIoBaseUpload
_BOUNDARY = "-----1234567890abc"
def _create_multipart_body(metadata, content, contentType):
base64_data = content.encode("base64").replace("\n", "")
multipart_body = "\r\n--" + _BOUNDARY + "\r\n"
multipart_body += "Content-Type: application/json\r\n\r\n"
multipart_body += json.dumps(metadata)
multipart_body += "\r\n--" + _BOUNDARY + "\r\n"
multipart_body += "Content-Type: " + contentType + "\r\n"
multipart_body += "Content-Transfer-Encoding: base64\r\n\r\n"
multipart_body += base64_data
multipart_body += "\r\n\r\n--" + _BOUNDARY + "--"
return multipart_body
def multipart_insert(metadata, content, contentType, service, test):
if metadata is None:
metadata = {}
"""Insert a new card with metainfo card and media."""
if test is None:
# Using the functionality of the API Client library to directly send multipart request
media = MediaIoBaseUpload(io.BytesIO(content), contentType, resumable=True)
try:
return service.timeline().insert(body=metadata, media_body=media).execute()
except errors.HttpError, error:
logging.error("Multipart update error: %s" % error)
return error
# Constructing the multipart upload for test environement
multipart_body = _create_multipart_body(metadata, content, contentType)
headers = {}
headers["Content-Type"] = "multipart/related; boundary=\"" + _BOUNDARY + "\""
return service._http.request(base_url + "/upload/mirror/v1/timeline", method="POST", body=multipart_body, headers=headers)
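# Usage sketch (illustrative only): `service` is an authorized Mirror API client,
# and the metadata/image bytes below are placeholders.
#
#   card = multipart_insert({"text": "Hello"}, image_bytes, "image/png", service, None)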
def multipart_update(cardId, metadata, content, contentType, service, test):
if metadata is None:
metadata = {}
"""Update a card with metainfo and media."""
if test is None:
# Using the functionality of the API Client library to directly send multipart request
media = MediaIoBaseUpload(io.BytesIO(content), contentType, resumable=True)
try:
return service.timeline().update(id=cardId, body=metadata, media_body=media).execute()
except errors.HttpError, error:
logging.error("Multipart update error: %s" % error)
return error
# Constructing the multipart upload for test environement
multipart_body = _create_multipart_body(metadata, content, contentType)
headers = {}
headers["Content-Type"] = "multipart/related; boundary=\"" + _BOUNDARY + "\""
return service._http.request("%s/upload/mirror/v1/timeline/%s" % (base_url, cardId), method="POST", body=multipart_body, headers=headers)
def media_insert(cardId, content, contentType, service, test):
"""Insert attachment to an existing card."""
if test is None:
# Using the functionality of the API Client library to directly send request
media = MediaIoBaseUpload(io.BytesIO(content), contentType, resumable=True)
try:
return service.timeline().attachments().insert(id=cardId, media_body=media).execute()
except errors.HttpError, error:
logging.error("Attachment insert error: %s" % error)
return error
# Constructing the multipart upload for test environement
multipart_body = _create_multipart_body({}, content, contentType)
headers = {}
headers["Content-Type"] = "multipart/related; boundary=\"" + _BOUNDARY + "\""
return service._http.request("%s/upload/mirror/v1/timeline/%s/attachments" % (base_url, cardId), method="POST", body=multipart_body, headers=headers)
| 38.778761 | 153 | 0.697627 |
25e5185865498434e488402d5510ecf495447807 | 3,498 | py | Python | external_attention_block/BAM.py | Roypic/Attention_Code | 5b6cbfc36e49101567d19d65894641550917a66e | [
"MIT"
] | 3 | 2021-07-05T08:31:03.000Z | 2022-01-12T02:42:29.000Z | external_attention_block/BAM.py | Roypic/Attention_Code | 5b6cbfc36e49101567d19d65894641550917a66e | [
"MIT"
] | null | null | null | external_attention_block/BAM.py | Roypic/Attention_Code | 5b6cbfc36e49101567d19d65894641550917a66e | [
"MIT"
] | 2 | 2021-07-05T08:31:05.000Z | 2021-12-28T10:57:02.000Z | # code from https://github.com/xmu-xiaoma666/External-Attention-pytorch/blob/master/attention/BAM.py
import numpy as np
import torch
from torch import nn
from torch.nn import init
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class ChannelAttention(nn.Module):
def __init__(self, channel, reduction=16, num_layers=3):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d(1)
gate_channels = [channel]
gate_channels += [channel // reduction] * num_layers
gate_channels += [channel]
self.ca = nn.Sequential()
self.ca.add_module('flatten', Flatten())
for i in range(len(gate_channels) - 2):
self.ca.add_module('fc%d' % i, nn.Linear(gate_channels[i], gate_channels[i + 1]))
self.ca.add_module('bn%d' % i, nn.BatchNorm1d(gate_channels[i + 1]))
self.ca.add_module('relu%d' % i, nn.ReLU())
self.ca.add_module('last_fc', nn.Linear(gate_channels[-2], gate_channels[-1]))
def forward(self, x):
res = self.avgpool(x)
res = self.ca(res)
res = res.unsqueeze(-1).unsqueeze(-1).expand_as(x)
return res
class SpatialAttention(nn.Module):
def __init__(self, channel, reduction=16, num_layers=3, dia_val=2):
super().__init__()
self.sa = nn.Sequential()
self.sa.add_module('conv_reduce1',
nn.Conv2d(kernel_size=1, in_channels=channel, out_channels=channel // reduction))
self.sa.add_module('bn_reduce1', nn.BatchNorm2d(channel // reduction))
self.sa.add_module('relu_reduce1', nn.ReLU())
for i in range(num_layers):
self.sa.add_module('conv_%d' % i, nn.Conv2d(kernel_size=3, in_channels=channel // reduction,
out_channels=channel // reduction, padding=1, dilation=dia_val))
self.sa.add_module('bn_%d' % i, nn.BatchNorm2d(channel // reduction))
self.sa.add_module('relu_%d' % i, nn.ReLU())
self.sa.add_module('last_conv', nn.Conv2d(channel // reduction, 1, kernel_size=1))
def forward(self, x):
res = self.sa(x)
res = res.expand_as(x)
return res
class BAMBlock(nn.Module):
def __init__(self, channel=512, reduction=16, dia_val=2):
super().__init__()
self.ca = ChannelAttention(channel=channel, reduction=reduction)
self.sa = SpatialAttention(channel=channel, reduction=reduction, dia_val=dia_val)
self.sigmoid = nn.Sigmoid()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
b, c, _, _ = x.size()
sa_out = self.sa(x)
ca_out = self.ca(x)
weight = self.sigmoid(sa_out + ca_out)
out = (1 + weight) * x
return out
if __name__ == '__main__':
input = torch.randn(50, 512, 7, 7)
bam = BAMBlock(channel=512, reduction=16, dia_val=2)
output = bam(input)
print(output.shape) | 38.021739 | 120 | 0.596913 |
b615b97c27c4050eef10a01a7babb90d59240f6a | 1,268 | py | Python | lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 1 | 2019-11-03T11:45:43.000Z | 2019-11-03T11:45:43.000Z | lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 4 | 2017-05-24T19:36:34.000Z | 2019-08-23T02:49:18.000Z | lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
# code is same as ~/tools/stats/wiggle_to_simple.py
"""
Read a wiggle track and print out a series of lines containing
"chrom position score". Ignores track lines, handles bed, variableStep
and fixedStep wiggle lines.
"""
from __future__ import print_function
import sys
import bx.wiggle
from galaxy.util.ucsc import UCSCOutWrapper, UCSCLimitException
def stop_err( msg ):
sys.stderr.write( msg )
sys.exit()
def main():
if len( sys.argv ) > 1:
in_file = open( sys.argv[1] )
else:
        # sys.stdin is already an open file object; do not wrap it in open()
        in_file = sys.stdin
if len( sys.argv ) > 2:
out_file = open( sys.argv[2], "w" )
else:
out_file = sys.stdout
try:
for fields in bx.wiggle.IntervalReader( UCSCOutWrapper( in_file ) ):
out_file.write( "%s\n" % "\t".join( map( str, fields ) ) )
except UCSCLimitException:
# Wiggle data was truncated, at the very least need to warn the user.
print('Encountered message from UCSC: "Reached output limit of 100000 data values", so be aware your data was truncated.')
except ValueError as e:
in_file.close()
out_file.close()
stop_err( str( e ) )
in_file.close()
out_file.close()
if __name__ == "__main__":
main()
| 25.36 | 130 | 0.645899 |
f8e8f534e84e5d280d87a0dc8be03ce30a67fcc8 | 4,401 | py | Python | rcs_back/takeouts_app/tasks.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | null | null | null | rcs_back/takeouts_app/tasks.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | null | null | null | rcs_back/takeouts_app/tasks.py | e-kondr01/rcs_back | f0f224d01f7051cce9d5feef692216d48cba6f31 | [
"MIT"
] | 1 | 2021-09-25T19:18:55.000Z | 2021-09-25T19:18:55.000Z | import datetime
from typing import List
from celery import shared_task
from dateutil.relativedelta import relativedelta
from django.core.mail import EmailMessage
from django.db.models import Sum
from django.db.models.functions import Coalesce
from django.template.loader import render_to_string
from django.utils import timezone
from rcs_back.containers_app.models import Building, Container
from rcs_back.takeouts_app.models import TankTakeoutRequest
@shared_task
def check_time_conditions() -> None:
    '''Check whether the "no more than N days" conditions are met.'''
building: Building
for building in Building.objects.all():
building.check_conditions_to_notify()
def get_total_mass(start_date: datetime.date, end_date: datetime.date) -> int:
"""Возвращает массу, собранную сервисом
за промежуток времени"""
collected_mass = TankTakeoutRequest.objects.filter(
confirmed_mass__isnull=False
).filter(
confirmed_at__gte=start_date,
confirmed_at__lt=end_date
).aggregate(
collected_mass=Coalesce(Sum("confirmed_mass"), 0)
)["collected_mass"]
return collected_mass
def get_container_owner_emails() -> List[str]:
"""Возвращает список уникальных email'ов
пользователей сервиса."""
return list(Container.objects.exclude(
email__isnull=True
).exclude(email__exact="").filter(
status=Container.ACTIVE
).values_list("email", flat=True).distinct())
def get_collected_mass_per_owner(owners: List[str],
start_date: datetime.date,
end_date: datetime.date) -> "dict[str, int]":
"""Возвращает кол-во собранной макулатуры
каждым из пользователей сервиса"""
res = {}
for owner_email in owners:
collected_mass = 0
containers = Container.objects.filter(
email=owner_email
).filter(
status=Container.ACTIVE
)
container: Container
for container in containers:
collected_mass += container.collected_mass(start_date, end_date)
res[owner_email] = collected_mass
return res
def get_collected_mass_percentage(email: str,
collected_mass: "dict[str, int]") -> int:
"""Возвращает процент пользователей, у которых собранная масса меньше,
чем у данного"""
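    # Worked example (illustrative): with collected_mass = {"a": 10, "b": 5, "c": 1},
    # only "c" collected less than "b", so the function returns (1 / 3) * 100 // 1 = 33.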
if len(collected_mass.keys()) > 1:
less = 0
for user in collected_mass:
if collected_mass[user] < collected_mass[email]:
less += 1
res = (less / len(collected_mass.keys())) * 100 // 1
return int(res)
else:
return 100
@shared_task
def collected_mass_mailing() -> None:
"""Рассылка раз в три месяца о кол-ве собранной макулатуры."""
emails = get_container_owner_emails()
end_date = timezone.now().date()
start_date = end_date - relativedelta(months=3)
total_mass = get_total_mass(start_date, end_date)
collected_mass_per_owner = get_collected_mass_per_owner(
emails,
start_date,
end_date
)
for user_email in emails:
containers = Container.objects.filter(
email=user_email
).filter(
status=Container.ACTIVE
)
if len(containers) == 1:
container_ids = f"контейнера с ID {containers[0].pk}"
else:
container_ids = "контейнеров с ID "
for container in containers:
container_ids += f"{container.pk}, "
container_ids = container_ids[:len(container_ids)-2]
building_mass = containers[0].building.confirmed_collected_mass(
start_date=start_date, end_date=end_date
)
msg = render_to_string("collected_mass_mailing.html", {
"start_date": start_date,
"end_date": end_date,
"total_mass": total_mass,
"building_mass": building_mass,
"container_mass": collected_mass_per_owner[user_email],
"container_ids": container_ids,
"percentage": get_collected_mass_percentage(
user_email,
collected_mass_per_owner
)
}
)
email = EmailMessage(
"Оповещение от сервиса RecycleStarter",
msg,
None,
[user_email]
)
email.content_subtype = "html"
email.send()
| 32.6 | 78 | 0.640536 |
e13ae8a5ca28b8744920fefd84b8c7640d536023 | 1,361 | py | Python | build_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | 2 | 2020-04-20T04:14:43.000Z | 2020-09-18T02:51:43.000Z | build_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | null | null | null | build_dismat.py | littletiger0712/nlp_adversarial_examples | aef4066a6a8d028ba4ae91a50700b8c937c08a14 | [
"MIT"
] | null | null | null | import numpy as np
from progress.bar import Bar
# MAX_VOCAB_SIZE = 500
MAX_VOCAB_SIZE = 60702
STORE_SIZE = 100
embedding_matrix = np.load(('aux_files/embeddings_counter_%d.npy' % (MAX_VOCAB_SIZE))) # c*n
# missed = np.load(('aux_files/missed_embeddings_counter_%d.npy' % (MAX_VOCAB_SIZE)))
# c_ = -2*np.dot(embedding_matrix.T, embedding_matrix) # n*n
# a = np.sum(np.square(embedding_matrix), axis=0).reshape((1, -1))
# b = a.T
# dist_mat = a+b+c_ # n*n
# print('distence matrix build success!')
# dist_order = np.argsort(dist_mat, axis=1)[:,:STORE_SIZE+1]
# idx = (np.arange(MAX_VOCAB_SIZE+1) * (MAX_VOCAB_SIZE+1)).reshape(-1, 1)
# idx = dist_order + idx
# dist_mat_dic = dist_mat.flatten()[idx].reshape(MAX_VOCAB_SIZE+1, STORE_SIZE+1)
dist_mat_dic = np.zeros((MAX_VOCAB_SIZE+1, STORE_SIZE+1))
dist_order = np.zeros((MAX_VOCAB_SIZE+1, STORE_SIZE+1), dtype=np.int)
bar = Bar('test', max=MAX_VOCAB_SIZE+1)
for i in range(MAX_VOCAB_SIZE+1):
item_embedding = embedding_matrix[:, i].reshape(-1, 1)
distance_vec = np.linalg.norm(embedding_matrix-item_embedding, ord=2, axis=0)
dist_order[i] = np.argsort(distance_vec)[:STORE_SIZE+1]
dist_mat_dic[i] = distance_vec[dist_order[i]]
bar.next()
np.save(('aux_files/sdist_mat_dic_%d.npy' % (MAX_VOCAB_SIZE)), dist_mat_dic)
np.save(('aux_files/sdist_order_%d.npy' % (MAX_VOCAB_SIZE)), dist_order)
| 40.029412 | 92 | 0.723733 |
4aa2f4d501a95bc2d003b5452922d9885f74f6d3 | 30,912 | py | Python | node_middleware/socket_listeners/controllers/voder/dataGenerator/visListGenerator.py | TuftsVALT/Snowcat | 4ff1a8ce32e172325a7be7f0095c8659f8709d18 | [
"MIT"
] | 2 | 2021-01-21T10:32:37.000Z | 2021-05-04T11:36:29.000Z | node_middleware/socket_listeners/controllers/voder/dataGenerator/visListGenerator.py | TuftsVALT/Snowcat | 4ff1a8ce32e172325a7be7f0095c8659f8709d18 | [
"MIT"
] | 2 | 2020-10-16T18:14:00.000Z | 2021-01-05T07:31:55.000Z | node_middleware/socket_listeners/controllers/voder/dataGenerator/visListGenerator.py | TuftsVALT/Snowcat | 4ff1a8ce32e172325a7be7f0095c8659f8709d18 | [
"MIT"
] | null | null | null | __author__ = 'arjun010'
from visObject import *
from chartDataFormatter import *
from dataFactGenerator import *
from itertools import combinations, permutations
def getPossibleVisualizations(attributeList, dataList, metadataMap):
possibleVisualizations = []
possibleDataFacts = []
itemAttribute = None # itemAttribute is used in charts like scatterplot and tick plot to enable referring to individual data items
for attribute in metadataMap:
if 'isItemAttr' in metadataMap[attribute]:
if metadataMap[attribute]['isItemAttr'] == "y":
itemAttribute = attribute
break
if len(attributeList) == 1:
attribute = attributeList[0]
if metadataMap[attribute]['type']=="quantitative":
singleAxisTickPlot = getSingleAxisTickPlot(attribute, itemAttribute, dataList)
possibleVisualizations.append(singleAxisTickPlot)
formattedData = getDataForSingleAxisTickPlot(dataList,attribute,itemAttribute)
# tickPlotDataFacts = getDataFacts_TickPlot_Q(attribute,formattedData)
# for dataFact in tickPlotDataFacts:
# dataFact['relatedVisObjects'].append(singleAxisTickPlot)
# possibleDataFacts.append(dataFact)
singleAxisBoxPlot = getSingleAxisBoxPlot(attribute)
possibleVisualizations.append(singleAxisBoxPlot)
singleAxisHistogram = getHistogram(attribute)
possibleVisualizations.append(singleAxisHistogram)
# commonDataFactsForTickAndBoxPlot = getCommonDataFactsForTickPlotAndBoxPlotAndHistogram_Q(attribute, formattedData)
# for dataFact in commonDataFactsForTickAndBoxPlot:
# dataFact['relatedVisObjects'].append(singleAxisTickPlot)
# dataFact['relatedVisObjects'].append(singleAxisBoxPlot)
# if dataFact['type']=="RangeDistributionFact":
# dataFact['relatedVisObjects'].append(singleAxisHistogram)
# possibleDataFacts.append(dataFact)
elif metadataMap[attribute]['type'] == "ordinal" or metadataMap[attribute]['type'] == "nominal":
barChartWithCount = getBarChartWithCount(attribute, dataList)
possibleVisualizations.append(barChartWithCount)
donutChartWithCount = getDonutChartWithCount(attribute, dataList)
possibleVisualizations.append(donutChartWithCount)
formattedData = getDataForBarChartWithCount(dataList,attribute)
commonDataFactsForBarAndDonutChartsWithCount = getCommonFacts_BarAndDonutChartWithCount_N(attribute,formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithCount:
dataFact['relatedVisObjects'].append(barChartWithCount)
dataFact['relatedVisObjects'].append(donutChartWithCount)
possibleDataFacts.append(dataFact)
elif len(attributeList) == 2:
attribute1 = attributeList[0]
attribute2 = attributeList[1]
attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type']]
if attributeTypeList.count("quantitative")==1 and (attributeTypeList.count("nominal")==1 or attributeTypeList.count("ordinal")==1): # N/O x Q
if metadataMap[attribute1]['type']=="quantitative":
yAttr = attribute1
xAttr = attribute2
else:
xAttr = attribute1
yAttr = attribute2
#====================
# generating two axis tick plot and dot plot
#====================
twoAxisTickPlot = getTwoAxisTickPlot(xAttr, yAttr, itemAttribute, dataList)
possibleVisualizations.append(twoAxisTickPlot)
scatterplot = getScatterplot(xAttr, yAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(scatterplot)
formattedData = getDataForTwoAxisTickPlot(dataList,xAttr,yAttr,itemAttribute)
# commonFactsForTickAndDotPlots = getCommonFacts_TickAndDotPlot_NxQ(xAttr,yAttr,None,formattedData)
# for dataFact in commonFactsForTickAndDotPlots:
# dataFact['relatedVisObjects'].append(twoAxisTickPlot)
# dataFact['relatedVisObjects'].append(scatterplot)
# possibleDataFacts.append(dataFact)
#====================
# generating AVG based bar and donut charts
#====================
barChartWithAvg = getBarChartWithAvg(xAttr, yAttr, dataList)
possibleVisualizations.append(barChartWithAvg)
donutChartWithAvg = getDonutChartWithAvg(xAttr, yAttr, dataList)
possibleVisualizations.append(donutChartWithAvg)
formattedData = getDataForBarChartWithAvg(dataList,xAttr,yAttr)
commonDataFactsForBarAndDonutChartsWithAvg = getCommonFacts_BarAndDonutChartWithAvg_NxQ(xAttr, yAttr, "AVG", formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithAvg:
dataFact['relatedVisObjects'].append(barChartWithAvg)
dataFact['relatedVisObjects'].append(donutChartWithAvg)
possibleDataFacts.append(dataFact)
#====================
# generating SUM based bar and donut charts
#====================
barChartWithSum = getBarChartWithSum(xAttr, yAttr, dataList)
possibleVisualizations.append(barChartWithSum)
donutChartWithSum = getDonutChartWithSum(xAttr, yAttr, dataList)
possibleVisualizations.append(donutChartWithSum)
formattedData = getDataForBarChartWithSum(dataList,xAttr,yAttr)
commonDataFactsForBarAndDonutChartsWithSum = getCommonFacts_BarAndDonutChartWithSum_NxQ(xAttr, yAttr, "SUM", formattedData)
for dataFact in commonDataFactsForBarAndDonutChartsWithSum:
dataFact['relatedVisObjects'].append(barChartWithSum)
dataFact['relatedVisObjects'].append(donutChartWithSum)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==2: # Q x Q
# 2 permutations
scatterplot1 = getScatterplot(attribute1,attribute2,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(scatterplot1)
scatterplot2 = getScatterplot(attribute2,attribute1,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(scatterplot2)
formattedData = getDataForScatterplot(dataList,metadataMap,attribute1,attribute2,itemAttribute)
scatterplotDataFacts = getDataFacts_Scatterplot_QxQ(attribute1,attribute2,formattedData,metadataMap)
for dataFact in scatterplotDataFacts:
dataFact['relatedVisObjects'].append(scatterplot1)
dataFact['relatedVisObjects'].append(scatterplot2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==0: # N/O x N/O
# aggregated scatterplot with count (2 permutations)
aggregatedScatterplotWithCount1 = getAggregatedScatterplotWithCount(attribute1,attribute2,dataList)
possibleVisualizations.append(aggregatedScatterplotWithCount1)
aggregatedScatterplotWithCount2 = getAggregatedScatterplotWithCount(attribute2,attribute1,dataList)
possibleVisualizations.append(aggregatedScatterplotWithCount2)
# stacked bar chart (2 permutations)
stackedBarChart1 = getStackedBarChart(attribute1,attribute2,dataList)
possibleVisualizations.append(stackedBarChart1)
stackedBarChart2 = getStackedBarChart(attribute2,attribute1,dataList)
possibleVisualizations.append(stackedBarChart2)
# grouped bar chart (maybe)
formattedData1 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute1,attribute2)
commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute1,attribute2,formattedData1)
for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
dataFact['relatedVisObjects'].append(stackedBarChart1)
dataFact['relatedVisObjects'].append(stackedBarChart2)
possibleDataFacts.append(dataFact)
dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute1,attribute2,formattedData1)
for dataFact in dataFactsForStackedBarChartWithCount:
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount1)
dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
dataFact['relatedVisObjects'].append(stackedBarChart1)
dataFact['relatedVisObjects'].append(stackedBarChart2)
possibleDataFacts.append(dataFact)
# formattedData2 = getDataForAggregatedScatterplotByCount(dataList,metadataMap,attribute2,attribute1)
# commonDataFactsForStackedBarAndAggregatedDotPlotWithCount = getCommonDataFacts_StackedBarAndAggregatedDotPlotWithCount_NxN(attribute2,attribute1,formattedData2)
# for dataFact in commonDataFactsForStackedBarAndAggregatedDotPlotWithCount:
# dataFact['relatedVisObjects'].append(aggregatedScatterplotWithCount2)
# dataFact['relatedVisObjects'].append(stackedBarChart2)
# possibleDataFacts.append(dataFact)
# dataFactsForStackedBarChartWithCount = getStackedBarCharDataFacts_NxN(attribute2,attribute1,formattedData2)
# for dataFact in dataFactsForStackedBarChartWithCount:
# dataFact['relatedVisObjects'].append(stackedBarChart2)
# possibleDataFacts.append(dataFact)
elif len(attributeList) == 3:
attribute1 = attributeList[0]
attribute2 = attributeList[1]
attribute3 = attributeList[2]
attributeTypeList = [metadataMap[attribute1]['type'],metadataMap[attribute2]['type'],metadataMap[attribute3]['type']]
if attributeTypeList.count("quantitative")==0: # 3 N/O
pass
elif attributeTypeList.count("quantitative")==1: # 1 Q x 2 N/O
if metadataMap[attribute1]['type']=="quantitative":
quantitativeAttr = attribute1
if len(metadataMap[attribute2]['domain']) <= len(metadataMap[attribute3]['domain']):
smallerNOAttr = attribute2
largerNOAttr = attribute3
else:
smallerNOAttr = attribute3
largerNOAttr = attribute2
elif metadataMap[attribute2]['type']=="quantitative":
quantitativeAttr = attribute2
if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute3]['domain']):
smallerNOAttr = attribute1
largerNOAttr = attribute3
else:
smallerNOAttr = attribute3
largerNOAttr = attribute1
elif metadataMap[attribute3]['type']=="quantitative":
quantitativeAttr = attribute3
if len(metadataMap[attribute1]['domain']) <= len(metadataMap[attribute2]['domain']):
smallerNOAttr = attribute1
largerNOAttr = attribute2
else:
smallerNOAttr = attribute2
largerNOAttr = attribute1
# N/O x Q x N/O (2 coloring variations possible for each chart)
coloredTickPlot1 = getColoredTickPlot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList)
possibleVisualizations.append(coloredTickPlot1)
coloredTickPlot2 = getColoredTickPlot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList)
possibleVisualizations.append(coloredTickPlot2)
coloredScatterplot1 = getColoredScatterplot(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot1)
coloredScatterplot2 = getColoredScatterplot(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,largerNOAttr,quantitativeAttr,smallerNOAttr,itemAttribute)
# commonDataFactsForColoredTickPlotAndScatterplot = getCommonDataFacts_ColoredTickPlotAndScatterplot_NxQxN(largerNOAttr,quantitativeAttr,smallerNOAttr,formattedData,metadataMap,itemAttribute)
# for dataFact in commonDataFactsForColoredTickPlotAndScatterplot:
# dataFact['relatedVisObjects'].append(coloredTickPlot1)
# dataFact['relatedVisObjects'].append(coloredTickPlot2)
# dataFact['relatedVisObjects'].append(coloredScatterplot1)
# dataFact['relatedVisObjects'].append(coloredScatterplot2)
# possibleDataFacts.append(dataFact)
#========================
coloredScatterplotByAvg1 = getColoredScatterplotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotByAvg1)
coloredScatterplotByAvg2 = getColoredScatterplotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotByAvg2)
coloredTickPlotByAvg1 = getColoredTickPlotByAvg(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotByAvg1)
coloredTickPlotByAvg2 = getColoredTickPlotByAvg(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotByAvg2)
# N/O x N/O x Q (2 variations for AVG and SUM)
aggregatedAvgScatterplot1 = getAggregatedScatterplotByAvg(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedAvgScatterplot1)
aggregatedAvgScatterplot2 = getAggregatedScatterplotByAvg(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedAvgScatterplot2)
formattedData = getDataForAggregatedScatterplotByAvg(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
dataFactsForAggregatedScatterplotByAvg = getDataFactsForAggregatedScatterplotByAvg_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
for dataFact in dataFactsForAggregatedScatterplotByAvg:
dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot1)
dataFact['relatedVisObjects'].append(aggregatedAvgScatterplot2)
dataFact['relatedVisObjects'].append(coloredScatterplotByAvg1)
dataFact['relatedVisObjects'].append(coloredScatterplotByAvg2)
dataFact['relatedVisObjects'].append(coloredTickPlotByAvg1)
dataFact['relatedVisObjects'].append(coloredTickPlotByAvg2)
possibleDataFacts.append(dataFact)
coloredScatterplotBySum1 = getColoredScatterplotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotBySum1)
coloredScatterplotBySum2 = getColoredScatterplotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredScatterplotBySum2)
coloredTickPlotBySum1 = getColoredTickPlotBySum(smallerNOAttr, quantitativeAttr, largerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotBySum1)
coloredTickPlotBySum2 = getColoredTickPlotBySum(largerNOAttr, quantitativeAttr, smallerNOAttr, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(coloredTickPlotBySum2)
aggregatedSumScatterplot1 = getAggregatedScatterplotBySum(smallerNOAttr, largerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedSumScatterplot1)
aggregatedSumScatterplot2 = getAggregatedScatterplotBySum(largerNOAttr, smallerNOAttr, quantitativeAttr, dataList,metadataMap)
possibleVisualizations.append(aggregatedSumScatterplot2)
formattedData = getDataForAggregatedScatterplotBySum(dataList,metadataMap,smallerNOAttr,largerNOAttr,quantitativeAttr)
dataFactsForAggregatedScatterplotBySum = getDataFactsForAggregatedScatterplotBySum_NxNxQ(smallerNOAttr, largerNOAttr, quantitativeAttr, formattedData)
for dataFact in dataFactsForAggregatedScatterplotBySum:
dataFact['relatedVisObjects'].append(aggregatedSumScatterplot1)
dataFact['relatedVisObjects'].append(aggregatedSumScatterplot2)
dataFact['relatedVisObjects'].append(coloredScatterplotBySum1)
dataFact['relatedVisObjects'].append(coloredScatterplotBySum2)
dataFact['relatedVisObjects'].append(coloredTickPlotBySum1)
dataFact['relatedVisObjects'].append(coloredTickPlotBySum2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==2: # 2 Q x 1 N/O
if metadataMap[attribute1]['type']=="ordinal" or metadataMap[attribute1]['type']=="nominal":
nonQAttribute = attribute1
quantitativeAttr1 = attribute2
quantitativeAttr2 = attribute3
elif metadataMap[attribute2]['type']=="ordinal" or metadataMap[attribute2]['type']=="nominal":
nonQAttribute = attribute2
quantitativeAttr1 = attribute1
quantitativeAttr2 = attribute3
elif metadataMap[attribute3]['type']=="ordinal" or metadataMap[attribute3]['type']=="nominal":
nonQAttribute = attribute3
quantitativeAttr1 = attribute1
quantitativeAttr2 = attribute2
# 2 axis variations possible for scatterplot of QxQ +color
coloredScatterplot1 = getColoredScatterplot(quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot1)
coloredScatterplot2 = getColoredScatterplot(quantitativeAttr2,quantitativeAttr1,nonQAttribute,itemAttribute,dataList,metadataMap)
possibleVisualizations.append(coloredScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
dataFactsForColoredScatterplots = getDataFactsForColoredScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
for dataFact in dataFactsForColoredScatterplots:
dataFact['relatedVisObjects'].append(coloredScatterplot1)
dataFact['relatedVisObjects'].append(coloredScatterplot2)
possibleDataFacts.append(dataFact)
# 2 sizing variations possible for scatterplot of N/O x Q +size
sizedScatterplot1 = getSizedScatterplot(nonQAttribute, quantitativeAttr1, quantitativeAttr2, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot1)
sizedScatterplot2 = getSizedScatterplot(nonQAttribute, quantitativeAttr2, quantitativeAttr1, itemAttribute, dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot2)
formattedData = getDataForColoredScatterplot(dataList,metadataMap,quantitativeAttr1,quantitativeAttr2,nonQAttribute,itemAttribute)
commonDataFactsForColoredAndSizedScatterplot = getCommonDataFactsForColoredAndSizedScatterplot_QxQxN(quantitativeAttr1,quantitativeAttr2,nonQAttribute,formattedData,metadataMap)
for dataFact in commonDataFactsForColoredAndSizedScatterplot:
dataFact['relatedVisObjects'].append(coloredScatterplot1)
dataFact['relatedVisObjects'].append(coloredScatterplot2)
dataFact['relatedVisObjects'].append(sizedScatterplot1)
dataFact['relatedVisObjects'].append(sizedScatterplot2)
possibleDataFacts.append(dataFact)
elif attributeTypeList.count("quantitative")==3: # 3 Q
# 6 permutations
for attributePermutation in permutations(attributeList,3):
attributePermutation = list(attributePermutation)
sizedScatterplot = getSizedScatterplot(attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute,dataList,metadataMap)
possibleVisualizations.append(sizedScatterplot)
formattedData = getDataForSizedScatterplot(dataList, metadataMap, attributePermutation[0],attributePermutation[1],attributePermutation[2],itemAttribute)
dataFactsForSizedScatterplot = getDataFactsForSizedScatterplot_QxQxQ(attributePermutation[0],attributePermutation[1],attributePermutation[2],formattedData,metadataMap)
for dataFact in dataFactsForSizedScatterplot:
dataFact['relatedVisObjects'].append(sizedScatterplot)
possibleDataFacts.append(dataFact)
return possibleVisualizations, possibleDataFacts
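# A minimal calling sketch (the data below is hypothetical; in Voder the real
# dataList/metadataMap come from the surrounding application, not this module):
#
#   dataList = [{"Name": "A", "Price": 3.0}, {"Name": "B", "Price": 5.0}]
#   metadataMap = {"Name": {"type": "nominal", "domain": ["A", "B"]},
#                  "Price": {"type": "quantitative"}}
#   visList, dataFacts = getPossibleVisualizations(["Name", "Price"], dataList, metadataMap)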
def getSingleAxisTickPlot(yAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "SingleAxisTickPlot"
visObject['mark'] = "tick"
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
def getSingleAxisBoxPlot(yAttr):
visObject = getEmptyVisObject()
visObject['type'] = "SingleAxisBoxPlot"
visObject['mark'] = "box"
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
def getHistogram(yAttr):
visObject = getEmptyVisObject()
visObject['type'] = "Histogram"
visObject['mark'] = "bar"
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "BIN"
# visObject['shapedData'] = getDataForSingleAxisTickPlot(dataList, yAttr, itemAttr)
return visObject
def getBarChartWithCount(attribute, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithCount"
visObject['mark'] = "bar"
visObject['x']['attribute'] = attribute
visObject['y']['transform'] = "COUNT"
# visObject['shapedData'] = getDataForBarChartWithCount(dataList, attribute)
return visObject
def getDonutChartWithCount(attribute, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithCount"
visObject['mark'] = "arc"
visObject['x']['attribute'] = attribute
visObject['y']['transform'] = "COUNT"
visObject['color']['attribute'] = attribute
# visObject['shapedData'] = getDataForBarChartWithCount(dataList, attribute) # same data format as bar chart
return visObject
def getTwoAxisTickPlot(xAttr, yAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "TwoAxisTickPlot"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForTwoAxisTickPlot(dataList, xAttr, yAttr, itemAttr)
return visObject
def getBarChartWithAvg(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithAvg"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
# visObject['shapedData'] = getDataForBarChartWithAvg(dataList, xAttr, yAttr)
return visObject
def getBarChartWithSum(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "BarWithSum"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
# visObject['shapedData'] = getDataForBarChartWithSum(dataList, xAttr, yAttr)
return visObject
def getDonutChartWithAvg(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithAvg"
visObject['mark'] = "arc"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = xAttr
# visObject['shapedData'] = getDataForBarChartWithAvg(dataList, xAttr, yAttr) # same data format as bar chart
return visObject
def getDonutChartWithSum(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "DonutWithSum"
visObject['mark'] = "arc"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = xAttr
# visObject['shapedData'] = getDataForBarChartWithSum(dataList, xAttr, yAttr) # same data format as bar chart
return visObject
def getScatterplot(xAttr, yAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "Scatterplot"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
# visObject['shapedData'] = getDataForScatterplot(dataList, metadataMap, xAttr,yAttr,itemAttr)
return visObject
def getColoredTickPlot(xAttr, yAttr, colorAttr, itemAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColor"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForColoredTickPlot(dataList,xAttr,yAttr,colorAttr,itemAttr)
return visObject
def getColoredScatterplot(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColor"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForColoredScatterplot(dataList, metadataMap,xAttr,yAttr,colorAttr,itemAttr)
return visObject
def getColoredScatterplotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColorByAvg"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredTickPlotByAvg(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColorByAvg"
visObject['mark'] = "tick"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "AVG"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredScatterplotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithColorBySum"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = colorAttr
return visObject
def getColoredTickPlotBySum(xAttr, yAttr, colorAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "TickPlotWithColorBySum"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['y']['transform'] = "SUM"
visObject['color']['attribute'] = colorAttr
return visObject
def getAggregatedScatterplotByAvg(xAttr, yAttr, sizeAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithAvgSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
visObject['size']['transform'] = "AVG"
# visObject['shapedData'] = getDataForAggregatedScatterplotByAvg(dataList, metadataMap, xAttr,yAttr,sizeAttr)
return visObject
def getAggregatedScatterplotBySum(xAttr, yAttr, sizeAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithSumSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
visObject['size']['transform'] = "SUM"
# visObject['shapedData'] = getDataForAggregatedScatterplotBySum(dataList, metadataMap, xAttr,yAttr,sizeAttr)
return visObject
def getAggregatedScatterplotWithCount(xAttr, yAttr, dataList):
visObject = getEmptyVisObject()
visObject['type'] = "AggregatedScatterplotWithCountSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['transform'] = "COUNT"
# visObject['shapedData'] = getDataForAggregatedScatterplotByCount(dataList,metadataMap,xAttr,yAttr)
return visObject
def getSizedScatterplot(xAttr, yAttr, sizeAttr, itemAttr, dataList, metadataMap):
visObject = getEmptyVisObject()
visObject['type'] = "ScatterplotWithSize"
visObject['mark'] = "point"
visObject['x']['attribute'] = xAttr
visObject['y']['attribute'] = yAttr
visObject['size']['attribute'] = sizeAttr
# visObject['shapedData'] = getDataForSizedScatterplot(dataList, metadataMap, xAttr,yAttr,sizeAttr,itemAttr)
return visObject
def getStackedBarChart(xAttr,colorAttr,dataList):
visObject = getEmptyVisObject()
visObject['type'] = "StackedBarChart"
visObject['mark'] = "bar"
visObject['x']['attribute'] = xAttr
visObject['y']['transform'] = "COUNT"
visObject['color']['attribute'] = colorAttr
# visObject['shapedData'] = getDataForStackedBarChart(dataList,xAttr,colorAttr)
return visObject
if __name__ == '__main__':
pass | 53.388601 | 203 | 0.705163 |
2068564bace3d74a0c1a79837c0001680c71fbd6 | 10,371 | py | Python | comp0037_planner_controller/src/comp0037_planner_controller/general_forward_search_algorithm.py | hongzhoujiang/COMP0037_CW1_GROUP_B | eb5eb47d90c0dd485881f7d33a9e86177df1c6be | [
"BSD-3-Clause"
] | null | null | null | comp0037_planner_controller/src/comp0037_planner_controller/general_forward_search_algorithm.py | hongzhoujiang/COMP0037_CW1_GROUP_B | eb5eb47d90c0dd485881f7d33a9e86177df1c6be | [
"BSD-3-Clause"
] | null | null | null | comp0037_planner_controller/src/comp0037_planner_controller/general_forward_search_algorithm.py | hongzhoujiang/COMP0037_CW1_GROUP_B | eb5eb47d90c0dd485881f7d33a9e86177df1c6be | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from occupancy_grid import OccupancyGrid
from search_grid import SearchGrid
from planner_base import PlannerBase
from collections import deque
from cell import *
from planned_path import PlannedPath
from math import *
import rospy
class GeneralForwardSearchAlgorithm(PlannerBase):
# This class implements the basic framework for LaValle's general
# template for forward search. It includes a lot of methods for
# managing the graphical output as well.
def __init__(self, title, occupancyGrid):
PlannerBase.__init__(self, title, occupancyGrid)
# Flag to store if the last plan was successful
self.goalReached = None
#All the information trackers are stored here as they are common to all general forward search algorithms.
self.max_queue_length = 0
        # Not yet fully implemented here; the value is accumulated when a path
        # is extracted.
        self.total_angle = 0
        # As you go between consecutive waypoints, take the change in angle
        # (waypoint 2 minus waypoint 1) and sum the absolute values over the path.
    # These methods manage the queue of cells to be visited.
def pushCellOntoQueue(self, cell):
raise NotImplementedError()
# This method returns a boolean - true if the queue is empty,
# false if it still has some cells on it.
def isQueueEmpty(self):
raise NotImplementedError()
# This method finds the first cell (at the head of the queue),
# removes it from the queue, and returns it.
def popCellFromQueue(self):
raise NotImplementedError()
# This method determines if the goal has been reached.
def hasGoalBeenReached(self, cell):
raise NotImplementedError()
# This method gets the list of cells which could be visited next.
def getNextSetOfCellsToBeVisited(self, cell):
raise NotImplementedError()
# This method determines whether a cell has been visited already.
def hasCellBeenVisitedAlready(self, cell):
raise NotImplementedError()
def markCellAsVisitedAndRecordParent(self, cell, parentCell):
cell.label = CellLabel.ALIVE
cell.parent = parentCell
# Mark that a cell is dead. A dead cell is one in which all of its
# immediate neighbours have been visited.
def markCellAsDead(self, cell):
cell.label = CellLabel.DEAD
# Handle the case that a cell has been visited already.
def resolveDuplicate(self, cell):
raise NotImplementedError()
# Compute the additive cost of performing a step from the parent to the
# current cell. This calculation is carried out the same way no matter
# what heuristics, etc. are used. The cost computed here takes account
# of the terrain traversability cost using an equation a bit like that
# presented in the lectures.
def computeLStageAdditiveCost(self, parentCell, cell):
# If the parent is empty, this is the start of the path and the
# cost is 0.
if (parentCell is None):
return 0
# Travel cost is Cartesian distance
dX = cell.coords[0] - parentCell.coords[0]
dY = cell.coords[1] - parentCell.coords[1]
# Terrain cost
        # Run this in matlab to visualise or check the image
# However, basically it builds up extremely quickly
# x=[1:0.01:2];
# c=min(1+(.2./((1.7-x).^2)).^2,1000);
cost=min(1+(0.2/((1.75-cell.terrainCost)**2))**2, 1000)
        L = sqrt(dX * dX + dY * dY) * cost  # Multiplied by the terrain cost of the cell
return L
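    # The commented MATLAB snippet above can be reproduced in Python; a rough
    # matplotlib equivalent, using the constant from the cost line above (1.75):
    #   import numpy as np, matplotlib.pyplot as plt
    #   x = np.arange(1.0, 2.0, 0.01)
    #   c = np.minimum(1 + (0.2 / ((1.75 - x) ** 2)) ** 2, 1000)
    #   plt.plot(x, c); plt.show()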
#function for calculating the angle between two adjacent cells
def computeLstageAngle(self,parentCell,cell):
if (parentCell is None):
return -1
return abs(atan2(abs(parentCell.coords[1]-cell.coords[1]),abs(parentCell.coords[0]-cell.coords[0])))
# The main search routine. The routine searches for a path between a given
# set of coordinates. These are then converted into start and destination
# cells in the search grid and the search algorithm is then run.
def search(self, startCoords, goalCoords):
# Make sure the queue is empty. We do this so that we can keep calling
# the same method multiple times and have it work.
while (self.isQueueEmpty() == False):
self.popCellFromQueue()
# Create or update the search grid from the occupancy grid and seed
# unvisited and occupied cells.
if (self.searchGrid is None):
self.searchGrid = SearchGrid.fromOccupancyGrid(self.occupancyGrid)
else:
self.searchGrid.updateFromOccupancyGrid()
# Get the start cell object and label it as such. Also set its
# path cost to 0.
self.start = self.searchGrid.getCellFromCoords(startCoords)
self.start.label = CellLabel.START
self.start.pathCost = 0
# Get the goal cell object and label it.
self.goal = self.searchGrid.getCellFromCoords(goalCoords)
self.goal.label = CellLabel.GOAL
# If the node is being shut down, bail out here.
if rospy.is_shutdown():
return False
# Draw the initial state
self.resetGraphics()
# Insert the start on the queue to start the process going.
self.markCellAsVisitedAndRecordParent(self.start, None)
self.pushCellOntoQueue(self.start)
# Reset the count
self.numberOfCellsVisited = 0
# Indicates if we reached the goal or not
self.goalReached = False
# Iterate until we have run out of live cells to try or we reached the goal.
# This is the main computational loop and is the implementation of
# LaValle's pseudocode
while (self.isQueueEmpty() == False):
# Check if ROS is shutting down; if so, abort. This stops the
# planner from hanging.
if rospy.is_shutdown():
return False
cell = self.popCellFromQueue()
if (self.hasGoalBeenReached(cell) == True):
self.goalReached = True
break
cells = self.getNextSetOfCellsToBeVisited(cell)
for nextCell in cells:
if (self.hasCellBeenVisitedAlready(nextCell) == False):
self.markCellAsVisitedAndRecordParent(nextCell, cell)
self.pushCellOntoQueue(nextCell)
self.numberOfCellsVisited = self.numberOfCellsVisited + 1
else:
self.resolveDuplicate(nextCell, cell)
# Now that we've checked all the actions for this cell,
# mark it as dead
self.markCellAsDead(cell)
# Draw the update if required
self.drawCurrentState()
# Do a final draw to make sure that the graphics are shown, even at the end state
self.drawCurrentState()
print "numberOfCellsVisited = " + str(self.numberOfCellsVisited)
print "maximumQueueLength = " + str(self.max_queue_length)
if self.goalReached:
print "Goal reached"
else:
print "Goal not reached"
return self.goalReached
# This method extracts a path from the pathEndCell to the start
# cell. The path is a list actually sorted in the order:
# cell(x_1), cell(x_2), ... , cell(x_K), cell(x_G). You can use
# this method to try to find the path from any end cell. However,
# depending upon the planner used, the results might not be
# valid. In this case, the path will probably not terminate at the
# start cell.
def extractPathEndingAtCell(self, pathEndCell, colour):
# Construct the path object and mark if the goal was reached
path = PlannedPath()
path.goalReached = self.goalReached
# Initial condition - the goal cell
path.waypoints.append(pathEndCell)
# Start at the goal and find the parent. Find the cost associated with the parent
cell = pathEndCell.parent
path.travelCost = self.computeLStageAdditiveCost(pathEndCell.parent, pathEndCell)
path.travelAngle = self.computeLstageAngle(pathEndCell.parent,pathEndCell)
lastAngle = path.travelAngle
currentAngle = 0
# Iterate back through and extract each parent in turn and add
# it to the path. To work out the travel length along the
        # path, you'll also have to add the length of each step at this stage.
while (cell is not None):
path.waypoints.appendleft(cell)
path.travelCost = path.travelCost + self.computeLStageAdditiveCost(cell.parent, cell)
currentAngle = self.computeLstageAngle(cell.parent,cell)
if (currentAngle == -1):
currentAngle = lastAngle
path.travelAngle = path.travelAngle + abs(currentAngle - lastAngle)
lastAngle = currentAngle
cell = cell.parent
# Update the stats on the size of the path
path.numberOfWaypoints = len(path.waypoints)
# Note that if we failed to reach the goal, the above mechanism computes a path length of 0.
# Therefore, if we didn't reach the goal, change it to infinity
if path.goalReached is False:
path.travelCost = float("inf")
print "Path travel cost = " + str(path.travelCost)
print "Path cardinality = " + str(path.numberOfWaypoints)
print "Path travel angle = " + str(path.travelAngle)
# Draw the path if requested
if (self.showGraphics == True):
self.plannerDrawer.update()
self.plannerDrawer.drawPathGraphicsWithCustomColour(path, colour)
self.plannerDrawer.waitForKeyPress()
# Return the path
return path
# Extract the path from a specified end cell to the start. This is not
# necessarily the full path. Rather, it lets us illustrate parts of the
# path.
def extractPathEndingAtCoord(self, endCellCoord):
endCell = self.searchGrid.getCellFromCoords(endCellCoord)
self.extractPathEndingAtCell(endCell, 'red')
# Extract the path between the start and goal.
def extractPathToGoal(self):
path = self.extractPathEndingAtCell(self.goal, 'yellow')
return path
| 39.888462 | 114 | 0.647479 |
31281b9cad885fff52680464864f6157885e0f85 | 1,467 | py | Python | plan2explore/tools/target_network.py | sarthak268/plan2explore | 264f513d46c6e971d5523782344a694b17139a20 | [
"Apache-2.0"
] | 189 | 2020-05-13T01:12:03.000Z | 2022-03-23T01:38:56.000Z | plan2explore/tools/target_network.py | sarthak268/plan2explore | 264f513d46c6e971d5523782344a694b17139a20 | [
"Apache-2.0"
] | 13 | 2020-05-12T22:51:07.000Z | 2022-03-12T00:28:47.000Z | plan2explore/tools/target_network.py | MishaLaskin/plan2explore | a2e81825f6c22ed3c1f25c9f46b7059acabd6d6b | [
"Apache-2.0"
] | 24 | 2020-05-14T03:47:50.000Z | 2021-09-26T04:20:36.000Z | # Copyright 2019 The Dreamer Authors. Copyright 2020 Plan2Explore Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from plan2explore.tools import schedule as schedule_lib
from plan2explore.tools import copy_weights
def track_network(
trainer, batch_size, source_pattern, target_pattern, every, amount):
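  # On global step 0 the variables matching source_pattern are copied into the
  # target_pattern variables outright; afterwards, whenever the schedule built
  # from `every` fires during the 'train' phase, they are soft-copied towards
  # the source with mixing factor `amount`.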
init_op = tf.cond(
tf.equal(trainer.global_step, 0),
lambda: copy_weights.soft_copy_weights(
source_pattern, target_pattern, 1.0),
tf.no_op)
schedule = schedule_lib.binary(trainer.step, batch_size, 0, every, -1)
with tf.control_dependencies([init_op]):
return tf.cond(
tf.logical_and(tf.equal(trainer.phase, 'train'), schedule),
lambda: copy_weights.soft_copy_weights(
source_pattern, target_pattern, amount),
tf.no_op)
| 37.615385 | 95 | 0.751875 |
d16c9eeecc1e381f7c9a89ff3ad6d09699aecfb0 | 7,461 | py | Python | gistcheck.py | idcrook/awesome-pythonista | 340c8c61e4b97e58567a25327cc4c4395207ded2 | [
"MIT"
] | 24 | 2017-10-23T01:58:04.000Z | 2021-11-15T10:20:22.000Z | gistcheck.py | dpcrook/awesome-pythonista | 340c8c61e4b97e58567a25327cc4c4395207ded2 | [
"MIT"
] | null | null | null | gistcheck.py | dpcrook/awesome-pythonista | 340c8c61e4b97e58567a25327cc4c4395207ded2 | [
"MIT"
] | 4 | 2018-04-29T08:32:45.000Z | 2020-05-17T08:01:40.000Z | # Source: https://gist.github.com/4702275
#
# All-purpose gist tool for Pythonista.
#
# When run directly, this script sets up four other scripts that call various
# functions within this file. Each of these sub-scripts are meant for use as
# action menu items. They are:
#
# Set Gist ID.py - Set the gist id that the current file should be
# associated with.
#
# Download Gist.py - Download the gist from the URL on the clipboard, and
# automatically set the association for it (if possible).
#
# Commit Gist.py - Commits the currently open file to the associated gist.
#
# Pull Gist.py - Replaces the current file with the latest version from
# the associated gist.
# Create Gist.py - create a new gist with the current file
#
#
# Download Gist.py can also be run as a bookmarklet from Mobile Safari (to
# jump from browsing a gist directly to downloading it in Pythonista) by
# creating a bookmark to:
#
# javascript:(function()%7Bif(document.location.href.indexOf('http')===0)document.location.href='pythonista://Gist%20Download?action=run&argv='+document.location.href;%7D)();
#
#
# Credits:
#
# Further building on the previous work of Westacular
# https://gist.github.com/4145515
#
# This combines the gist pull/commit/set id scripts from
# https://gist.github.com/4043334
#
# with the bookmarklet-compatible, multi-gist download script
# https://gist.github.com/5f3f2035d8aa46de42ad
#
# (which in turn is based on https://gist.github.com/4036200,
# which is based on https://gist.github.com/b0644f5ed1d94bd32805)
import clipboard
import console
import editor
import json
import keychain
import os
import re
import requests
import shelve
api_url = 'https://api.github.com/gists/'
class GistDownloadError (Exception): pass
class InvalidGistIDError (Exception): pass
#Perform authorization
def auth(username, password):
data='{"scopes":["gist"],"note":"gistcheck"}'
r = requests.post(
api_url.replace('gist','authorizations'),
data=data,auth=(username,password),
headers={'content-type':'application/json'})
return r.json
#get auth data
def get_token():
token = keychain.get_password('gistcheck','gistcheck')
if token is None:
u, p = console.login_alert('GitHub Login')
token = auth(u, p)['token']
keychain.set_password('gistcheck','gistcheck',token)
return token
def commit_or_create(gist, files, token, message=None):
payload = {"files":{}}
if message is not None: payload['description'] = message
for f, c in files.items():
payload['files'][os.path.basename(f)] = {"content":c}
headers = {
'Content-Type':'application/json',
'Authorization': 'token %s' % token,
}
if gist is None:
# Create a new gist
r = requests.post(api_url[:-1],
data=json.dumps(payload),headers=headers).json
else:
# Commit to existing gist
r = requests.post(api_url + gist,
data=json.dumps(payload),headers=headers).json
return r
def fork(gist,token):
headers = {
'Content-Type':'application/json',
'Authorization': 'token %s' % token,
}
return requests.post(api_url + gist + '/forks',
headers=headers).json
def get_gist_id():
db = shelve.open('gistcheck.db')
gist_id = db.get(editor.get_path(),None)
db.close()
return gist_id
def set_gist_id(gist_id):
gist_id = extract_gist_id(gist_id)
db = shelve.open('gistcheck.db')
db[editor.get_path()] = gist_id
db.close()
def del_gist_id():
db = shelve.open('gistcheck.db')
fpath = editor.get_path()
if fpath in db:
del db[fpath]
db.close()
def extract_gist_id(gist):
if re.match(r'^([0-9a-f]+)$', gist):
return gist
m = re.match(r'^http(s?)://gist.github.com/([^/]+/)?([0-9a-f]*)', gist)
if m:
return m.group(3)
m = re.match(r'^http(s?)://raw.github.com/gist/([0-9a-f]*)', gist)
if m:
return m.group(2)
raise InvalidGistIDError()
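# Examples of inputs the function above accepts (usernames are made up; the id is
# the one from the "Source" comment at the top of this file):
#   extract_gist_id('4702275')                              -> '4702275'
#   extract_gist_id('https://gist.github.com/user/4702275') -> '4702275'
#   extract_gist_id('https://raw.github.com/gist/4702275')  -> '4702275'
# Inputs that match none of the patterns raise InvalidGistIDError.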
#load a file from a gist
def pull():
gist_id = get_gist_id()
if gist_id is None:
console.alert('Error', 'There is no gist id set for this file')
else:
fname = os.path.basename(editor.get_path())
gist_data = requests.get(api_url + gist_id).json
        newtext = None
        try:
newtext = gist_data['files'][fname]['content']
except:
console.alert('Pull Error', 'There was a problem with the pull',gist_data)
if newtext is not None:
editor.replace_text(0,len(editor.get_text()),newtext)
def commit():
token = get_token()
fpath = editor.get_path()
fname = os.path.basename(fpath)
m = console.input_alert('Edit Description','Enter a new description:','')
if m == '': m = None
gist_id = get_gist_id()
res = commit_or_create(gist_id,{fpath:editor.get_text()},token,m)
try:
id = res['id']
except KeyError:
if gist_id:
f = console.alert('Commit Failed',
'Do you have permission to commit? Would you like to fork?','Fork')
if f == 1:
res = fork(gist_id,token)
try:
id = res['id']
except KeyError:
console.alert('Fork Error', 'There was a problem with the fork')
else:
set_gist_id(id)
res = commit_or_create(id,{fpath:editor.get_text()},token,m)
try:
id = res['id']
except KeyError:
console.alert('Commit Error',
'Commit still failed, maybe fork too')
else:
if gist_id is None:
set_gist_id(id)
def set():
gist = get_gist_id()
if gist == None: gist = ''
gist = console.input_alert('Assign Gist ID','Enter the gist id for this file',gist)
if gist == '':
del_gist_id()
else:
try:
set_gist_id(gist)
except InvalidGistIDError:
console.alert('Invalid Gist ID', 'That does not appear to be a valid gist id')
def download_gist(gist_url):
# Returns a 2-tuple of filename and content
gist_id = extract_gist_id(gist_url)
try:
gist_info = requests.get(api_url + gist_id).json
files = gist_info['files']
except:
raise GistDownloadError()
for file_info in files.values():
lang = file_info.get('language', None)
if lang != 'Python':
continue
yield file_info['filename'],gist_id,file_info['content']
def download(gist_url):
    num = 0
    try:
for num, (filename, gist_id, content) in enumerate(download_gist(gist_url), start=1):
if os.path.isfile(filename):
i = console.alert('File exists', 'A file with the name ' + filename +
' already exists in your library.',
'Auto Rename', 'Skip')
if i == 1:
editor.make_new_file(filename, content)
else:
editor.make_new_file(filename, content)
set_gist_id(gist_id)
except InvalidGistIDError:
console.alert('No Gist URL','Invalid Gist URL.','OK')
except GistDownloadError:
console.alert('Error', 'The Gist could not be downloaded.')
if not num:
console.alert('No Python Files', 'This Gist contains no Python files.')
def download_from_args(args):
if len(args) == 2:
url = args[1]
else:
url = clipboard.get()
download(url)
def setup():
script_map={
'Gist Commit' :'gistcheck.commit()',
'Gist Pull' :'gistcheck.pull()',
'Gist Set ID' :'gistcheck.set()',
'Gist Download':'if __name__ == "__main__" : gistcheck.download_from_args( sys.argv )'
}
for s,c in script_map.items():
with open(s+'.py','w') as f:
f.writelines(['import gistcheck\n','%s\n'%c])
if __name__ == '__main__':
setup()
| 30.329268 | 176 | 0.656212 |
97039aebe5c3ad7dfbd23f2b1cf059abaaf40289 | 8,846 | py | Python | scipy/interpolate/rbf.py | stefanv/scipy | 023708ddef24834c8e47b7f4f3f8d7d24096655d | [
"BSD-3-Clause"
] | null | null | null | scipy/interpolate/rbf.py | stefanv/scipy | 023708ddef24834c8e47b7f4f3f8d7d24096655d | [
"BSD-3-Clause"
] | null | null | null | scipy/interpolate/rbf.py | stefanv/scipy | 023708ddef24834c8e47b7f4f3f8d7d24096655d | [
"BSD-3-Clause"
] | null | null | null | """rbf - Radial basis functions for interpolation/smoothing scattered Nd data.
Written by John Travers <[email protected]>, February 2007
Based closely on Matlab code by Alex Chirokov
Additional, large, improvements by Robert Hetland
Some additional alterations by Travis Oliphant
Permission to use, modify, and distribute this software is given under the
terms of the SciPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Copyright (c) 2006-2007, Robert Hetland <[email protected]>
Copyright (c) 2007, John Travers <[email protected]>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Robert Hetland nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
from numpy import (sqrt, log, asarray, newaxis, all, dot, exp, eye,
float_)
from scipy import linalg
__all__ = ['Rbf']
class Rbf(object):
"""
Rbf(*args)
A class for radial basis function approximation/interpolation of
n-dimensional scattered data.
Parameters
----------
*args : arrays
x, y, z, ..., d, where x, y, z, ... are the coordinates of the nodes
and d is the array of values at the nodes
function : str or callable, optional
The radial basis function, based on the radius, r, given by the norm
        (default is Euclidean distance); the default is 'multiquadric'::
'multiquadric': sqrt((r/self.epsilon)**2 + 1)
'inverse': 1.0/sqrt((r/self.epsilon)**2 + 1)
'gaussian': exp(-(r/self.epsilon)**2)
'linear': r
'cubic': r**3
'quintic': r**5
'thin_plate': r**2 * log(r)
If callable, then it must take 2 arguments (self, r). The epsilon
parameter will be available as self.epsilon. Other keyword
arguments passed in will be available as well.
epsilon : float, optional
Adjustable constant for gaussian or multiquadrics functions
- defaults to approximate average distance between nodes (which is
a good start).
smooth : float, optional
Values greater than zero increase the smoothness of the
approximation. 0 is for interpolation (default), the function will
always go through the nodal points in this case.
norm : callable, optional
A function that returns the 'distance' between two points, with
inputs as arrays of positions (x, y, z, ...), and an output as an
array of distance. E.g, the default::
def euclidean_norm(x1, x2):
return sqrt( ((x1 - x2)**2).sum(axis=0) )
which is called with x1=x1[ndims,newaxis,:] and
x2=x2[ndims,:,newaxis] such that the result is a matrix of the
distances from each point in x1 to each point in x2.
Examples
--------
>>> rbfi = Rbf(x, y, z, d) # radial basis function interpolator instance
>>> di = rbfi(xi, yi, zi) # interpolated values
"""
def _euclidean_norm(self, x1, x2):
return sqrt( ((x1 - x2)**2).sum(axis=0) )
def _h_multiquadric(self, r):
return sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_inverse_multiquadric(self, r):
return 1.0/sqrt((1.0/self.epsilon*r)**2 + 1)
def _h_gaussian(self, r):
return exp(-(1.0/self.epsilon*r)**2)
def _h_linear(self, r):
return r
def _h_cubic(self, r):
return r**3
def _h_quintic(self, r):
return r**5
def _h_thin_plate(self, r):
result = r**2 * log(r)
result[r == 0] = 0 # the spline is zero at zero
return result
# Setup self._function and do smoke test on initial r
def _init_function(self, r):
if isinstance(self.function, str):
self.function = self.function.lower()
_mapped = {'inverse': 'inverse_multiquadric',
'inverse multiquadric': 'inverse_multiquadric',
'thin-plate': 'thin_plate'}
if self.function in _mapped:
self.function = _mapped[self.function]
func_name = "_h_" + self.function
if hasattr(self, func_name):
self._function = getattr(self, func_name)
else:
functionlist = [x[3:] for x in dir(self) if x.startswith('_h_')]
raise ValueError("function must be a callable or one of " +
", ".join(functionlist))
self._function = getattr(self, "_h_"+self.function)
elif callable(self.function):
allow_one = False
if hasattr(self.function, 'func_code') or \
hasattr(self.function, '__code__'):
val = self.function
allow_one = True
elif hasattr(self.function, "im_func"):
val = self.function.im_func
elif hasattr(self.function, "__call__"):
val = self.function.__call__.im_func
else:
raise ValueError("Cannot determine number of arguments to function")
argcount = val.func_code.co_argcount
if allow_one and argcount == 1:
self._function = self.function
elif argcount == 2:
if sys.version_info[0] >= 3:
                    self._function = self.function.__get__(self, Rbf)
else:
import new
self._function = new.instancemethod(self.function, self,
Rbf)
else:
raise ValueError("Function argument must take 1 or 2 arguments.")
a0 = self._function(r)
if a0.shape != r.shape:
raise ValueError("Callable must take array and return array of the same shape")
return a0
def __init__(self, *args, **kwargs):
self.xi = asarray([asarray(a, dtype=float_).flatten()
for a in args[:-1]])
self.N = self.xi.shape[-1]
self.di = asarray(args[-1]).flatten()
if not all([x.size==self.di.size for x in self.xi]):
raise ValueError("All arrays must be equal length.")
self.norm = kwargs.pop('norm', self._euclidean_norm)
r = self._call_norm(self.xi, self.xi)
self.epsilon = kwargs.pop('epsilon', r.mean())
self.smooth = kwargs.pop('smooth', 0.0)
self.function = kwargs.pop('function', 'multiquadric')
# attach anything left in kwargs to self
# for use by any user-callable function or
# to save on the object returned.
for item, value in kwargs.items():
setattr(self, item, value)
self.A = self._init_function(r) - eye(self.N)*self.smooth
self.nodes = linalg.solve(self.A, self.di)
def _call_norm(self, x1, x2):
if len(x1.shape) == 1:
x1 = x1[newaxis, :]
if len(x2.shape) == 1:
x2 = x2[newaxis, :]
x1 = x1[..., :, newaxis]
x2 = x2[..., newaxis, :]
return self.norm(x1, x2)
def __call__(self, *args):
args = [asarray(x) for x in args]
if not all([x.shape == y.shape for x in args for y in args]):
raise ValueError("Array lengths must be equal")
shp = args[0].shape
self.xa = asarray([a.flatten() for a in args], dtype=float_)
r = self._call_norm(self.xa, self.xi)
return dot(self._function(r), self.nodes).reshape(shp)
| 40.209091 | 91 | 0.617228 |
b140a8d9af287d6159171987d5070bdbac29da7e | 628 | py | Python | book3/tangosuu.py | Lee-guccii/ExtensiveReading_YL_Estimation | 89e3d4aacdc6e7d812b8cedc24f37e1f173087bc | [
"MIT"
] | null | null | null | book3/tangosuu.py | Lee-guccii/ExtensiveReading_YL_Estimation | 89e3d4aacdc6e7d812b8cedc24f37e1f173087bc | [
"MIT"
] | null | null | null | book3/tangosuu.py | Lee-guccii/ExtensiveReading_YL_Estimation | 89e3d4aacdc6e7d812b8cedc24f37e1f173087bc | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import numpy.random as random
###############
resdic={}
number = 2
# read each book file into text
while number < 199:
    # read the book file contents as a single string
with open("book"+ str(number)+ "_3.txt", "r", encoding="utf-8") as f:
text = f.read()
#####################################
    # count the number of words
# default separator: space
result = len(text.split())
#print("book", number)
#print("There are " + str(result) + " words.")
if 30000 < result < 40000:
resdic[number] = result
number+=1
print()
print("under 20000 words")
print(resdic)
print(len(resdic)) | 16.972973 | 73 | 0.563694 |
89a2169d9ea6b37270ee3f5cd18eb7d6d0307b35 | 943 | py | Python | tests/terraform/checks/resource/aws/test_RDSEncryption.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | tests/terraform/checks/resource/aws/test_RDSEncryption.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | tests/terraform/checks/resource/aws/test_RDSEncryption.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | import unittest
from checkov.terraform.checks.resource.aws.RDSEncryption import check
from checkov.common.models.enums import CheckResult
class TestRDSEncryption(unittest.TestCase):
def test_failure(self):
resource_conf = {'engine': ['postgres'], 'engine_version': ['9.6.3'], 'multi_az': [False], 'backup_retention_period': [10], 'auto_minor_version_upgrade': [True], 'storage_encrypted': [False]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {'engine': ['postgres'], 'engine_version': ['9.6.3'], 'multi_az': [False], 'backup_retention_period': [10], 'auto_minor_version_upgrade': [True], 'storage_encrypted': [True]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 42.863636 | 200 | 0.722163 |
1d35d97dcfb7af39d3647b50a81ebf385f9b6867 | 1,705 | py | Python | psltdsim/plot/sysPLQF.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | psltdsim/plot/sysPLQF.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | psltdsim/plot/sysPLQF.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | def sysPLQF(mirror, blkFlag=True):
"""Plot System Pe, P_load; Qgen, and Frequency of given mirror"""
import matplotlib.pyplot as plt
import numpy as np # to ndarray.flatten ax
mir = mirror
xend = max(mir.r_t)
fig, ax = plt.subplots(nrows=2, ncols=2,)
ax = np.ndarray.flatten(ax)
ax[0].set_title('Real Power Generated')
for mach in mir.Machines:
ax[0].plot(mir.r_t, mach.r_Pe,
marker = 10,
fillstyle='none',
#linestyle = ':',
label = 'Pe Gen '+ mach.Busnam)
ax[0].set_xlabel('Time [sec]')
ax[0].set_ylabel('MW')
ax[2].set_title('Reactive Power Generated')
for mach in mir.Machines:
ax[2].plot(mir.r_t, mach.r_Q,
marker = 10,
fillstyle='none',
#linestyle = ':',
label = 'Q Gen '+ mach.Busnam)
ax[2].set_xlabel('Time [sec]')
ax[2].set_ylabel('MVAR')
ax[1].set_title('Total System P Loading')
ax[1].plot(mir.r_t, mir.r_ss_Pload,
marker = 11,
#fillstyle='none',
#linestyle = ':',
label = 'Pload')
ax[1].set_xlabel('Time [sec]')
ax[1].set_ylabel('MW')
ax[3].set_title('System Mean Frequency')
ax[3].plot(mir.r_t, mir.r_f,
marker = '.',
#linestyle = ':',
label = r'System Frequency')
ax[3].set_xlabel('Time [sec]')
ax[3].set_ylabel('Frequency [PU]')
# Global Plot settings
for x in np.ndarray.flatten(ax):
x.set_xlim(0,xend)
x.legend()
x.grid(True)
fig.tight_layout()
plt.show(block = blkFlag) | 30.446429 | 69 | 0.520821 |
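# Usage sketch (assumes a completed PSLTDSim run whose mirror object has r_t,
# r_f, r_ss_Pload, and Machines populated; the import path is inferred from the
# package layout and may differ):
#
#   from psltdsim.plot.sysPLQF import sysPLQF
#   sysPLQF(mirror, blkFlag=True)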
ce66cc452e265993e032db7a12a91cdf7fe9c646 | 11,220 | py | Python | tools/generate_coverage.py | memmett/PFASST | 655085fae12b7cce8558484baefdac1bf3d84c2c | [
"BSD-2-Clause"
] | 28 | 2015-01-05T11:25:32.000Z | 2021-03-09T01:09:52.000Z | tools/generate_coverage.py | memmett/PFASST | 655085fae12b7cce8558484baefdac1bf3d84c2c | [
"BSD-2-Clause"
] | 81 | 2015-01-05T11:23:15.000Z | 2016-12-13T11:03:04.000Z | tools/generate_coverage.py | memmett/PFASST | 655085fae12b7cce8558484baefdac1bf3d84c2c | [
"BSD-2-Clause"
] | 13 | 2015-02-03T07:59:40.000Z | 2021-04-25T20:26:08.000Z | #!/usr/bin/env python
# coding=utf-8
"""
This script will aid in generating a test coverage report for PFASST++ including its examples.
A standard CPython 3.3 compatible Python interpreter with standard library support is required.
No additional modules.
Run it with argument `-h` for usage instructions.
.. moduleauthor:: Torbjörn Klatt <[email protected]>
"""
from sys import version_info
# require at least Python 3.3
# (because subprocess.DEVNULL)
assert(version_info[0] >= 3 and version_info[1] >= 3)
import argparse
import os
import os.path
import shutil
import subprocess as sp
import re
import logging
from logging.config import dictConfig
dictConfig(
{
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'style': '{',
'format': '[{levelname!s:<8s}] {message!s}'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}
},
'root': {
'handlers': ['console'],
'level': 'INFO'
}
}
)
class Options(object):
coverage_dir = ""
build_dir = ""
base_dir = ""
with_examples = True
tests = []
example_tests = []
tracefiles = []
final_tracefile = ""
options = Options()
options.base_dir = ""
def is_lcov_available():
try:
sp.check_call('lcov --version', shell=True, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
except sp.CalledProcessError:
logging.critical("lcov command not available. It is required.")
return False
try:
sp.check_call('genhtml --version', shell=True, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
except sp.CalledProcessError:
logging.critical("genhtml command not available. It is required.")
return False
return True
def get_project_root():
logging.info("Determine project root directory")
curr_dir = os.path.abspath(os.path.curdir)
logging.debug("Trying current path: %s" % curr_dir)
if os.access(curr_dir + "/include", os.R_OK) and os.access(curr_dir + "/examples", os.R_OK):
logging.debug("Project root is: %s" % curr_dir)
options.base_dir = curr_dir
else:
logging.warning("Probably called from within the tools dir. "
"This should work but is not recommended. "
"Trying parent directory as project root.")
os.chdir("..")
get_project_root()
def setup_and_init_options():
help_string = "Note:\n" \
"This only works for builds made with GCC and the following CMake variables:\n" \
" -Dpfasst_WITH_GCC_PROF=ON -Dpfasst_BUILD_TESTS=ON"
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=help_string)
parser.add_argument('-d', '--build-dir', required=True,
help="name of build directory containing a debug build with GCC and enabled profiling")
parser.add_argument('-o', '--output', default='coverage',
help="output directory for generated coverage report")
parser.add_argument('--no-examples', default=False, action='store_true',
help="whether to not run and include tests from the examples")
parser.add_argument('--debug', default=False, action='store_true',
help="enables more verbose debugging output")
_args = parser.parse_args()
if _args.debug:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Debug mode enabled.")
get_project_root()
if not os.access(_args.build_dir, os.W_OK):
logging.critical("Given build path could not be found: %s" % _args.build_dir)
raise ValueError("Given build path could not be found: %s" % _args.build_dir)
options.build_dir = os.path.abspath(_args.build_dir)
if os.access(options.build_dir + "/CMakeCache.txt", os.W_OK):
with_gcc_prof = False
with_mpi = False
with open(options.build_dir + "/CMakeCache.txt", 'r') as cache:
for line in cache:
if "pfasst_WITH_GCC_PROF:BOOL=ON" in line:
with_gcc_prof = True
if "pfasst_WITH_MPI:BOOL=ON" in line:
with_mpi = True
if not with_gcc_prof:
raise RuntimeError("PFASST++ must be built with 'pfasst_WITH_GCC_PROF=ON'")
if with_mpi:
logging.warning("Coverage analysis only functional for non-MPI builds")
exit(0)
if not os.access(_args.output, os.W_OK):
logging.info("Output directory not found. Creating: %s" % _args.output)
os.mkdir(_args.output)
else:
logging.warning("Clearing out output directory: %s" % _args.output)
shutil.rmtree(_args.output)
os.mkdir(_args.output)
options.coverage_dir = os.path.abspath(_args.output)
options.with_examples = not _args.no_examples
if not options.with_examples:
logging.debug("Not running and tracing tests from examples.")
def get_test_directories():
logging.info("Looking for tests ...")
for root, dirs, files in os.walk(options.build_dir + '/tests'):
        match_name = re.search(r'^.*/(?P<test_name>test_[a-zA-Z\-_]+)\.dir$', root)
match_is_example = re.search('^.*/tests/examples/.*$', root)
is_example = match_is_example is not None
if match_name is not None:
testname = match_name.groupdict()['test_name']
if is_example:
options.example_tests.append({'path': root, 'name': testname, 'is_example': is_example})
else:
options.tests.append({'path': root, 'name': testname, 'is_example': is_example})
logging.info("%d tests found" % (len(options.tests) + len(options.example_tests)))
logging.info(" %d general tests" % len(options.tests))
if options.with_examples:
logging.info(" %d tests for examples" % len(options.example_tests))
def run_test(path, name, is_example):
logging.info("- %s" % name)
logging.debug("Found in %s" % path)
output_file = open('%s/%s.log' % (options.coverage_dir, name), mode='a')
logging.debug("Output log: %s" % output_file.name)
os.chdir(os.path.abspath(path))
logging.debug("Deleting old tracing data ...")
print('### deleting old tracing data ...', file=output_file, flush=True)
sp.check_call('lcov --zerocounters --directory .', shell=True, stdout=output_file, stderr=output_file)
print('### done.', file=output_file, flush=True)
os.chdir(options.build_dir)
logging.debug("Running test ...")
print('### running test ...', file=output_file, flush=True)
sp.check_call('ctest -R %s' % name, shell=True, stdout=output_file, stderr=output_file)
print('### done.', file=output_file, flush=True)
os.chdir(os.path.abspath(path))
logging.debug("Capturing all tracing data ...")
print('### capturing all tracing data ...', file=output_file, flush=True)
sp.check_call('lcov --capture --directory . --output-file "%s.info.complete"' % name,
shell=True, stdout=output_file, stderr=output_file)
print('### done.', file=output_file, flush=True)
logging.debug("Removing unnecessary data ...")
print('### removing unnecessary data ...', file=output_file, flush=True)
try:
sp.check_call('lcov --remove "%s.info.complete" "%s/include/pfasst/easylogging++.h" --output-file %s.info.prelim'
% (name, options.base_dir, name),
shell=True, stdout=output_file, stderr=output_file)
except sp.CalledProcessError as e:
logging.warning(e)
print('### done.', file=output_file, flush=True)
logging.debug("Extracting interesting tracing data ...")
print('### extracting interesting tracing data ...', file=output_file, flush=True)
try:
sp.check_call('lcov --extract "%s.info.prelim" "*%s/include/**/*" --output-file %s.info'
% (name, options.base_dir, name),
shell=True, stdout=output_file, stderr=output_file)
options.tracefiles.append("%s/%s.info" % (os.path.abspath(path), name))
except sp.CalledProcessError as e:
logging.warning(e)
if is_example:
logging.debug("This test belongs to an example, thus also covering examples code")
try:
sp.check_call('lcov --extract "%s.info.prelim" "*%s/examples/**/*" --output-file %s.info.example'
% (name, options.base_dir, name),
shell=True, stdout=output_file, stderr=output_file)
options.tracefiles.append("%s/%s.info.example" % (os.path.abspath(path), name))
except sp.CalledProcessError as e:
logging.warning(e)
print('### done.', file=output_file, flush=True)
os.chdir(options.base_dir)
output_file.close()
def run_tests():
logging.info("Running general tests ...")
for test in options.tests:
run_test(**test)
if options.with_examples:
logging.info("Running tests for examples ...")
for example in options.example_tests:
run_test(**example)
def aggregate_tracefiles():
logging.info("Aggregating %d tracefiles ..." % len(options.tracefiles))
    output_file = open('%s/aggregating.log' % (options.coverage_dir,), mode='a')
logging.debug("Output log: %s" % output_file.name)
options.final_tracefile = "%s/all_tests.info" % options.coverage_dir
for tracefile in options.tracefiles:
logging.debug("- %s" % (tracefile))
print("### adding tracefile: %s" % (tracefile,), file=output_file, flush=True)
if os.access(options.final_tracefile, os.W_OK):
sp.check_call('lcov --add-tracefile "%s" --add-tracefile "%s" --output-file "%s"'
% (options.final_tracefile, tracefile, options.final_tracefile),
shell=True, stdout=output_file, stderr=output_file)
else:
sp.check_call('lcov --add-tracefile "%s" --output-file "%s"'
% (tracefile, options.final_tracefile),
shell=True, stdout=output_file, stderr=output_file)
print("### done.", file=output_file, flush=True)
output_file.close()
def generate_html():
logging.info("Generating HTML report ...")
output_file = open('%s/generate_html.log' % (options.coverage_dir,), mode='a')
sp.check_call('genhtml --output-directory %s --demangle-cpp --num-spaces 2 --sort '
'--title "PFASST++ Test Coverage" --prefix "%s" --function-coverage --legend "%s"'
% (options.coverage_dir, options.base_dir, options.final_tracefile),
shell=True, stdout=output_file, stderr=output_file)
output_file.close()
logging.info("Coverage report can be found in: file://%s/index.html" % options.coverage_dir)
if __name__ == "__main__":
if not is_lcov_available():
raise RuntimeError("Required commands could not be found.")
setup_and_init_options()
get_test_directories()
run_tests()
aggregate_tracefiles()
generate_html()
| 40.215054 | 121 | 0.631462 |
f197db13c03dbc3b38a77da7b98147a76cc252c0 | 1,387 | py | Python | mailsnoopy.py | AntonKuzminRussia/ms-cli | db362d9e347f207e11fc92ddd761b5cbee8ada0d | [
"MIT"
] | 1 | 2019-01-27T04:14:54.000Z | 2019-01-27T04:14:54.000Z | mailsnoopy.py | AntonKuzminRussia/ms-cli | db362d9e347f207e11fc92ddd761b5cbee8ada0d | [
"MIT"
] | null | null | null | mailsnoopy.py | AntonKuzminRussia/ms-cli | db362d9e347f207e11fc92ddd761b5cbee8ada0d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import time
import pprint
import configparser
import shutil
from classes.Database import Database
from classes.MainThread import MainThread
from classes.Config import Config
CURPATH = os.path.dirname(__file__) + "/"
config = configparser.ConfigParser()
config.read(CURPATH + 'config.ini')
Config.set_values(config['main'])
db = Database(
config['main']['db_host'],
config['main']['db_user'],
config['main']['db_pass'],
config['main']['db_name'],
)
if str(Config.get_value('always_start_clean')) == '1':
db.q("TRUNCATE TABLE `folders`;")
db.q("TRUNCATE TABLE `letters`;")
db.q("TRUNCATE TABLE `attachments`;")
db.q("TRUNCATE TABLE `filters_finds`;")
db.q("UPDATE `accounts` SET `in_work` = '0', `active` = 1")
if os.path.exists(Config.get_value('bodies_path')):
shutil.rmtree(Config.get_value('bodies_path'))
if os.path.exists(Config.get_value('attachments_dir')):
shutil.rmtree(Config.get_value('attachments_dir'))
if not os.path.exists(Config.get_value('bodies_path')):
os.mkdir(Config.get_value('bodies_path'))
if not os.path.exists(Config.get_value('attachments_dir')):
os.mkdir(Config.get_value('attachments_dir'))
main_thread = MainThread(db.clone(), int(config['main']['threads_per_host_limit']))
main_thread.start()
while True:
time.sleep(5)
| 26.673077 | 83 | 0.697909 |
966c9b52a86282b3163c9670f3a414e3bcfac5e9 | 3,196 | py | Python | data_structures/doubly_linked_list.py | Kiracorp/data-structures | 33cc5387a4636031193a8214ff6f737689e304c4 | [
"MIT"
] | null | null | null | data_structures/doubly_linked_list.py | Kiracorp/data-structures | 33cc5387a4636031193a8214ff6f737689e304c4 | [
"MIT"
] | null | null | null | data_structures/doubly_linked_list.py | Kiracorp/data-structures | 33cc5387a4636031193a8214ff6f737689e304c4 | [
"MIT"
] | null | null | null | from data_structures.abstract_linked_list import LinkedList
class DoublyLinkedList(LinkedList):
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
def __repr__(self):
return f"{{data: {repr(self.data)}}}"
def __repr__(self):
if self.is_empty(): return "Empty"
else:
node_strs = []
curr_node = self.head
for curr_index in range(self.size):
if curr_node.next is not None:
assert(id(curr_node) == id(curr_node.next.prev))
node_strs.append(f"{curr_index} {repr(curr_node)}")
curr_node = curr_node.next
return " <-> ".join(node_strs)
def insert(self, index, data):
assert(index >= 0 and index <= len(self))
node = self.Node(data)
# Case 0 - No head exists
if self.is_empty():
self.head = self.tail = node
# Case 1 - Add to start
elif index == 0:
self.head.prev, node.next = node, self.head
self.head = node
# Case 2 - Add to tail
elif index == len(self):
self.tail.next, node.prev = node, self.tail
self.tail = node
# Case 3 - Insertion in between elements
else:
curr = self.head
for _ in range(index):
curr = curr.next
prev = curr.prev
prev.next, curr.prev = node, node
node.prev, node.next = prev, curr
self.size += 1
def pop(self, index):
assert(not self.is_empty())
assert(index >= 0 and index < len(self))
# Case 0 - Only one element
if len(self) == 1:
out = self.head.data
self.head = self.tail = None
# Case 1 - Removing head
elif index == 0:
out = self.head.data
self.head = self.head.next
self.head.prev = None
# Case 2 - Removing tail
elif index == len(self)-1:
out = self.tail.data
self.tail = self.tail.prev
self.tail.next = None
# Case 3 - Removal in between elements
else:
curr = self.head
for _ in range(index):
curr = curr.next
out = curr.data
prev, nxt = curr.prev, curr.next
prev.next, nxt.prev = nxt, prev
self.size -= 1
return out
def peek(self, index):
assert(not self.is_empty())
assert(index >= 0 and index < len(self))
if index < len(self)//2:
# Search from head
curr = self.head
for _ in range(index):
curr = curr.next
else:
# Search from tail
curr = self.tail
for _ in range(self.size-1-index):
curr = curr.prev
return curr.data
def reverse(self):
if len(self) < 2: return
curr = self.head
self.head, self.tail = self.tail, self.head
while curr is not None:
curr.next, curr.prev = curr.prev, curr.next
curr = curr.prev | 32.612245 | 68 | 0.505632 |
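if __name__ == "__main__":
    # Minimal usage sketch, assuming the abstract LinkedList base class (not shown
    # here) provides a no-argument constructor plus size, is_empty() and __len__().
    dll = DoublyLinkedList()
    for i, value in enumerate("abc"):
        dll.insert(i, value)
    print(dll)          # 0 {data: 'a'} <-> 1 {data: 'b'} <-> 2 {data: 'c'}
    dll.reverse()
    print(dll.peek(0))  # c
    print(dll.pop(1))   # b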
234a03709ac880bbcb74359f1d69178837f62b12 | 7,227 | py | Python | persistence_landscapes/linear_btree.py | gruberan/persistence_stonks | 772efe7c52e96216b0cc4f7b2e8587495194a0f2 | [
"MIT"
] | 3 | 2020-05-04T12:19:33.000Z | 2020-05-18T09:24:35.000Z | persistence_landscapes/linear_btree.py | gruberan/persistence_stonks | 772efe7c52e96216b0cc4f7b2e8587495194a0f2 | [
"MIT"
] | null | null | null | persistence_landscapes/linear_btree.py | gruberan/persistence_stonks | 772efe7c52e96216b0cc4f7b2e8587495194a0f2 | [
"MIT"
] | 1 | 2020-05-04T12:19:07.000Z | 2020-05-04T12:19:07.000Z | import itertools
class _LinearNode:
def __init__(self,x,y,m=None):
self.left, self.right = None, None
self.x, self.y = x, y
if not m == None:
self.m, self.b = m, y - m*x
def get_prev(self,root):
"""Returns in-order previous node."""
if not self.left == None:
return self.left.get_rightmost()
prev = None
while not root == None:
if self.x > root.x:
prev = root
root = root.right
elif self.x < root.x:
root = root.left
else:
break
return prev
def get_next(self,root):
"""Returns in-order successor node."""
if not self.right == None:
return self.right.get_leftmost()
succ = None
while not root == None:
if self.x < root.x:
succ = root
root = root.left
elif self.x > root.x:
root = root.right
else:
break
return succ
def get_leftmost(self):
"""Returns leftmost node of subtree with root self."""
current = self
while not current == None:
if current.left == None:
break
current = current.left
return current
def get_rightmost(self):
"""Returns rightmost node of subtree with root self."""
current = self
while not current == None:
if current.right == None:
break
current = current.right
return current
def __iter__(self):
"""Ordered traversal."""
if self.left:
for node in self.left:
yield node
yield self
if self.right:
for node in self.right:
yield node
def _pairwise_iterator(self):
"""Ordered traversal with consecutive pairs."""
i, j = itertools.tee(self)
next(j,None)
return zip(i,j)
class Linear_BTree:
"""Binary search tree class stores linear parameters to successor node. Capable of linear interpolation between nodes."""
def __init__(self):
self.root = None
def insert(self, x, y, m=None, delay_update=False):
"""Inserts a new node into the tree.
x, y: Coordinates of node to insert
m: slope to next node. Can be taken as an arg if precomputed (such as when converting a list)
delay_update (bool): if True, will not update the linear parameters of adjacent node after insert
"""
if self.root == None:
self.root = _LinearNode(x,y,m)
else:
self._insert(self.root,x,y,m,delay_update)
def _insert(self,node, x, y, m=None, delay_update=False):
if x < node.x:
if node.left == None:
node.left = _LinearNode(x,y,m)
if not delay_update and m == None:
# Update linear parameters for new node
node.left.m = (node.y - y)/(node.x - x)
node.left.b = y - node.left.m * x
if not delay_update:
# Update linear parameters for node previous to new node
prev = node.left.get_prev(self.root)
if not prev == None:
prev.m = (y - prev.y)/(x - prev.x)
prev.b = prev.y - prev.m * prev.x
else:
self._insert(node.left,x,y,m)
elif x > node.x:
if node.right == None:
node.right = _LinearNode(x,y,m)
if not delay_update and m == None:
# Update linear parameters for new node
succ = node.right.get_next(self.root)
if not succ == None:
node.right.m = (succ.y - y)/(succ.x - x)
node.right.b = y - node.right.m * x
if not delay_update:
# Update linear parameters for current node
node.m = (y - node.y)/(x - node.x)
node.b = node.y - node.m * node.x
else:
self._insert(node.right,x,y,m)
else:
# Overwrites if node with same x value already exists
if not (node.y == y) or not delay_update:
node.y = y
if m == None:
# Update linear parameters for successor node
succ = node.get_next(self.root)
if not succ == None:
node.m = (succ.y - y)/(succ.x - x)
node.b = y - node.m * x
else:
node.m, node.b = m, y - m * x
# Update linear parameters for previous node
prev = node.get_prev(self.root)
if not prev == None:
prev.m = (y - prev.y)/(x - prev.x)
prev.b = prev.y - prev.m * prev.x
@classmethod
def from_list(cls, X, Y, already_sorted=False):
"""Returns a new linear binary tree.
Arguments:
X (list): X values
Y (list): Y values
already_sorted (list): indicates that X and Y are already in sorted order by X value
"""
new_lbtree = cls()
if already_sorted:
M = [(y2 - y1)/(x2 - x1) if x1 != x2 else 0.0 for x1, x2, y1, y2 in zip(X,X[1:],Y,Y[1:])]
new_lbtree._from_list(list(zip(X, Y, M+[0.0])), 0, len(X)-1)
else:
new_lbtree._from_list([(x,y,None) for x,y in zip(X,Y)], 0, len(X)-1)
new_lbtree._update()
return new_lbtree
def _from_list(self, nodes, a, b):
if a > b:
return
mid = int(a + (b - a) / 2)
self.insert(nodes[mid][0], nodes[mid][1], nodes[mid][2], delay_update=True)
self._from_list(nodes, a, mid-1)
self._from_list(nodes, mid+1, b)
def _update(self):
"""Updates the slope and intercept for all nodes."""
for node1, node2 in self.root._pairwise_iterator():
node1.m = (node2.y - node1.y)/(node2.x - node1.x)
node1.b = node1.y - node1.m * node1.x
self.root.get_rightmost().m, self.root.get_rightmost().b = 0.0, 0.0
def evaluate(self, x):
""" Find largest node.x below x and return linear interpolation. """
return self._evaluate(x, self.root)
def _evaluate(self, x, node):
if node == None:
return None
if x == node.x:
return node.y
if x > node.x:
y = self._evaluate(x,node.right)
if y == None:
y = (node.m)*x + node.b
return y
if x < node.x:
return self._evaluate(x,node.left)
def deepcopy(self):
new_lbtree = Linear_BTree()
for node in self:
new_lbtree.insert(node.x, node.y, node.m, delay_update=True)
return new_lbtree
def __iter__(self):
""" Yields sorted x values. """
for node in self.root:
yield node.x | 37.252577 | 125 | 0.492874 |
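if __name__ == "__main__":
    # Minimal sketch: build a tree from already-sorted points and interpolate.
    xs = [0.0, 1.0, 2.0, 4.0]
    ys = [0.0, 2.0, 3.0, 1.0]
    lbt = Linear_BTree.from_list(xs, ys, already_sorted=True)
    print(lbt.evaluate(1.5))  # 2.5, halfway between (1.0, 2.0) and (2.0, 3.0)
    print(lbt.evaluate(3.0))  # 2.0, on the segment from (2.0, 3.0) to (4.0, 1.0)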
e7932b96194688d7856395cda1b1b58a53b9a86e | 3,794 | py | Python | pypureclient/flasharray/FA_2_7/models/pod_performance_replication_by_array_response.py | genegr/py-pure-client | b497ea569ff569992b7f28a3fc6b87f50a98e51a | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_7/models/pod_performance_replication_by_array_response.py | genegr/py-pure-client | b497ea569ff569992b7f28a3fc6b87f50a98e51a | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_7/models/pod_performance_replication_by_array_response.py | genegr/py-pure-client | b497ea569ff569992b7f28a3fc6b87f50a98e51a | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class PodPerformanceReplicationByArrayResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[PodPerformanceReplicationByArray]',
'total': 'list[PodPerformanceReplicationByArray]'
}
attribute_map = {
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.PodPerformanceReplicationByArray]
total=None, # type: List[models.PodPerformanceReplicationByArray]
):
"""
Keyword args:
items (list[PodPerformanceReplicationByArray]): A list of pod performance replication objects, arranged by array.
total (list[PodPerformanceReplicationByArray]): The aggregate value of all items after filtering. When applicable, the average value is displayed instead. The values are displayed for each field if meaningful.
"""
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PodPerformanceReplicationByArrayResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PodPerformanceReplicationByArrayResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PodPerformanceReplicationByArrayResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.152542 | 221 | 0.585925 |
004d9a48ab004e16c548b6a3fc8d101e74077fa4 | 2,137 | py | Python | simulation/aivika/modeler/experiment/base/final_table.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | simulation/aivika/modeler/experiment/base/final_table.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | simulation/aivika/modeler/experiment/base/final_table.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 David Sorokin <[email protected]>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.results import *
from simulation.aivika.modeler.util import *
from simulation.aivika.modeler.experiment.base.types import *
class FinalTableView(BasicExperimentView):
"""It saves the simulation results in final time points for all runs in CSV file."""
def __init__(self,
title = None,
descr = None,
series = None,
separator = None,
run_text = None,
link_text = None):
"""Initializes a new instance."""
BasicExperimentView.__init__(self)
self.title = title
self.descr = descr
self.series = series
self.separator = separator
self.run_text = run_text
self.link_text = link_text
def write(self, file, indent = ''):
"""Write the view definition in the file."""
file.write('defaultFinalTableView')
fields = {}
if not (self.title is None):
func = lambda file, indent: file.write(encode_str(self.title))
fields['finalTableTitle'] = func
if not (self.descr is None):
func = lambda file, indent: file.write(encode_str(self.descr))
fields['finalTableDescription'] = func
if not (self.series is None):
func = lambda file, indent: write_sources(self.series, file, indent + ' ')
fields['finalTableSeries'] = func
if not (self.separator is None):
func = lambda file, indent: file.write(encode_str(self.separator))
fields['finalTableSeparator'] = func
if not (self.run_text is None):
func = lambda file, indent: file.write(encode_str(self.run_text))
fields['finalTableRunText'] = func
if not (self.link_text is None):
func = lambda file, indent: file.write(encode_str(self.link_text))
fields['finalTableLinkText'] = func
write_record_fields(fields, file, indent + ' ')
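# Construction sketch (keyword names as defined above; attaching the view to an
# experiment happens elsewhere in the package):
#
#   view = FinalTableView(title='Final results', separator=';')
#   # view.write(f) then renders the corresponding defaultFinalTableView record.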
| 41.901961 | 88 | 0.612541 |
a67fdbb68149a93c0f6558ee38d0a8d1674a1809 | 15,923 | py | Python | trimesh/scene/transforms.py | hauptmech/trimesh | ee43dc3e77b85606e37683307ef12e5cea4e0338 | [
"MIT"
] | 2 | 2019-12-10T22:40:58.000Z | 2022-01-28T03:49:11.000Z | trimesh/scene/transforms.py | snavely/trimesh | 18e8285c55213ce86f57c84042f5c18d415abe7e | [
"MIT"
] | null | null | null | trimesh/scene/transforms.py | snavely/trimesh | 18e8285c55213ce86f57c84042f5c18d415abe7e | [
"MIT"
] | null | null | null | import copy
import time
import collections
import numpy as np
from .. import util
from .. import caching
from .. import exceptions
from .. import transformations
try:
import networkx as nx
_ForestParent = nx.DiGraph
except BaseException as E:
# create a dummy module which will raise the ImportError
# or other exception only when someone tries to use networkx
nx = exceptions.ExceptionModule(E)
_ForestParent = object
class TransformForest(object):
def __init__(self, base_frame='world'):
# a graph structure, subclass of networkx DiGraph
self.transforms = EnforcedForest()
# hashable, the base or root frame
self.base_frame = base_frame
# save paths, keyed with tuple (from, to)
self._paths = {}
# cache transformation matrices keyed with tuples
self._updated = str(np.random.random())
self._cache = caching.Cache(self.md5)
def update(self, frame_to, frame_from=None, **kwargs):
"""
Update a transform in the tree.
Parameters
------------
frame_from : hashable object
Usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to : hashable object
Usually a string (eg 'mesh_0')
matrix : (4,4) float
Homogeneous transformation matrix
quaternion : (4,) float
Quaternion ordered [w, x, y, z]
axis : (3,) float
Axis of rotation
angle : float
Angle of rotation, in radians
translation : (3,) float
Distance to translate
geometry : hashable
Geometry object name, e.g. 'mesh_0'
"""
self._updated = str(np.random.random())
self._cache.clear()
# if no frame specified, use base frame
if frame_from is None:
frame_from = self.base_frame
# convert various kwargs to a single matrix
matrix = kwargs_to_matrix(**kwargs)
# create the edge attributes
attr = {'matrix': matrix, 'time': time.time()}
# pass through geometry to edge attribute
if 'geometry' in kwargs:
attr['geometry'] = kwargs['geometry']
# add the edges
changed = self.transforms.add_edge(frame_from,
frame_to,
**attr)
# set the node attribute with the geometry information
if 'geometry' in kwargs:
nx.set_node_attributes(
self.transforms,
name='geometry',
values={frame_to: kwargs['geometry']})
# if the edge update changed our structure
# dump our cache of shortest paths
if changed:
self._paths = {}
def md5(self):
return self._updated
def copy(self):
"""
Return a copy of the current TransformForest
Returns
------------
copied: TransformForest
"""
copied = TransformForest()
copied.base_frame = copy.deepcopy(self.base_frame)
copied.transforms = copy.deepcopy(self.transforms)
return copied
def to_flattened(self, base_frame=None):
"""
Export the current transform graph as a flattened
"""
if base_frame is None:
base_frame = self.base_frame
flat = {}
for node in self.nodes:
if node == base_frame:
continue
transform, geometry = self.get(
frame_to=node, frame_from=base_frame)
flat[node] = {
'transform': transform.tolist(),
'geometry': geometry
}
return flat
def to_gltf(self, scene):
"""
Export a transforms as the 'nodes' section of a GLTF dict.
Flattens tree.
Returns
--------
gltf : dict
with 'nodes' referencing a list of dicts
"""
# geometry is an OrderedDict
# {geometry key : index}
mesh_index = {name: i for i, name
in enumerate(scene.geometry.keys())}
# save the output
gltf = collections.deque([])
# only export nodes which have geometry
for node in self.nodes_geometry:
# don't include edge for base frame
if node == self.base_frame:
continue
# get the transform and geometry from the graph
transform, geometry = self.get(
frame_to=node, frame_from=self.base_frame)
# add a node by name
gltf.append({'name': node})
# if the transform is an identity matrix don't include it
is_identity = np.abs(transform - np.eye(4)).max() < 1e-5
if not is_identity:
gltf[-1]['matrix'] = transform.T.reshape(-1).tolist()
# assign geometry if it exists
if geometry is not None:
gltf[-1]['mesh'] = mesh_index[geometry]
# check to see if we have camera node
if node == scene.camera.name:
gltf[-1]['camera'] = 0
# we have flattened tree, so all nodes will be child of world
gltf.appendleft({
'name': self.base_frame,
'children': list(range(1, 1 + len(gltf)))
})
result = {'nodes': list(gltf)}
return result
def to_edgelist(self):
"""
Export the current transforms as a list of edge tuples, with
each tuple having the format:
(node_a, node_b, {metadata})
Returns
---------
edgelist: (n,) list of tuples
"""
# save cleaned edges
export = []
# loop through (node, node, edge attributes)
for edge in nx.to_edgelist(self.transforms):
a, b, c = edge
# geometry is a node property but save it to the
# edge so we don't need two dictionaries
if 'geometry' in self.transforms.node[b]:
c['geometry'] = self.transforms.node[b]['geometry']
# save the matrix as a float list
c['matrix'] = np.asanyarray(c['matrix'], dtype=np.float64).tolist()
export.append((a, b, c))
return export
def from_edgelist(self, edges, strict=True):
"""
Load transform data from an edge list into the current
scene graph.
Parameters
-------------
edgelist : (n,) tuples
(node_a, node_b, {key: value})
strict : bool
If true, raise a ValueError when a
malformed edge is passed in a tuple.
"""
# loop through each edge
for edge in edges:
# edge contains attributes
if len(edge) == 3:
self.update(edge[1], edge[0], **edge[2])
# edge just contains nodes
elif len(edge) == 2:
self.update(edge[1], edge[0])
# edge is broken
elif strict:
raise ValueError('edge incorrect shape: {}'.format(str(edge)))
def load(self, edgelist):
"""
Load transform data from an edge list into the current
scene graph.
Parameters
-------------
edgelist : (n,) tuples
(node_a, node_b, {key: value})
"""
self.from_edgelist(edgelist, strict=True)
@caching.cache_decorator
def nodes(self):
"""
A list of every node in the graph.
Returns
-------------
nodes: (n,) array, of node names
"""
nodes = np.array(list(self.transforms.nodes()))
return nodes
@caching.cache_decorator
def nodes_geometry(self):
"""
The nodes in the scene graph with geometry attached.
Returns
------------
nodes_geometry: (m,) array, of node names
"""
nodes = np.array([
n for n in self.transforms.nodes()
if 'geometry' in self.transforms.node[n]])
return nodes
def get(self, frame_to, frame_from=None):
"""
Get the transform from one frame to another, assuming they are connected
in the transform tree.
If the frames are not connected a NetworkXNoPath error will be raised.
Parameters
------------
frame_to : hashable
Node name, usually a string (eg 'mesh_0')
frame_from : hashable
Node name, usually a string (eg 'world').
If None it will be set to self.base_frame
Returns
----------
transform : (4, 4) float
Homogeneous transformation matrix
"""
if frame_from is None:
frame_from = self.base_frame
# look up transform to see if we have it already
cache_key = (frame_from, frame_to)
cached = self._cache[cache_key]
if cached is not None:
return cached
# get the path in the graph
path = self._get_path(frame_from, frame_to)
# collect transforms along the path
transforms = []
for i in range(len(path) - 1):
# get the matrix and edge direction
data, direction = self.transforms.get_edge_data_direction(
path[i], path[i + 1])
matrix = data['matrix']
if direction < 0:
matrix = np.linalg.inv(matrix)
transforms.append(matrix)
# do all dot products at the end
if len(transforms) == 0:
transform = np.eye(4)
elif len(transforms) == 1:
transform = np.asanyarray(transforms[0], dtype=np.float64)
else:
transform = util.multi_dot(transforms)
geometry = None
if 'geometry' in self.transforms.node[frame_to]:
geometry = self.transforms.node[frame_to]['geometry']
self._cache[cache_key] = (transform, geometry)
return transform, geometry
def show(self):
"""
Plot the graph layout of the scene.
"""
import matplotlib.pyplot as plt
nx.draw(self.transforms, with_labels=True)
plt.show()
def to_svg(self):
"""
"""
from ..graph import graph_to_svg
return graph_to_svg(self.transforms)
def __contains__(self, key):
return key in self.transforms.node
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
value = np.asanyarray(value)
if value.shape != (4, 4):
raise ValueError('Matrix must be specified!')
return self.update(key, matrix=value)
def clear(self):
self.transforms = EnforcedForest()
self._paths = {}
self._updated = str(np.random.random())
self._cache.clear()
def _get_path(self, frame_from, frame_to):
"""
Find a path between two frames, either from cached paths or
from the transform graph.
Parameters
------------
frame_from: a frame key, usually a string
eg, 'world'
frame_to: a frame key, usually a string
eg, 'mesh_0'
Returns
----------
path: (n) list of frame keys
eg, ['mesh_finger', 'mesh_hand', 'world']
"""
# store paths keyed as a tuple
key = (frame_from, frame_to)
if key not in self._paths:
# get the actual shortest paths
path = self.transforms.shortest_path_undirected(
frame_from, frame_to)
# store path to avoid recomputing
self._paths[key] = path
return path
return self._paths[key]
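# Usage sketch for the forest above (frame names are arbitrary hashables):
#
#   g = TransformForest(base_frame='world')
#   g.update('mesh_0', angle=0.0, axis=[0, 0, 1],
#            translation=[0, 0, 1], geometry='mesh_0')
#   matrix, geometry = g.get('mesh_0')   # (4, 4) transform and 'mesh_0'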
class EnforcedForest(_ForestParent):
"""
A subclass of networkx.DiGraph that will raise an error if an
edge is added which would make the DiGraph not a forest or tree.
"""
def __init__(self, *args, **kwargs):
self.flags = {'strict': False, 'assert_forest': False}
for k, v in self.flags.items():
if k in kwargs:
self.flags[k] = bool(kwargs[k])
kwargs.pop(k, None)
super(self.__class__, self).__init__(*args, **kwargs)
# keep a second parallel but undirected copy of the graph
# all of the networkx methods for turning a directed graph
# into an undirected graph are quite slow so we do minor bookkeeping
self._undirected = nx.Graph()
def add_edge(self, u, v, *args, **kwargs):
changed = False
if u == v:
if self.flags['strict']:
raise ValueError('Edge must be between two unique nodes!')
return changed
if self._undirected.has_edge(u, v):
self.remove_edges_from([[u, v], [v, u]])
elif len(self.nodes()) > 0:
try:
path = nx.shortest_path(self._undirected, u, v)
if self.flags['strict']:
raise ValueError(
'Multiple edge path exists between nodes!')
self.disconnect_path(path)
changed = True
except (nx.NetworkXError, nx.NetworkXNoPath, nx.NetworkXException):
pass
self._undirected.add_edge(u, v)
super(self.__class__, self).add_edge(u, v, *args, **kwargs)
if self.flags['assert_forest']:
# this is quite slow but makes very sure structure is correct
# so is mainly used for testing
assert nx.is_forest(nx.Graph(self))
return changed
def add_edges_from(self, *args, **kwargs):
raise ValueError('EnforcedTree requires add_edge method to be used!')
def add_path(self, *args, **kwargs):
raise ValueError('EnforcedTree requires add_edge method to be used!')
def remove_edge(self, *args, **kwargs):
super(self.__class__, self).remove_edge(*args, **kwargs)
self._undirected.remove_edge(*args, **kwargs)
def remove_edges_from(self, *args, **kwargs):
super(self.__class__, self).remove_edges_from(*args, **kwargs)
self._undirected.remove_edges_from(*args, **kwargs)
def disconnect_path(self, path):
ebunch = np.array([[path[0], path[1]]])
ebunch = np.vstack((ebunch, np.fliplr(ebunch)))
self.remove_edges_from(ebunch)
def shortest_path_undirected(self, u, v):
try:
path = nx.shortest_path(self._undirected, u, v)
except BaseException as E:
print(u, v)
raise E
return path
def get_edge_data_direction(self, u, v):
if self.has_edge(u, v):
direction = 1
elif self.has_edge(v, u):
direction = -1
else:
raise ValueError('Edge does not exist!')
data = self.get_edge_data(*[u, v][::direction])
return data, direction
def kwargs_to_matrix(**kwargs):
"""
Turn a set of keyword arguments into a transformation matrix.
"""
if 'matrix' in kwargs:
# a matrix takes precedence over other options
matrix = np.asanyarray(kwargs['matrix'], dtype=np.float64)
elif 'quaternion' in kwargs:
matrix = transformations.quaternion_matrix(kwargs['quaternion'])
elif ('axis' in kwargs) and ('angle' in kwargs):
matrix = transformations.rotation_matrix(kwargs['angle'],
kwargs['axis'])
else:
raise ValueError('Couldn\'t update transform!')
if 'translation' in kwargs:
# translation can be used in conjunction with any of the methods of
# specifying transforms. In the case a matrix and translation are passed,
# we add the translations together rather than picking one.
matrix[0:3, 3] += kwargs['translation']
return matrix
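# For example, kwargs_to_matrix(angle=np.pi, axis=[0, 0, 1], translation=[1, 0, 0])
# yields a homogeneous matrix that rotates pi radians about +Z and is then offset
# by one unit along +X.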
| 32.562372 | 81 | 0.561703 |
ce23d9cab82ba2de7957f72728c792aeb11ba953 | 42,801 | py | Python | sportsreference/nhl/roster.py | kyle1/sportsreference | baa4890382e7c9e5e38a42c1a71303431345378b | [
"MIT"
] | 1 | 2020-03-08T20:17:39.000Z | 2020-03-08T20:17:39.000Z | sportsreference/nhl/roster.py | JawnnyB/sportsreference | ec9b432e59a1a5e39cf6b3b857f781e5dbf65a3d | [
"MIT"
] | null | null | null | sportsreference/nhl/roster.py | JawnnyB/sportsreference | ec9b432e59a1a5e39cf6b3b857f781e5dbf65a3d | [
"MIT"
] | null | null | null | import pandas as pd
from functools import wraps
from lxml.etree import ParserError, XMLSyntaxError
from pyquery import PyQuery as pq
from urllib.error import HTTPError
from .. import utils
from .constants import PLAYER_SCHEME, PLAYER_URL, ROSTER_URL
from .player import AbstractPlayer
def _int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
try:
return int(prop[index])
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
def _float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
index = args[0]._index
prop = func(*args)
try:
return float(prop[index])
except (ValueError, TypeError, IndexError):
# If there is no value, default to None
return None
return wrapper
class Player(AbstractPlayer):
"""
Get player information and stats for all seasons.
Given a player ID, such as 'zettehe01' for Henrik Zetterberg, capture all
relevant stats and information like name, team, height/weight, career
    goals, single-season assists, penalty minutes, and much more.
By default, the class instance will return the player's career stats, but
single-season stats can be found by calling the instance with the requested
season as denoted on sports-reference.com.
Parameters
----------
player_id : string
A player's ID according to hockey-reference.com, such as 'zettehe01'
for Henrik Zetterberg. The player ID can be found by navigating to the
player's stats page and getting the string between the final slash and
the '.html' in the URL. In general, the ID is in the format 'lllllffnn'
where 'lllll' is the first five letters of the player's last name, 'ff'
is the first two letters of the player's first name, and 'nn' is a
number starting at '01' for the first time that player ID has been used
and increments by 1 for every successive player.
"""
def __init__(self, player_id):
self._most_recent_season = ''
self._index = None
self._player_id = player_id
self._season = None
self._name = None
self._team_abbreviation = None
self._height = None
self._weight = None
self._age = None
self._league = None
self._games_played = None
self._goals = None
self._assists = None
self._points = None
self._plus_minus = None
self._penalties_in_minutes = None
self._even_strength_goals = None
self._power_play_goals = None
self._short_handed_goals = None
self._game_winning_goals = None
self._even_strength_assists = None
self._power_play_assists = None
self._short_handed_assists = None
self._shots_on_goal = None
self._shooting_percentage = None
self._total_shots = None
self._time_on_ice = None
self._average_time_on_ice = None
self._faceoff_wins = None
self._faceoff_losses = None
self._faceoff_percentage = None
self._blocks_at_even_strength = None
self._hits_at_even_strength = None
self._takeaways = None
self._giveaways = None
# Possession Metrics
self._time_on_ice_even_strength = None
self._corsi_for = None
self._corsi_against = None
self._corsi_for_percentage = None
self._relative_corsi_for_percentage = None
self._fenwick_for = None
self._fenwick_against = None
self._fenwick_for_percentage = None
self._relative_fenwick_for_percentage = None
self._goals_for_on_ice = None
self._shooting_percentage_on_ice = None
self._goals_against_on_ice = None
self._save_percentage_on_ice = None
self._pdo = None
self._offensive_zone_start_percentage = None
self._defensive_zone_start_percentage = None
# Miscellaneous
self._goals_created = None
self._adjusted_goals = None
self._adjusted_assists = None
self._adjusted_points = None
self._adjusted_goals_created = None
self._total_goals_for_on_ice = None
self._power_play_goals_for_on_ice = None
self._total_goals_against_on_ice = None
self._power_play_goals_against_on_ice = None
self._offensive_point_shares = None
self._defensive_point_shares = None
self._point_shares = None
self._shootout_attempts = None
self._shootout_goals = None
self._shootout_misses = None
self._shootout_percentage = None
# Goalie Metrics
self._wins = None
self._losses = None
self._ties_plus_overtime_loss = None
self._goals_against = None
self._shots_against = None
self._saves = None
self._save_percentage = None
self._goals_against_average = None
self._shutouts = None
self._minutes = None
self._quality_starts = None
self._quality_start_percentage = None
self._really_bad_starts = None
self._goal_against_percentage_relative = None
self._goals_saved_above_average = None
self._adjusted_goals_against_average = None
self._goalie_point_shares = None
self._even_strength_shots_faced = None
self._even_strength_goals_allowed = None
self._even_strength_save_percentage = None
self._power_play_shots_faced = None
self._power_play_goals_allowed = None
self._power_play_save_percentage = None
self._short_handed_shots_faced = None
self._short_handed_goals_allowed = None
self._short_handed_save_percentage = None
player_data = self._pull_player_data()
if not player_data:
return
self._find_initial_index()
AbstractPlayer.__init__(self, player_id, self._name, player_data)
def _build_url(self):
"""
Create the player's URL to pull stats from.
The player's URL requires the player ID.
Returns
-------
string
The string URL for the player's stats page.
"""
# The first letter of the player's last name is used to sort the player
# list and is a part of the URL.
first_character = self._player_id[0]
return PLAYER_URL % (first_character, self._player_id)
def _retrieve_html_page(self):
"""
Download the requested player's stats page.
Download the requested page and strip all of the comment tags before
returning a PyQuery object which will be used to parse the data.
Oftentimes, important data is contained in tables which are hidden in
HTML comments and not accessible via PyQuery.
Returns
-------
PyQuery object
The requested page is returned as a queriable PyQuery object with
the comment tags removed.
"""
url = self._build_url()
try:
url_data = pq(url)
except HTTPError:
return None
return pq(utils._remove_html_comment_tags(url_data))
def _parse_season(self, row):
"""
Parse the season string from the table.
The season is generally located in the first column of the stats tables
and should be parsed to detonate which season metrics are being pulled
from.
Parameters
----------
row : PyQuery object
A PyQuery object of a single row in a stats table.
Returns
-------
string
A string representation of the season in the format 'YYYY-YY', such
as '2017-18'.
"""
season = utils._parse_field(PLAYER_SCHEME, row, 'season')
return season.replace('*', '').replace('+', '')
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict):
"""
Combine all stats for each season.
Since all of the stats are spread across multiple tables, they should
be combined into a single field which can be used to easily query stats
at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017-18', and the value is a
``dictionary`` with a ``string`` of 'data' and ``string``
containing all of the data.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table.
"""
most_recent_season = self._most_recent_season
if not table_rows:
table_rows = []
for row in table_rows:
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
most_recent_season = season
self._most_recent_season = most_recent_season
if not career_stats:
return all_stats_dict
try:
all_stats_dict['Career']['data'] += str(next(career_stats))
except KeyError:
all_stats_dict['Career'] = {'data': str(next(career_stats))}
return all_stats_dict
def _combine_all_stats(self, player_info):
"""
Pull stats from all tables into a single data structure.
Pull the stats from all of the requested tables into a dictionary that
is separated by season to allow easy queries of the player's stats for
each season.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing all of the stats information for the
requested player.
Returns
-------
dictionary
Returns a dictionary where all stats from each table are combined
by season to allow easy queries by year.
"""
all_stats_dict = {}
for table_id in ['stats_basic_plus_nhl', 'skaters_advanced',
'stats_misc_plus_nhl', 'stats_goalie_situational']:
table_items = utils._get_stats_table(player_info,
'table#%s' % table_id)
career_items = utils._get_stats_table(player_info,
'table#%s' % table_id,
footer=True)
all_stats_dict = self._combine_season_stats(table_items,
career_items,
all_stats_dict)
return all_stats_dict
def _parse_player_information(self, player_info):
"""
Parse general player information.
Parse general player information such as height, weight, and name. The
attribute for the requested field will be set with the value prior to
returning.
Parameters
----------
player_info : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
"""
for field in ['_height', '_weight', '_name']:
short_field = str(field)[1:]
value = utils._parse_field(PLAYER_SCHEME, player_info, short_field)
setattr(self, field, value)
def _pull_player_data(self):
"""
Pull and aggregate all player information.
Pull the player's HTML stats page and parse unique properties, such as
the player's height, weight, and position. Next, combine all stats for
all seasons plus the player's career stats into a single object which
can easily be iterated upon.
Returns
-------
dictionary
Returns a dictionary of the player's combined stats where each key
is a string of the season and the value is the season's associated
stats.
"""
player_info = self._retrieve_html_page()
if not player_info:
return
self._parse_player_information(player_info)
all_stats = self._combine_all_stats(player_info)
setattr(self, '_season', list(all_stats.keys()))
return all_stats
def _find_initial_index(self):
"""
Find the index of the career stats.
When the Player class is instantiated, the default stats to pull are
the player's career stats. Upon being called, the index of the 'Career'
element should be the index value.
"""
index = 0
        for season in self._season:
if season == 'Career':
self._index = index
break
index += 1
def __call__(self, requested_season=''):
"""
Specify a different season to pull stats from.
A different season can be requested by passing the season string, such
as '2017-18' to the class instance.
Parameters
----------
requested_season : string (optional)
A string of the requested season to query, such as '2017-18'. If
left blank or 'Career' is passed, the career stats will be used for
stats queries.
Returns
-------
Player class instance
Returns the class instance with the updated stats being referenced.
"""
if requested_season.lower() == 'career' or \
requested_season == '':
requested_season = 'Career'
index = 0
for season in self._season:
if season == requested_season:
self._index = index
break
index += 1
return self
def _dataframe_fields(self):
"""
Creates a dictionary of all fields to include with DataFrame.
        Because the values returned by the class properties depend on the current
        index, the dictionary should be regenerated every time the index changes
        and the dataframe property is requested.
Returns
-------
dictionary
Returns a dictionary where the keys are the shortened ``string``
attribute names and the values are the actual value for each
attribute for the specified index.
"""
fields_to_include = {
'adjusted_assists': self.adjusted_assists,
'adjusted_goals': self.adjusted_goals,
'adjusted_goals_against_average':
self.adjusted_goals_against_average,
'adjusted_goals_created': self.adjusted_goals_created,
'adjusted_points': self.adjusted_points,
'age': self.age,
'assists': self.assists,
'average_time_on_ice': self.average_time_on_ice,
'blocks_at_even_strength': self.blocks_at_even_strength,
'corsi_against': self.corsi_against,
'corsi_for': self.corsi_for,
'corsi_for_percentage': self.corsi_for_percentage,
'defensive_point_shares': self.defensive_point_shares,
'defensive_zone_start_percentage':
self.defensive_zone_start_percentage,
'even_strength_assists': self.even_strength_assists,
'even_strength_goals': self.even_strength_goals,
'even_strength_goals_allowed': self.even_strength_goals_allowed,
'even_strength_save_percentage':
self.even_strength_save_percentage,
'even_strength_shots_faced': self.even_strength_shots_faced,
'faceoff_losses': self.faceoff_losses,
'faceoff_percentage': self.faceoff_percentage,
'faceoff_wins': self.faceoff_wins,
'fenwick_against': self.fenwick_against,
'fenwick_for': self.fenwick_for,
'fenwick_for_percentage': self.fenwick_for_percentage,
'game_winning_goals': self.game_winning_goals,
'games_played': self.games_played,
'giveaways': self.giveaways,
'goal_against_percentage_relative':
self.goal_against_percentage_relative,
'goalie_point_shares': self.goalie_point_shares,
'goals': self.goals,
'goals_against': self.goals_against,
'goals_against_average': self.goals_against_average,
'goals_against_on_ice': self.goals_against_on_ice,
'goals_created': self.goals_created,
'goals_for_on_ice': self.goals_for_on_ice,
'goals_saved_above_average': self.goals_saved_above_average,
'height': self.height,
'hits_at_even_strength': self.hits_at_even_strength,
'league': self.league,
'losses': self.losses,
'minutes': self.minutes,
'name': self.name,
'offensive_point_shares': self.offensive_point_shares,
'offensive_zone_start_percentage':
self.offensive_zone_start_percentage,
'pdo': self.pdo,
'penalties_in_minutes': self.penalties_in_minutes,
'player_id': self.player_id,
'plus_minus': self.plus_minus,
'point_shares': self.point_shares,
'points': self.points,
'power_play_assists': self.power_play_assists,
'power_play_goals': self.power_play_goals,
'power_play_goals_against_on_ice':
self.power_play_goals_against_on_ice,
'power_play_goals_allowed': self.power_play_goals_allowed,
'power_play_goals_for_on_ice': self.power_play_goals_for_on_ice,
'power_play_save_percentage': self.power_play_save_percentage,
'power_play_shots_faced': self.power_play_shots_faced,
'quality_start_percentage': self.quality_start_percentage,
'quality_starts': self.quality_starts,
'really_bad_starts': self.really_bad_starts,
'relative_corsi_for_percentage':
self.relative_corsi_for_percentage,
'relative_fenwick_for_percentage':
self.relative_fenwick_for_percentage,
'save_percentage': self.save_percentage,
'save_percentage_on_ice': self.save_percentage_on_ice,
'saves': self.saves,
'season': self.season,
'shooting_percentage': self.shooting_percentage,
'shooting_percentage_on_ice': self.shooting_percentage_on_ice,
'shootout_attempts': self.shootout_attempts,
'shootout_goals': self.shootout_goals,
'shootout_misses': self.shootout_misses,
'shootout_percentage': self.shootout_percentage,
'short_handed_assists': self.short_handed_assists,
'short_handed_goals': self.short_handed_goals,
'short_handed_goals_allowed': self.short_handed_goals_allowed,
'short_handed_save_percentage': self.short_handed_save_percentage,
'short_handed_shots_faced': self.short_handed_shots_faced,
'shots_against': self.shots_against,
'shots_on_goal': self.shots_on_goal,
'shutouts': self.shutouts,
'takeaways': self.takeaways,
'team_abbreviation': self.team_abbreviation,
'ties_plus_overtime_loss': self.ties_plus_overtime_loss,
'time_on_ice': self.time_on_ice,
'time_on_ice_even_strength': self.time_on_ice_even_strength,
'total_goals_against_on_ice': self.total_goals_against_on_ice,
'total_goals_for_on_ice': self.total_goals_for_on_ice,
'total_shots': self.total_shots,
'weight': self.weight,
'wins': self.wins
}
return fields_to_include
@property
def dataframe(self):
"""
Returns a ``pandas DataFrame`` containing all other relevant class
properties and values where each index is a different season plus the
career stats.
"""
temp_index = self._index
rows = []
indices = []
if not self._season:
return None
for season in self._season:
self._index = self._season.index(season)
rows.append(self._dataframe_fields())
indices.append(season)
self._index = temp_index
return pd.DataFrame(rows, index=[indices])
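    # A minimal usage sketch for the property above (the player ID below is
    # illustrative, not taken from this module):
    #
    #   player = Player('zettehe01')   # hypothetical hockey-reference ID
    #   df = player.dataframe          # one row per season, plus a 'Career' row
    #   df.loc['2017-18']              # stats for a single season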
@property
def season(self):
"""
Returns a ``string`` of the season in the format 'YYYY-YY', such as
'2017-18'. If no season was requested, the career stats will be
returned for the player and the season will default to 'Career'.
"""
return self._season[self._index]
@property
def name(self):
"""
Returns a ``string`` of the player's name, such as 'Henrik Zetterberg'.
"""
return self._name
@property
def team_abbreviation(self):
"""
Returns a ``string`` of the team's abbreviation, such as 'DET' for the
Detroit Red Wings.
"""
# For career stats, skip the team abbreviation.
if self._season[self._index].lower() == 'career':
return None
return self._team_abbreviation[self._index]
@property
def height(self):
"""
Returns a ``string`` of the player's height in the format
"feet-inches".
"""
return self._height
@property
def weight(self):
"""
Returns an ``int`` of the player's weight in pounds.
"""
return int(self._weight.replace('lb', ''))
@_int_property_decorator
def age(self):
"""
Returns an ``int`` of the player's age on February 1st of the season.
"""
return self._age
@property
def league(self):
"""
Returns a ``string`` of the league the player's team participates in.
"""
return self._league[self._index]
@_int_property_decorator
def games_played(self):
"""
Returns an ``int`` of the number of games the player participated in.
"""
return self._games_played
@_int_property_decorator
def time_on_ice(self):
"""
Returns an ``int`` of the total time the player has spent on ice in
minutes.
"""
return self._time_on_ice
@property
def average_time_on_ice(self):
"""
Returns a ``string`` of the average time the player spends on the ice
per game.
"""
return self._average_time_on_ice[self._index]
@_int_property_decorator
def total_shots(self):
"""
Returns an ``int`` of the total number of shots the player took
regardless of them being on goal or not.
"""
return self._total_shots
@_int_property_decorator
def faceoff_wins(self):
"""
Returns an ``int`` of the number of faceoffs the player won.
"""
return self._faceoff_wins
@_int_property_decorator
def faceoff_losses(self):
"""
Returns an ``int`` of the number of faceoffs the player lost.
"""
return self._faceoff_losses
@_float_property_decorator
def faceoff_percentage(self):
"""
Returns a ``float`` of the percentage of faceoffs the player wins.
Percentage ranges from 0-100.
"""
return self._faceoff_percentage
@_int_property_decorator
def blocks_at_even_strength(self):
"""
Returns an ``int`` of the number of shots the player blocks while at
even strength.
"""
return self._blocks_at_even_strength
@_int_property_decorator
def takeaways(self):
"""
Returns an ``int`` of the number of times the player took the puck away
from an opponent.
"""
return self._takeaways
@_int_property_decorator
def giveaways(self):
"""
Returns an ``int`` of the number of times the player gave the puck away
to an opponent.
"""
return self._giveaways
@_float_property_decorator
def time_on_ice_even_strength(self):
"""
Returns a ``float`` of the amount of time the player spent on ice in
minutes while at even strength.
"""
return self._time_on_ice_even_strength
@_float_property_decorator
def corsi_for(self):
"""
Returns a ``float`` of the player's 'Corsi For' factor at even
strength, equal to shots + blocks + misses.
"""
return self._corsi_for
@_float_property_decorator
def corsi_against(self):
"""
Returns a ``float`` of the player's 'Corsi Against' factor at even
strength, equal to shots + blocks + misses.
"""
return self._corsi_against
@_int_property_decorator
def fenwick_for(self):
"""
Returns an ``int`` of the player's 'Fenwick For' factor at even
strength, equal to shots + misses.
"""
return self._fenwick_for
@_int_property_decorator
def fenwick_against(self):
"""
Returns an ``int`` of the player's 'Fenwick Against' factor at even
strength, equal to shots + misses.
"""
return self._fenwick_against
@_float_property_decorator
def fenwick_for_percentage(self):
"""
Returns a ``float`` of the player's 'Fenwick For' percentage, equal to
fenwick_for / (fenwick_for + fenwick_against). Percentage ranges from
0-100.
"""
return self._fenwick_for_percentage
@_float_property_decorator
def relative_fenwick_for_percentage(self):
"""
Returns a ``float`` of the player's relative 'Fenwick For' percentage,
equal to the difference between the player's on and off-ice Fenwick For
percentage.
"""
return self._relative_fenwick_for_percentage
@_int_property_decorator
def goals_for_on_ice(self):
"""
Returns an ``int`` of the number of goals the team has scored while the
player is on ice.
"""
return self._goals_for_on_ice
@_float_property_decorator
def shooting_percentage_on_ice(self):
"""
Returns a ``float`` of the team's shooting percentage while the player
is on ice.
"""
return self._shooting_percentage_on_ice
@_int_property_decorator
def goals_against_on_ice(self):
"""
Returns an ``int`` of the number of times the team has been scored on
while the player is on ice.
"""
return self._goals_against_on_ice
@_int_property_decorator
def save_percentage_on_ice(self):
"""
Returns an ``int`` of the team's save percentage while the player is on
ice.
"""
return self._save_percentage_on_ice
@_float_property_decorator
def pdo(self):
"""
Returns a ``float`` of the team's PDO while the player is on ice at
even strength, equal to the team's shooting percentage + save
percentage. Percentage ranges from 0-100.
"""
return self._pdo
@_float_property_decorator
def defensive_zone_start_percentage(self):
"""
Returns a ``float`` of the percentage of faceoffs that occur in the
        defensive zone while the player is on ice. Percentage ranges from
0-100.
"""
return self._defensive_zone_start_percentage
@_int_property_decorator
def goals_created(self):
"""
Returns an ``int`` of the number of goals the player created, equal to
(goals + assists * 0.5) * team_goals / (team_goals + team_assists *
0.5).
"""
return self._goals_created
@_int_property_decorator
def adjusted_goals(self):
"""
Returns an ``int`` of the adjusted number of goals the player has
scored.
"""
return self._adjusted_goals
@_int_property_decorator
def adjusted_assists(self):
"""
Returns an ``int`` of the adjusted number of goals the player has
assisted.
"""
return self._adjusted_assists
@_int_property_decorator
def adjusted_points(self):
"""
Returns an ``int`` of the adjusted number of points the player has
gained.
"""
return self._adjusted_points
@_int_property_decorator
def adjusted_goals_created(self):
"""
Returns an ``int`` of the adjusted number of goals the player created.
"""
return self._adjusted_goals_created
@_int_property_decorator
def total_goals_for_on_ice(self):
"""
Returns an ``int`` of the total number of goals for while the player
was on ice.
"""
return self._total_goals_for_on_ice
@_int_property_decorator
def power_play_goals_for_on_ice(self):
"""
Returns an ``int`` of the total number of power play goals for while
the player was on ice.
"""
return self._power_play_goals_for_on_ice
@_int_property_decorator
def total_goals_against_on_ice(self):
"""
Returns an ``int`` of the total number of goals against while the
player was on ice.
"""
return self._total_goals_against_on_ice
@_int_property_decorator
def power_play_goals_against_on_ice(self):
"""
Returns an ``int`` of the total number of power play goals against
while the player was on ice.
"""
return self._power_play_goals_against_on_ice
@_float_property_decorator
def offensive_point_shares(self):
"""
Returns a ``float`` of the player's offensive point share, equal to the
approximate number of points the player contributed to while on
offense.
"""
return self._offensive_point_shares
@_float_property_decorator
def defensive_point_shares(self):
"""
        Returns a ``float`` of the player's defensive point share, equal to the
approximate number of points the player contributed to while on
defense.
"""
return self._defensive_point_shares
@_float_property_decorator
def point_shares(self):
"""
Returns a ``float`` of the player's total point share, equal to the sum
of the player's offensive and defensive point share.
"""
return self._point_shares
@_int_property_decorator
def shootout_attempts(self):
"""
Returns an ``int`` of the number of shootouts the player attempted.
"""
return self._shootout_attempts
@_int_property_decorator
def shootout_goals(self):
"""
Returns an ``int`` of the number of shootout goals the player scored.
"""
return self._shootout_goals
@_int_property_decorator
def shootout_misses(self):
"""
Returns an ``int`` of the number of shootouts the player failed to
score.
"""
return self._shootout_misses
@_float_property_decorator
def shootout_percentage(self):
"""
Returns a ``float`` of the percentage of shootouts the player scores
in. Percentage ranges from 0-100.
"""
return self._shootout_percentage
@_int_property_decorator
def wins(self):
"""
Returns an ``int`` of the number of times the team won while the player
is in goal.
"""
return self._wins
@_int_property_decorator
def losses(self):
"""
Returns an ``int`` of the number of times the team lost while the
player is in goal.
"""
return self._losses
@_int_property_decorator
def ties_plus_overtime_loss(self):
"""
Returns an ``int`` of the number of times the team has either tied or
lost in overtime or a shootout while the player is in goal.
"""
return self._ties_plus_overtime_loss
@_float_property_decorator
def goals_against_average(self):
"""
Returns a ``float`` of the average number of goals the opponent has
scored per game while the player is in goal.
"""
return self._goals_against_average
@_int_property_decorator
def minutes(self):
"""
Returns an ``int`` of the total number of minutes the player has spent
in goal.
"""
return self._minutes
@_int_property_decorator
def quality_starts(self):
"""
Returns an ``int`` of the number of quality starts the player has had,
equal to starting out with an in-game save percentage greater than the
player's average save percentage for the year.
"""
return self._quality_starts
@_float_property_decorator
def quality_start_percentage(self):
"""
Returns a ``float`` of the percentage of the player's starts that are
considered quality starts while in goal. Percentage ranges from 0-1.
"""
return self._quality_start_percentage
@_int_property_decorator
def really_bad_starts(self):
"""
Returns an ``int`` of the number of really bad starts the player has
had, equal to starting out with an in-game save percentage less than
85%.
"""
return self._really_bad_starts
@_int_property_decorator
def goal_against_percentage_relative(self):
"""
Returns an ``int`` of the player's goals against average compared to
the league average where 100 is an average player and 0 means the
player saved every single shot.
"""
return self._goal_against_percentage_relative
@_float_property_decorator
def goals_saved_above_average(self):
"""
Returns a ``float`` of the number of goals the player saved above the
league average.
"""
return self._goals_saved_above_average
@_float_property_decorator
def adjusted_goals_against_average(self):
"""
Returns a ``float`` of the adjusted goals against average for the
player while in goal.
"""
return self._adjusted_goals_against_average
@_float_property_decorator
def goalie_point_shares(self):
"""
Returns a ``float`` of the player's point share while in goal.
"""
return self._goalie_point_shares
@_int_property_decorator
def even_strength_shots_faced(self):
"""
Returns an ``int`` of the number of shots the player has faced while at
even strength.
"""
return self._even_strength_shots_faced
@_int_property_decorator
def even_strength_goals_allowed(self):
"""
Returns an ``int`` of the number of goals the player allowed in goal
while at even strength.
"""
return self._even_strength_goals_allowed
@_float_property_decorator
def even_strength_save_percentage(self):
"""
Returns a ``float`` of the player's save percentage while at even
strength.
"""
return self._even_strength_save_percentage
@_int_property_decorator
def power_play_shots_faced(self):
"""
Returns an ``int`` of the number of shots the player has faced while on
a power play.
"""
return self._power_play_shots_faced
@_int_property_decorator
def power_play_goals_allowed(self):
"""
Returns an ``int`` of the number of goals the player allowed in goal
while on a power play.
"""
return self._power_play_goals_allowed
@_float_property_decorator
def power_play_save_percentage(self):
"""
Returns a ``float`` of the player's save percentage while on a power
play.
"""
return self._power_play_save_percentage
@_int_property_decorator
def short_handed_shots_faced(self):
"""
Returns an ``int`` of the number of shots the player has faced while
short handed.
"""
return self._short_handed_shots_faced
@_int_property_decorator
def short_handed_goals_allowed(self):
"""
Returns an ``int`` of the number of goals the player allowed in goal
while short handed.
"""
return self._short_handed_goals_allowed
@_float_property_decorator
def short_handed_save_percentage(self):
"""
Returns a ``float`` of the player's save percentage while short handed.
"""
return self._short_handed_save_percentage
class Roster:
"""
Get stats for all players on a roster.
Request a team's roster for a given season and create instances of the
Player class for each player, containing a detailed list of the player's
statistics and information.
Parameters
----------
team : string
The team's abbreviation, such as 'DET' for the Detroit Red Wings.
year : string (optional)
The 6-digit year to pull the roster from, such as '2017-18'. If left
blank, defaults to the most recent season.
slim : boolean (optional)
        Set to True to return a limited subset of player information, namely
        the name and player ID for each player, as opposed to all of their
        respective stats. This greatly reduces the time to return a response
        if just the names and IDs are desired. Defaults to False.
"""
def __init__(self, team, year=None, slim=False):
self._team = team
self._slim = slim
if slim:
self._players = {}
else:
self._players = []
self._find_players(year)
def _pull_team_page(self, url):
"""
Download the team page.
Download the requested team's season page and create a PyQuery object.
Parameters
----------
url : string
A string of the built URL for the requested team and season.
Returns
-------
PyQuery object
Returns a PyQuery object of the team's HTML page.
"""
try:
return pq(utils._remove_html_comment_tags(pq(url)))
except HTTPError:
return None
def _create_url(self, year):
"""
Build the team URL.
Build a URL given a team's abbreviation and the 6-digit year.
Parameters
----------
year : string
The 6-digit string representing the year to pull the team's roster
from.
Returns
-------
string
Returns a string of the team's season page for the requested team
and year.
"""
return ROSTER_URL % (self._team.upper(), year)
def _get_id(self, player):
"""
Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID.
"""
return player('td[data-stat="player"]').attr('data-append-csv')
def _get_name(self, player):
"""
Parse the player's name.
Given a PyQuery object representing a single player on the team roster,
        parse the player's name and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player's name.
"""
name_tag = player('td[data-stat="player"] a')
return name_tag.text()
def _find_players(self, year):
"""
Find all player IDs for the requested team.
For the requested team and year (if applicable), pull the roster table
and parse the player ID for all players on the roster and create an
instance of the Player class for the player. All player instances are
added to the 'players' property to get all stats for all players on a
team.
Parameters
----------
year : string
The 6-digit string representing the year to pull the team's roster
from.
"""
if not year:
year = utils._find_year_for_season('nhl')
# If stats for the requested season do not exist yet (as is the
# case right before a new season begins), attempt to pull the
# previous year's stats. If it exists, use the previous year
# instead.
if not utils._url_exists(self._create_url(year)) and \
utils._url_exists(self._create_url(str(int(year) - 1))):
year = str(int(year) - 1)
url = self._create_url(year)
page = self._pull_team_page(url)
if not page:
output = ("Can't pull requested team page. Ensure the following "
"URL exists: %s" % url)
raise ValueError(output)
for player in page('table#roster tbody tr').items():
player_id = self._get_id(player)
if self._slim:
name = self._get_name(player)
self._players[player_id] = name
else:
player_instance = Player(player_id)
self._players.append(player_instance)
@property
def players(self):
"""
Returns a ``list`` of player instances for each player on the requested
team's roster if the ``slim`` property is False when calling the Roster
class. If the ``slim`` property is True, returns a ``dictionary`` where
each key is a string of the player's ID and each value is the player's
first and last name as listed on the roster page.
"""
return self._players
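    # A minimal usage sketch (team abbreviation is illustrative):
    #
    #   detroit = Roster('DET')                   # full Player instance per player
    #   for player in detroit.players:
    #       print(player.name)
    #
    #   names = Roster('DET', slim=True).players  # {player_id: player name}
    #
    # With slim=True only the roster table is parsed, so no per-player stats
    # pages are requested, which is much faster when only IDs/names are needed.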
| 34.712895 | 79 | 0.619051 |
34fe23a6a8ffdb139e1b1a41b90dea6c101f05b7 | 13,155 | py | Python | raml_main.py | nonstopfor/seq2seq-exposure-bias-tf | 81a390241ad622ce89318693656cfc083f0aa162 | [
"Apache-2.0"
] | 1 | 2020-04-15T07:23:56.000Z | 2020-04-15T07:23:56.000Z | raml_main.py | nonstopfor/seq2seq-exposure-bias-tf | 81a390241ad622ce89318693656cfc083f0aa162 | [
"Apache-2.0"
] | null | null | null | raml_main.py | nonstopfor/seq2seq-exposure-bias-tf | 81a390241ad622ce89318693656cfc083f0aa162 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Attentional Seq2seq with RAML algorithm.
Read a pre-processed file containing the augmented samples and
corresponding rewards for every target sentence.
RAML Algorithm is described in https://arxiv.org/pdf/1705.07136.pdf
"""
# NOTICE: complying with the Apache License, this file has been modified
from io import open
import importlib
import os
import tensorflow as tf
import texar.tf as tx
import random
from rouge import Rouge
from raml_translation_with_score import TranslationWithScore
import cotk
import numpy as np
import json
def set_seed(seed=1):
np.random.seed(seed)
tf.set_random_seed(seed)
set_seed()
flags = tf.flags
def raml_loss(batch, output, training_rewards):
mle_loss = tx.losses.sequence_sparse_softmax_cross_entropy(
labels=batch['target_text_ids'][:, 1:],
logits=output.logits,
sequence_length=batch['target_length'] - 1,
average_across_batch=False)
return tf.reduce_sum(mle_loss * training_rewards) / \
tf.reduce_sum(training_rewards)
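# The RAML objective above is a reward-weighted maximum-likelihood loss: each
# augmented target sequence contributes its sequence-level cross entropy scaled
# by the reward supplied with the batch (precomputed in the RAML sample data),
# normalised by the total reward. A worked example with illustrative numbers:
#   per-sequence losses [2.0, 3.0] and rewards [0.8, 0.2]
#   loss = (2.0 * 0.8 + 3.0 * 0.2) / (0.8 + 0.2) = 2.2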
def build_model(batch, loader, rewards, config_model):
"""
Assembles the seq2seq model.
Code in this function is basically the same of build_model() in
baseline_seq2seq_attn_main.py except the normalization in loss_fn.
"""
source_embedder = tx.modules.WordEmbedder(
vocab_size=loader.vocab_size, hparams=config_model.embedder)
encoder = tx.modules.BidirectionalRNNEncoder(
hparams=config_model.encoder)
enc_outputs, _ = encoder(source_embedder(batch['source_text_ids']))
target_embedder = tx.modules.WordEmbedder(
vocab_size=loader.vocab_size, hparams=config_model.embedder)
decoder = tx.modules.AttentionRNNDecoder(
memory=tf.concat(enc_outputs, axis=2),
memory_sequence_length=batch['source_length'],
vocab_size=loader.vocab_size,
hparams=config_model.decoder)
training_outputs, _, _ = decoder(
decoding_strategy='train_greedy',
inputs=target_embedder(batch['target_text_ids'][:, :-1]),
sequence_length=batch['target_length'] - 1)
train_op = tx.core.get_train_op(
raml_loss(batch, training_outputs, rewards),
hparams=config_model.opt)
start_tokens = tf.ones_like(batch['target_length']) * \
loader.go_id
beam_search_outputs, _, _ = \
tx.modules.beam_search_decode(
decoder_or_cell=decoder,
embedding=target_embedder,
start_tokens=start_tokens,
end_token=loader.eos_id,
beam_width=config_model.beam_width,
max_decoding_length=60)
return train_op, beam_search_outputs
def print_stdout_and_file(content, file):
print(content)
print(content, file=file)
def main(FLAGS=None):
"""Entrypoint.
"""
if FLAGS is None:
flags.DEFINE_string("config_model", "configs.config_model", "The model config.")
flags.DEFINE_string("config_data", "configs.config_giga",
"The dataset config.")
# flags.DEFINE_string('raml_file', 'data/giga/samples_giga.txt',
# 'the samples and rewards described in RAML')
flags.DEFINE_integer('n_samples', 10,
'number of samples for every target sentence')
flags.DEFINE_float('tau', 0.4, 'the temperature in RAML algorithm')
flags.DEFINE_string('output_dir', '.', 'where to keep training logs')
flags.DEFINE_bool('cpu', False, 'whether to use cpu')
flags.DEFINE_string('gpu', '0', 'use which gpu(s)')
flags.DEFINE_bool('debug', False, 'if debug, skip the training process after one step')
flags.DEFINE_bool('load', False, 'Whether to load existing checkpoint')
flags.DEFINE_bool('infer', False, 'infer (use pretrained model)')
FLAGS = flags.FLAGS
if FLAGS.cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
else:
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
FLAGS = flags.FLAGS
config_model = importlib.import_module(FLAGS.config_model)
config_data = importlib.import_module(FLAGS.config_data)
debug = FLAGS.debug
load = FLAGS.load
infer = FLAGS.infer
dataset = FLAGS.config_data.split('.')[-1].split('_')[-1]
print(f"dataset={dataset}")
if not FLAGS.output_dir.endswith('/'):
FLAGS.output_dir += '/'
log_dir = FLAGS.output_dir + 'training_log_raml' + \
'_' + str(FLAGS.n_samples) + 'samples' + \
'_tau' + str(FLAGS.tau) + '/' + dataset + '/'
tx.utils.maybe_create_dir(log_dir)
checkpoint_dir = './checkpoints/' + 'raml/' + dataset + '/'
tx.utils.maybe_create_dir(checkpoint_dir)
config_data.train['batch_size'] *= FLAGS.n_samples
config_data.val['batch_size'] *= FLAGS.n_samples
config_data.test['batch_size'] *= FLAGS.n_samples
train_data = tx.data.PairedTextData(hparams=config_data.train)
val_data = tx.data.PairedTextData(hparams=config_data.val)
test_data = tx.data.PairedTextData(hparams=config_data.test)
data_iterator = tx.data.TrainTestDataIterator(
train=train_data, val=val_data, test=test_data)
batch_size = config_data.train['batch_size']
batch = data_iterator.get_next()
rewards_ts = tf.placeholder(
dtype=tf.float32, shape=[None, ], name='training_rewards')
max_sent_length = 50
loader = TranslationWithScore(f'./data/{dataset}_raml', 10, max_sent_length, 0, 'nltk', False)
train_op, infer_outputs = build_model(batch, loader, rewards_ts, config_model)
# raml_train_data = read_raml_sample_file()
def _train_epoch(sess, epoch_no):
data_iterator.switch_to_train_data(sess)
training_log_file = \
open(log_dir + 'training_log' + str(epoch_no) + '.txt', 'w',
encoding='utf-8')
step = 0
loader.restart("train", batch_size=batch_size, shuffle=True)
batched_data = loader.get_next_batch("train")
while batched_data is not None:
feed_dict = {
batch['source_text_ids']: batched_data['post'],
batch['source_length']: batched_data['post_length'],
batch['target_text_ids']: batched_data['resp'],
batch['target_length']: batched_data['resp_length'],
rewards_ts: batched_data['score']
}
loss = sess.run(train_op, feed_dict=feed_dict)
print("step={}, loss={:.4f}".format(step, loss),
file=training_log_file)
if step % config_data.observe_steps == 0:
print("step={}, loss={:.4f}".format(step, loss))
training_log_file.flush()
step += 1
if debug:
break
batched_data = loader.get_next_batch("train")
# code below this line is exactly the same as baseline_seq2seq_attn_main.py
def _eval_epoch(sess, mode, epoch_no):
if mode == 'dev':
data_iterator.switch_to_val_data(sess)
else:
data_iterator.switch_to_test_data(sess)
loader.restart(mode, batch_size=batch_size)
batched_data = loader.get_next_batch(mode)
refs, hypos = [], []
refs_id, hypos_id = [], []
while batched_data is not None:
fetches = [infer_outputs.predicted_ids[:, :, 0]]
feed_dict = {
tx.global_mode(): tf.estimator.ModeKeys.EVAL,
batch['source_text_ids']: batched_data['post'],
batch['source_length']: batched_data['post_length'],
batch['target_text_ids']: batched_data['resp'],
batch['target_length']: batched_data['resp_length']
}
output_ids = sess.run(fetches, feed_dict=feed_dict)
x = [loader.convert_ids_to_tokens(q, trim=True)[1:] for q in batched_data['resp']]
target_texts = tx.utils.str_join(x)
# print('x:{}\ntarget_texts:{}'.format(x, target_texts))
y = [loader.convert_ids_to_tokens(q, trim=True) for q in output_ids[0]]
output_texts = tx.utils.str_join(y)
tx.utils.write_paired_text(
target_texts, output_texts,
log_dir + mode + '_results' + str(epoch_no) + '.txt',
append=True, mode='h', sep=' ||| ')
for hypo_id, ref_id in zip(output_ids[0], batched_data['resp']):
if config_data.eval_metric == 'bleu':
hypos_id.append(hypo_id)
refs_id.append(ref_id)
for hypo, ref in zip(output_texts, target_texts):
if config_data.eval_metric == 'bleu':
hypos.append(hypo)
refs.append([ref])
elif config_data.eval_metric == 'rouge':
hypos.append(tx.utils.compat_as_text(hypo))
refs.append(tx.utils.compat_as_text(ref))
batched_data = loader.get_next_batch(mode)
if debug:
break
if config_data.eval_metric == 'bleu':
BleuMetric = cotk.metric.BleuCorpusMetric(loader)
data = {'ref_allvocabs': refs_id, 'gen': hypos_id}
BleuMetric.forward(data)
result = BleuMetric.close()
return result['bleu'], result
elif config_data.eval_metric == 'rouge':
rouge = Rouge()
return rouge.get_scores(hyps=hypos, refs=refs, avg=True)
def _calc_reward(score):
"""
Return the bleu score or the sum of (Rouge-1, Rouge-2, Rouge-L).
"""
if config_data.eval_metric == 'bleu':
return score
elif config_data.eval_metric == 'rouge':
return sum([value['f'] for key, value in score.items()])
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
saver = tf.train.Saver(max_to_keep=1)
if load and tf.train.latest_checkpoint(checkpoint_dir) is not None:
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
best_val_score = -1.
scores_file = open(log_dir + 'scores.txt', 'w', encoding='utf-8')
if not infer:
for i in range(config_data.num_epochs):
_train_epoch(sess, i)
val_score, _ = _eval_epoch(sess, 'dev', i)
test_score, result = _eval_epoch(sess, 'test', i)
best_val_score = max(best_val_score, _calc_reward(val_score))
if best_val_score == _calc_reward(val_score):
saver.save(sess, checkpoint_dir, global_step=i + 1)
with open(checkpoint_dir + 'result.json', 'w', encoding='utf-8') as file:
json.dump(result, file)
if config_data.eval_metric == 'bleu':
print_stdout_and_file(
'val epoch={}, BLEU={:.4f}; best-ever={:.4f}'.format(
i, val_score, best_val_score), file=scores_file)
print_stdout_and_file(
'test epoch={}, BLEU={:.4f}'.format(i, test_score),
file=scores_file)
print_stdout_and_file('=' * 50, file=scores_file)
elif config_data.eval_metric == 'rouge':
print_stdout_and_file(
'valid epoch {}:'.format(i), file=scores_file)
for key, value in val_score.items():
print_stdout_and_file(
'{}: {}'.format(key, value), file=scores_file)
print_stdout_and_file('fsum: {}; best_val_fsum: {}'.format(
_calc_reward(val_score), best_val_score), file=scores_file)
print_stdout_and_file(
'test epoch {}:'.format(i), file=scores_file)
for key, value in test_score.items():
print_stdout_and_file(
'{}: {}'.format(key, value), file=scores_file)
print_stdout_and_file('=' * 110, file=scores_file)
scores_file.flush()
else:
val_score, _ = _eval_epoch(sess, 'dev', 0)
test_score, result = _eval_epoch(sess, 'test', 0)
with open(log_dir + 'result.json', 'w', encoding='utf-8') as file:
json.dump(result, file)
if __name__ == '__main__':
main()
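# Example invocation (flag names as defined above; the config module paths are
# the defaults shipped with this script and may differ in other setups):
#   python raml_main.py --config_model configs.config_model \
#       --config_data configs.config_giga --n_samples 10 --tau 0.4 --gpu 0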
| 40.476923 | 98 | 0.615127 |
cdbb9bac967ec4bad8db9d8aca54e9d4752c5b89 | 160 | py | Python | aoj/v100/10021.py | Johniel/contests | b692eff913c20e2c1eb4ff0ce3cd4c57900594e0 | [
"Unlicense"
] | null | null | null | aoj/v100/10021.py | Johniel/contests | b692eff913c20e2c1eb4ff0ce3cd4c57900594e0 | [
"Unlicense"
] | 19 | 2016-05-04T02:46:31.000Z | 2021-11-27T06:18:33.000Z | aoj/v100/10021.py | Johniel/contests | b692eff913c20e2c1eb4ff0ce3cd4c57900594e0 | [
"Unlicense"
] | null | null | null | import sys
import string
n = int(input())
mn = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
while 0 < n:
n -= 1
s = input()
mn = min(mn, s)
print(mn) | 16 | 47 | 0.6375 |
369ad875f871c7cf81a571e69e6d6540a247dfdf | 5,770 | py | Python | compute_offsets.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | compute_offsets.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | compute_offsets.py | eciraci/iceye_gamma_proc | 68b04bfd55082862f419031c28e7b52f1800f3db | [
"MIT"
] | null | null | null | #!/usr/bin/env python
u"""
compute_offsets.py
Calculate Preliminary Offsets Parameter File for a pair of ICEye Single Look
Complex images using GAMMA's Python integration with the py_gamma module.
usage: compute_offsets.py [-h] [--directory DIRECTORY] reference secondary
Calculate Preliminary Offsets Parameter.
positional arguments:
reference Reference SLCs.
secondary Secondary SLCs.
optional arguments:
-h, --help show this help message and exit
--directory DIRECTORY, -D DIRECTORY
Project data directory.
PYTHON DEPENDENCIES:
argparse: Parser for command-line options, arguments and sub-commands
https://docs.python.org/3/library/argparse.html
datetime: Basic date and time types
https://docs.python.org/3/library/datetime.html#module-datetime
tqdm: Progress Bar in Python.
https://tqdm.github.io/
py_gamma: GAMMA's Python integration with the py_gamma module
UPDATE HISTORY:
"""
# - Python Dependencies
from __future__ import print_function
import os
import argparse
import datetime
# - GAMMA's Python integration with the py_gamma module
import py_gamma as pg
from utils.make_dir import make_dir
def main():
parser = argparse.ArgumentParser(
description="""Calculate Preliminary Offsets Parameter."""
)
# - Absolute Path to directory containing input data.
default_dir = os.path.join(os.path.expanduser('~'), 'Desktop',
'iceye_gamma_test', 'output')
parser.add_argument('reference', type=str,
help='Reference SLCs.')
parser.add_argument('secondary', type=str,
help='Secondary SLCs.')
parser.add_argument('--directory', '-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=default_dir,
help='Project data directory.')
parser.add_argument('--init_offset', '-I', action='store_true',
help='Determine initial offset between SLC'
'images using correlation of image intensity')
args = parser.parse_args()
# - Path to Test directory
data_dir = os.path.join(args.directory, 'slc+par')
# - Parameters
ref = args.reference
sec = args.secondary
# - Ref/sec - possible combination
# ref = '152307_20211022T145808'
# sec = '152566_20211023T145809'
# - Offset Computation parameter
algorithm = 1 # - offset estimation algorithm
rlks = 1 # - number of interferogram range looks (enter - for default: 1)
azlks = 1 # - number of interferogram azimuth looks (enter-for default: 1)
iflg = 0 # - interactive mode flag (enter - for default)
# - init_offset - Parameters
# - center of patch (enter - for default: image center)
rpos = '-' # - center of patch in range (samples)
azpos = '-' # - center of patch in azimuth (lines)
offr = '-' # - initial range offset (samples) (enter - for default: 0)
offaz = '-' # - initial azimuth offset (lines) (enter - for default: 0)
thres = '-' # - cross-correlation threshold (enter - for default: 0.150)
rwin = 512 # - range window size (default: 512)
azwin = 512 # - azimuth window size (default: 512)
# - Output directory name
out_dir_name = ref + '-' + sec
# - Output Directory
if args.init_offset:
out_dir = make_dir(args.directory, 'pair_diff_io')
else:
out_dir = make_dir(args.directory, 'pair_diff')
out_dir = make_dir(out_dir, out_dir_name)
# - output parameter file
out_par = os.path.join(out_dir, out_dir_name+'.par')
# - Create Offset Parameter File
pg.create_offset(os.path.join(data_dir, ref+'.par'),
os.path.join(data_dir, sec+'.par'), out_par,
algorithm, rlks, azlks, iflg)
# - Initial SLC image offset estimation from orbit state-vectors
# - and image parameters
pg.init_offset_orbit(os.path.join(data_dir, ref+'.par'),
os.path.join(data_dir, sec+'.par'), out_par)
# - Determine initial offset between SLC images using correlation
# - of image intensity
if args.init_offset:
pg.init_offset(os.path.join(data_dir, ref+'.slc'),
os.path.join(data_dir, sec+'.slc'),
os.path.join(data_dir, ref+'.par'),
os.path.join(data_dir, sec+'.par'),
out_par, rlks, azlks, rpos, azpos, offr, offaz,
thres, rwin, azwin)
# - Create symbolic links for each of the .slc and .par files
if os.path.isfile(os.path.join(out_dir, ref+'.slc')):
os.remove(os.path.join(out_dir, ref+'.slc'))
os.symlink(os.path.join(data_dir, ref+'.slc'),
os.path.join(out_dir, ref+'.slc'))
# -
if os.path.isfile(os.path.join(out_dir, ref+'.par')):
os.remove(os.path.join(out_dir, ref+'.par'))
os.symlink(os.path.join(data_dir, ref+'.par'),
os.path.join(out_dir, ref+'.par'))
# -
if os.path.isfile(os.path.join(out_dir, sec+'.slc')):
os.remove(os.path.join(out_dir, sec+'.slc'))
os.symlink(os.path.join(data_dir, sec+'.slc'),
os.path.join(out_dir, sec+'.slc'))
# -
if os.path.isfile(os.path.join(out_dir, sec+'.par')):
os.remove(os.path.join(out_dir, sec+'.par'))
os.symlink(os.path.join(data_dir, sec+'.par'),
os.path.join(out_dir, sec+'.par'))
# - run main program
if __name__ == '__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
print(f"# - Computation Time: {end_time - start_time}")
| 37.225806 | 80 | 0.618198 |
942a45ea658ddfd877dc5c1dea45bff20acd392b | 5,052 | py | Python | official/cv/ADNet/src/trainers/adnet_train_rl.py | polar-region/MindSpore | b96bf8e175faabe2521882c0b7f6e89928e267c7 | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | official/cv/ADNet/src/trainers/adnet_train_rl.py | polar-region/MindSpore | b96bf8e175faabe2521882c0b7f6e89928e267c7 | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | official/cv/ADNet/src/trainers/adnet_train_rl.py | polar-region/MindSpore | b96bf8e175faabe2521882c0b7f6e89928e267c7 | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# matlab code: https://github.com/hellbell/ADNet/blob/master/train/adnet_train_RL.m
# policy gradient in pytorch: https://medium.com/@ts1829/policy-gradient-reinforcement-learning-in-pytorch-df1383ea0baf
import os
import time
import copy
import numpy as np
from src.trainers.RL_tools import TrackingPolicyLoss
from src.datasets.rl_dataset import RLDataset
from src.models.CustomizedCell import WithLossCell, TrainOneStepCell
from src.utils.save_ckpt import save_ckpt
from src.utils.get_wrapper_utils import get_dataLoader
from mindspore import nn, ops
from mindspore.communication.management import get_rank, get_group_size
def adnet_train_rl(net, domain_specific_nets, train_videos, opts, args):
if args.run_online == 'True':
save_path = '/cache/train_out'
else:
save_path = ''
if not os.path.exists(os.path.join(save_path, args.save_folder, args.save_domain_dir)):
os.makedirs(os.path.join(save_path, args.save_folder, args.save_domain_dir))
net.set_phase('test')
optimizer = nn.SGD([{'params': net.base_network.trainable_params(), 'lr': 1e-4},
{'params': net.fc4_5.trainable_params()},
{'params': net.fc6.trainable_params()},
{'params': net.fc7.trainable_params(), 'lr': 0}],
learning_rate=1e-3, momentum=opts['train']['momentum'],
weight_decay=opts['train']['weightDecay'])
criterion = TrackingPolicyLoss()
clip_idx_epoch = 0
prev_net = copy.deepcopy(net)
dataset = RLDataset(prev_net, domain_specific_nets, train_videos, opts, args)
rlnet_with_criterion = WithLossCell(net, criterion)
net_rl = TrainOneStepCell(rlnet_with_criterion, optimizer)
for epoch in range(args.start_epoch, opts['numEpoch']):
if epoch != args.start_epoch:
dataset.reset(prev_net, domain_specific_nets, train_videos, opts, args)
data_loader = get_dataLoader(dataset, opts, args,
["log_probs_list", "reward_list", "vid_idx_list", 'patch'])
# create batch iterator
batch_iterator = iter(data_loader)
epoch_size = len(dataset) // opts['minibatch_size'] # 1 epoch, how many iterations
if args.distributed:
rank_id = get_rank()
rank_size = get_group_size()
epoch_size = epoch_size // rank_size
for iteration in range(epoch_size):
# load train data
# action, action_prob, log_probs, reward, patch, action_dynamic, result_box = next(batch_iterator)
_, reward, vid_idx, patch = next(batch_iterator)
# train
tic = time.time()
patch = patch.transpose(0, 3, 1, 2)
# find out the unique value in vid_idx
# separate the batch with each video idx
if args.multidomain:
vid_idx_unique = ops.Unique()(vid_idx)[0]
for i in range(len(vid_idx_unique)):
choice_list = (vid_idx_unique[i] == vid_idx).asnumpy().nonzero()[0].tolist()
if len(choice_list) == 1:
continue
tmp_patch = patch[choice_list]
tmp_reward = reward[choice_list]
net_rl(tmp_patch, tmp_reward)
# save the ADNetDomainSpecific back to their module
idx = np.asscalar(vid_idx_unique[i].asnumpy())
domain_specific_nets[idx].load_weights_from_adnet(net)
else:
net_rl(patch, reward)
toc = time.time() - tic
print('epoch ' + str(epoch) + ' - iteration ' + str(iteration) + ' - train time: ' + str(toc) + " s")
if iteration % 1000 == 0:
if not args.distributed or rank_id == 0:
save_ckpt(net, domain_specific_nets, save_path, args, iteration, epoch, 1)
clip_idx_epoch += 1
if not args.distributed or rank_id == 0:
save_ckpt(net, domain_specific_nets, save_path, args, iteration, epoch, 2)
if not args.distributed or rank_id == 0:
save_ckpt(net, domain_specific_nets, save_path, args, iteration, epoch, 3)
if args.run_online == 'True':
import moxing
moxing.file.copy_parallel('/cache/train_out/weights', args.train_url)
return net
| 44.707965 | 119 | 0.632423 |
8799765911c6c2ca44efe272756e1c9bb3d790ff | 1,004 | py | Python | scraper/storage_spiders/kapavn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | null | null | null | scraper/storage_spiders/kapavn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 10 | 2020-02-11T23:34:28.000Z | 2022-03-11T23:16:12.000Z | scraper/storage_spiders/kapavn.py | chongiadung/choinho | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | [
"MIT"
] | 3 | 2018-08-05T14:54:25.000Z | 2021-06-07T01:49:59.000Z | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='name_product']",
'price' : "//div[@class='row1']/div[@class='bg_price']/div[@class='giadeal']",
'category' : "//ul[@class='menubreakcumb']/li",
'description' : "//div[@class='leftbox_content']/div[@class='detail']",
'images' : "//a[@class='zoomThumbActive']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'kapa.vn'
allowed_domains = ['kapa.vn']
start_urls = ['http://kapa.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+-\d+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\.html($|&page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 33.466667 | 82 | 0.60259 |
ecd9631ea8f8ec4fab0f12bfb99feec3055b0bb0 | 1,784 | py | Python | src/vbr/tableclasses/redcap/rcaptable.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:08:29.000Z | 2021-05-26T19:08:29.000Z | src/vbr/tableclasses/redcap/rcaptable.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 7 | 2021-05-04T13:12:39.000Z | 2022-03-09T21:04:33.000Z | src/vbr/tableclasses/redcap/rcaptable.py | a2cps/python-vbr | 9d5d4480386d0530450d59157e0da6937320f928 | [
"BSD-3-Clause"
] | 2 | 2021-04-20T14:46:52.000Z | 2021-06-07T20:28:28.000Z | from ...pgrest import *
from ..constants import Constants
from .rcconstants import REDCapConstants
from ..vbr_table import TableVBR
class RcapTable(TableVBR):
"""Parent class for REDCap-derived tables"""
# tracking_id (subject GUID)
# tracking_id = Constants.STRING_PERSISTENT_ID
creation_time = Column(CreatedTimeStamp, nullable=True)
# Get timestamp for last update; check if next code line is correct.
last_updated_ts = Column(UpdatedTimeStamp, nullable=True)
# Store record, protocol (which maps to redcap event), and status
# (which maps to redcap status)
# RCap record_id identifying subject
record_id = REDCapConstants.RECORD_ID
# Mandatory
protocol_id = Column(
Integer, ForeignKey("protocol.protocol_id"), comments="VBR protocol"
)
# Mandatory
status_id = Column(Integer, ForeignKey("status.status_id"), comments="VBR status")
# Optionally, join to biosample, measurement, or subject
biosample_id = Column(
Integer,
ForeignKey("biosample.biosample_id", event_action="CASCADE"),
nullable=True,
)
measurement_id = Column(
Integer,
ForeignKey("measurement.measurement_id", event_action="CASCADE"),
nullable=True,
)
subject_id = Column(
Integer, ForeignKey("subject.subject_id", event_action="CASCADE"), nullable=True
)
@classmethod
def _links(cls):
# Update this with any new linkages established by adding FK constraints above
return ("protocol", "status", "biosample", "measurement", "subject")
@classmethod
def link_column_names(cls):
"""Return names of columns linked by foreign key constraints."""
cols = [c + "_id" for c in cls._links()]
return tuple(cols)
| 34.307692 | 88 | 0.68722 |
550848afa905dd01c00a6c92fd826f6743b5d982 | 4,371 | py | Python | CVE-2022-21907.py | michelep/CVE-2022-21907-Vulnerability-PoC | 80c98e9dd8187269a15c6dbcf485f607942edb78 | [
"MIT"
] | 4 | 2022-01-24T06:22:09.000Z | 2022-03-21T07:03:05.000Z | CVE-2022-21907.py | michelep/CVE-2022-21907-Vulnerability-PoC | 80c98e9dd8187269a15c6dbcf485f607942edb78 | [
"MIT"
] | null | null | null | CVE-2022-21907.py | michelep/CVE-2022-21907-Vulnerability-PoC | 80c98e9dd8187269a15c6dbcf485f607942edb78 | [
"MIT"
] | 1 | 2022-03-05T08:56:05.000Z | 2022-03-05T08:56:05.000Z | # Create by antx at 2022-01-17.
# integrated by Michele "[email protected]" just for fun, on 23.01.2022
import requests
from loguru import logger
import time
import argparse
from ipaddress import ip_network
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36'}
class CVE_2022_21907():
@logger.catch(level='ERROR')
def first_handshake(self, target: str):
try:
resp = requests.get(target, headers=header, timeout=10)
if resp.status_code == 200:
logger.info(f'The first handshake: the target host is normal and can be verified by POC')
return True
logger.info(f'First handshake: the target host is normal, but returns an exception, status code: {resp.status_code}')
return False
except Exception as e:
logger.info(f'First handshake error: The target host is abnormal, please check whether the target host is alive, error resp: {e}')
return False
@logger.catch(level='ERROR')
def verify_handshake(self, target: str):
try:
resp = requests.get(target, headers=header, timeout=10)
if resp.status_code == 200:
logger.info(f'Verification result: The target host has restarted and returned to normal')
return False
logger.info(f'Verification result: The target host has restarted and returned to normal, but returned an exception with a status code: {resp.status_code}')
return False
except requests.exceptions.ConnectionError as e:
logger.info(f'Verification result: The verification is successful, the target host is abnormal, has been exploited and entered the blue screen restart')
return True
@logger.catch(level='ERROR')
def poc(self, target: str):
# headers = {'Accept-Encoding': 'doar-e, ftw, imo, ,'} # CVE-2021-31166
headers = {
'Accept-Encoding': 'AAAAAAAAAAAAAAAAAAAAAAAA, '
'BBBBBBcccACCCACACATTATTATAASDFADFAFSDDAHJSKSKKSKKSKJHHSHHHAY&AU&**SISODDJJDJJDJJJDJJSU**S, '
'RRARRARYYYATTATTTTATTATTATSHHSGGUGFURYTIUHSLKJLKJMNLSJLJLJSLJJLJLKJHJVHGF, '
'TTYCTCTTTCGFDSGAHDTUYGKJHJLKJHGFUTYREYUTIYOUPIOOLPLMKNLIJOPKOLPKOPJLKOP, '
'OOOAOAOOOAOOAOOOAOOOAOOOAOO, '
'****************************stupiD, *, ,'
} # CVE-2022-21907
try:
r = requests.get(target, headers=headers, timeout=10)
            logger.info(f'POC handshake failed: {target} does not appear vulnerable to CVE-2022-21907; it may have been patched')
return False
except requests.exceptions.ReadTimeout as e:
            logger.info(f'POC handshake success: {target} may be exploitable!')
return True
@logger.catch(level='ERROR')
def dia(self, url: str):
if 'http' not in url:
target = f'http://{url}'
elif 'https' in url:
target = url.replace('https', 'http')
else:
target = url
logger.info(f'start verification: {target}')
if not self.first_handshake(target):
            logger.info(f'{target} is not vulnerable to CVE-2022-21907')
return
self.poc(target)
logger.info(f'Deterministic verification again')
while True:
time.sleep(10)
if not self.verify_handshake(target):
break
        logger.info(f'{target} has the CVE-2022-21907 vulnerability and can be exploited!')
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(description='CVE-2022-21907 PoC')
parser.add_argument('-i','--ip', help='IPs (CIDR) to attack',required=False)
parser.add_argument('-u','--url', help='URL to attack',required=False)
args = parser.parse_args()
if not args.ip and not args.url:
print("Please use -i or -u to specify target(s)!")
# default debug level: just errors!
cve = CVE_2022_21907()
if args.ip:
for ip in ip_network(args.ip).hosts():
cve.dia(format(ip))
elif args.url:
cve.dia(format(args.url))
| 45.061856 | 167 | 0.611759 |
f6834137b672d2905c2386ed1441353f8c46095f | 9,867 | py | Python | options/base_options.py | mengyuxin520/PGGAN | 0cf1e46c8b935893066c0314c07049ee9a9b5ef5 | [
"BSD-3-Clause"
] | null | null | null | options/base_options.py | mengyuxin520/PGGAN | 0cf1e46c8b935893066c0314c07049ee9a9b5ef5 | [
"BSD-3-Clause"
] | 1 | 2022-02-10T09:06:32.000Z | 2022-03-22T07:45:55.000Z | options/base_options.py | mengyuxin520/PGGAN | 0cf1e46c8b935893066c0314c07049ee9a9b5ef5 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
"""This class defines options used during both training and test time.
It also implements several helper functions such as parsing, printing, and saving the options.
It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
"""
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
parser.add_argument('--model', type=str, default='pix2pix', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='unet_128', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode_test', type=str, default='unalignedtest', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode1', type=str, default='unaligned1', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode2', type=str, default='unaligned2', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode3', type=str, default='unaligned3', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode4', type=str, default='unaligned4', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode5', type=str, default='unaligned5', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode6', type=str, default='unaligned6', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode7', type=str, default='unaligned7', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode8', type=str, default='unaligned8', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode9', type=str, default='unaligned9', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--dataset_mode10', type=str, default='unaligned10', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=1, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=2, help='input batch size')
parser.add_argument('--load_size', type=int, default=128, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=128, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=64, help='display window size for both visdom and HTML')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest2', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
opt, _ = parser.parse_known_args()
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
opt, _ = parser.parse_known_args() # parse again with new defaults
# modify dataset-related parser options
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
It will save options into a text file / [checkpoints_dir] / opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
self.print_options(opt)
# set gpu ids
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
opt.gpu_ids.append(id)
if len(opt.gpu_ids) > 0:
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt
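    # --- Editor's note: a minimal usage sketch. It assumes this class follows the
    # --- familiar BaseOptions pattern (a TrainOptions/TestOptions-style subclass
    # --- sets self.isTrain and self.initialized); the names below are illustrative,
    # --- not taken from this file.
    #
    #     opt = TrainOptions().parse()   # gather, print and return the options
    #     print(opt.batch_size, opt.load_size, opt.gpu_ids)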
| 66.668919 | 235 | 0.67295 |
de25402dd45ef8ece5bc650a18f3683539bb2a9c | 3,928 | py | Python | StandardDataSets/collada/library_visual_scenes/visual_scene/node/instance_geometry/bind_material/instance_material/instance_material_same_geometry/instance_material_same_geometry.py | KhronosGroup/COLLADA-CTS | 61f2a560cbb2a06ee62da8025241f6b08d06bfd9 | ["MIT"] | 20 | 2015-03-19T08:02:57.000Z | 2020-10-16T15:16:11.000Z | StandardDataSets/collada/library_visual_scenes/visual_scene/node/instance_geometry/bind_material/instance_material/instance_material_same_geometry/instance_material_same_geometry.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | ["MIT"] | 4 | 2017-04-19T18:42:05.000Z | 2017-06-17T03:03:28.000Z | StandardDataSets/collada/library_visual_scenes/visual_scene/node/instance_geometry/bind_material/instance_material/instance_material_same_geometry/instance_material_same_geometry.py | Acidburn0zzz/COLLADA-CTS | 39a36188cf8710bbc003df43ed70b965eb4386bd | ["MIT"] | 10 | 2015-03-26T02:52:24.000Z | 2022-02-24T08:43:48.000Z |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = []
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
        # No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
if (self.__assistant.GetResults() == False):
self.status_baseline = False
return False
# Compare the rendered images
# Then compare images against reference test for non-equivalence
if ( self.__assistant.CompareRenderedImages(context) ):
self.__assistant.CompareImagesAgainst(context, "_reference_instance_material_same_geometry", None, None, 5, True, True)
self.status_baseline = self.__assistant.DeferJudgement(context)
return self.status_baseline
# To pass intermediate you need to pass basic, this object could also include additional
# tests that were specific to the intermediate badge.
def JudgeSuperior(self, context):
self.status_superior = self.status_baseline
return self.status_superior
# To pass advanced you need to pass intermediate, this object could also include additional
# tests that were specific to the advanced badge
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| 52.373333 | 467 | 0.72276 |
90759c23ae3924a59eb5ff2b5755ede9631f7b52 | 8,229 | py | Python | tensorflow/python/kernel_tests/inplace_ops_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | 4 | 2021-06-15T17:26:07.000Z | 2021-11-17T10:58:08.000Z | tensorflow/python/kernel_tests/inplace_ops_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | 1 | 2018-09-17T19:30:27.000Z | 2018-09-17T19:30:27.000Z | tensorflow/python/kernel_tests/inplace_ops_test.py | elacsoft/tensorflow | ca552d54ac67be8837aeabdb43269846d9df4eb5 | ["Apache-2.0"] | 6 | 2018-12-20T01:35:20.000Z | 2020-07-10T17:29:57.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for inplace_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import inplace_ops
from tensorflow.python.platform import test as test_lib
class InplaceOpsTest(test_util.TensorFlowTestCase):
def testBasicUpdate(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] = 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.ones([1, 3], dtype) * 2)
y[-1, :] = 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] = 7
self.assertAllClose(x.eval(), y)
def testBasicUpdateBool(self):
with self.test_session(use_gpu=True):
x = array_ops.ones([7, 3], dtypes.bool)
y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
dtypes.bool))
y[3, :] = True
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.zeros([1, 3], dtypes.bool))
y[-1, :] = False
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
y[5, :] = False
self.assertAllClose(x.eval(), y)
def testBasicAdd(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = array_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] += 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] += 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] += 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] += 99
self.assertAllClose(x.eval(), y)
def testBasicSub(self):
for dtype in [dtypes.float32, dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] -= 1
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] -= 2
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] -= 7
self.assertAllClose(x.eval(), y)
x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] -= 99
self.assertAllClose(x.eval(), y)
def testRandom(self):
with self.test_session(use_gpu=True):
d0, d1, d2 = 100, 3, 5
x = array_ops.zeros([d0, d1, d2])
y = np.zeros([d0, d1, d2])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10, d1, d2))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx, :] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx, :] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx, :] -= val
self.assertAllClose(x.eval(), y)
def testRandom1D(self):
with self.test_session(use_gpu=True):
d0 = 100
x = array_ops.zeros([d0])
y = np.zeros([d0])
for _ in xrange(20):
idx = np.random.choice(d0, d0 // 10, replace=False)
val = np.random.randint(10, size=(d0 // 10))
op = np.random.randint(3)
if op == 0:
x = inplace_ops.inplace_update(x, idx, val)
y[idx] = val
elif op == 1:
x = inplace_ops.inplace_add(x, idx, val)
y[idx] += val
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx] -= val
self.assertAllClose(x.eval(), y)
def testAlias(self):
with self.test_session(use_gpu=True) as sess:
x = array_ops.ones([2, 3])
y = inplace_ops.alias_inplace_add(x, [0], [[1, 2, 3]])
with ops.control_dependencies([y]):
z = array_ops.identity(x)
_, vy, vz = sess.run([x, y, z])
self.assertAllClose(vy, vz)
def testError(self):
with self.cached_session():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"must be a vector"):
_ = inplace_ops.inplace_update([[1.]], [[0]], [[10]]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"x and v shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0], [10]).eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"i and x shape doesn't match"):
_ = inplace_ops.inplace_update([[1.]], [0, 1], [[10]]).eval()
def testEmpty(self):
for dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64, dtypes.bool,
dtypes.uint8
]:
with self.test_session(use_gpu=True):
test_shapes = [(), (1,), (2, 3), (0, 2), (2, 3, 5), (2, 0, 5)]
for shape in test_shapes:
val = inplace_ops.empty(shape, dtype).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty(shape, dtype, init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
val = inplace_ops.empty_like(array_ops.zeros(shape, dtype)).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
val = inplace_ops.empty_like(
array_ops.zeros(shape, dtype), init=True).eval()
self.assertEqual(val.shape, shape)
self.assertEqual(val.dtype, dtype.as_numpy_dtype)
self.assertAllEqual(val, np.zeros(shape, dtype.as_numpy_dtype))
with self.test_session(use_gpu=True):
val = inplace_ops.empty((1, 2), dtypes.string, init=True).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
val = inplace_ops.empty((1, 2), dtypes.string, init=False).eval()
self.assertEqual(val.tolist(), [[b"", b""]])
if __name__ == "__main__":
test_lib.main()
| 40.940299 | 80 | 0.590959 |
985b735f3331b1eb5208ba06cdc54585abb28e11 | 16,396 | py | Python | sandbox/lib/jumpscale/JumpscaleLibs/sal/ubuntu/Ubuntu.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | null | null | null | sandbox/lib/jumpscale/JumpscaleLibs/sal/ubuntu/Ubuntu.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | null | null | null | sandbox/lib/jumpscale/JumpscaleLibs/sal/ubuntu/Ubuntu.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | ["Apache-2.0"] | null | null | null |
from Jumpscale import j
JSBASE = j.baseclasses.object
TESTTOOLS = j.baseclasses.testtools
class Ubuntu(JSBASE,TESTTOOLS):
__jslocation__ = "j.sal.ubuntu"
def _init(self, **kwargs):
self._aptupdated = False
self._checked = False
self._cache_dict = None
self._installed_pkgs = None
@property
def _cache_ubuntu(self):
if self._cache_dict is None:
self.apt_init()
return self._cache_dict
def uptime(self):
"""return system uptime value.
:return: uptime value
:rtype: float
"""
with open("/proc/uptime") as f:
data = f.read()
uptime, _ = data.split(" ")
return float(uptime)
def apt_init(self, **kwargs):
"""shorthand for doing init_config() and init_system()
"""
try:
import apt
except ImportError:
j.sal.process.execute("apt-get update", False)
j.sal.process.execute("apt-get install python3-apt --force-yes -y")
import apt
apt.apt_pkg.init()
if hasattr(apt.apt_pkg, "Config"):
cfg = apt.apt_pkg.Config
else:
cfg = apt.apt_pkg.Configuration
try:
cfg.set("APT::Install-Recommends", "0")
cfg.set("APT::Install-Suggests", "0")
except BaseException:
pass
self._cache_dict = apt.Cache()
self.apt = apt
def check(self):
"""check if ubuntu or mint (which is based on ubuntu)
:raise: j.exceptions.RuntimeError: is os is not ubuntu nor mint
:return: True if system in ubuntu or mint
:rtype: bool
"""
if not self._checked:
osname = j.core.platformtype.myplatform.osname
osversion = j.core.platformtype.myplatform.osversion
if osname not in ("ubuntu", "linuxmint"):
raise j.exceptions.RuntimeError("Only Ubuntu/Mint supported")
            # safely cast the release version to a number
else:
release = float(osversion)
if release < 14:
raise j.exceptions.RuntimeError("Only ubuntu version 14+ supported")
self._checked = True
return self._checked
def version_get(self):
"""get lsb-release information
:return: ['DISTRIB_ID', 'DISTRIB_RELEASE', 'DISTRIB_CODENAME', 'DISTRIB_DESCRIPTION=']
:rtype: list
"""
with open("/etc/lsb-release") as f:
data = f.read()
result = []
for line in data.split("\n")[:-1]:
result.append(line.split("=")[1])
return result
def apt_install_check(self, package_name, cmd_name):
"""check if an ubuntu package is installed or not.
:param package_name: is name of ubuntu package to install e.g. curl
:type package_name: str
:param cmd_name: is cmd to check e.g. curl
:type cmd_name: str
:raise: j.exceptions.RuntimeError: Could not install package
"""
self.check()
rc, out, err = j.sal.process.execute("which %s" % cmd_name, useShell=True, die=False)
if rc != 0:
self.apt_install(package_name)
rc, out, err = j.sal.process.execute("which %s" % cmd_name, useShell=True)
if rc != 0:
raise j.exceptions.RuntimeError(
"Could not install package %s and check for command %s." % (package_name, cmd_name)
)
def apt_install(self, package_name):
"""install a specific ubuntu package.
:param package_name: name of the package
:type package_name: str
"""
self.apt_update()
cmd = "apt-get install %s --force-yes -y" % package_name
j.sal.process.execute(cmd)
def apt_install_version(self, package_name, version):
"""Install a specific version of an ubuntu package.
:param package_name: name of the package
:type package_name: str
:param version: version of the package
:type version: str
"""
self.apt_update()
cmd = "apt-get install %s=%s --force-yes -y" % (package_name, version)
j.sal.process.execute(cmd)
def deb_install(self, path, install_deps=True):
"""Install a debian package.
:param path: debian package path
:type path: str
:param install_deps: install debian package's dependencies
:type install_deps: bool
"""
self.check()
if self._cache_ubuntu is None:
self.apt_init()
import apt.debfile
deb = apt.debfile.DebPackage(path, cache=self._cache_ubuntu)
if install_deps:
deb.check()
for missing_pkg in deb.missing_deps:
self.apt_install(missing_pkg)
deb.install()
def deb_download_install(self, url, remove_downloaded=False):
"""download a debian package to tmp if not there yet, then install it.
:param url: debian package url
:type url: str
:param remove_downloaded: remove tmp download file
:type remove_downloaded: bool
"""
path = j.sal.nettools.download(url, "/tmp", overwrite=False)
self.deb_install(path)
if remove_downloaded:
j.sal.fs.remove(path)
def pkg_list(self, pkg_name, regex=""):
"""list files of dpkg. if regex used only output the ones who are matching regex
:param pkg_name: debian package name
:type pkg_name: str
:param regex: regular expression
:type regex: str
:return: List files owned by package
:rtype: list
"""
rc, out, err = j.sal.process.execute("dpkg -L %s" % pkg_name, useShell=True, die=False)
if regex != "":
return j.data.regex.findAll(regex, out)
else:
return out.split("\n")[:-1]
def pkg_remove(self, package_name):
"""remove an ubuntu package.
:param package_name: package name to be removed
:type package_name: str
"""
self._log_info("ubuntu remove package:%s" % package_name)
self.check()
self.apt_get_installed()
pkg = self._cache_ubuntu[package_name]
if pkg.is_installed:
pkg.mark_delete()
if package_name in self._installed_pkgs:
self._installed_pkgs.pop(self._installed_pkgs.index(package_name))
self._cache_ubuntu.commit()
self._cache_ubuntu.clear()
def _check_init_process(self):
process = j.sal.process.getProcessObject(1)
name = process.name()
if not name == "my_init" and not name == "systemd":
raise j.exceptions.RuntimeError("Unsupported init system process")
return name
def service_install(self, service_name, daemon_path, args="", respawn=True, pwd="/", env=None, reload=True):
"""Install an ubuntu service.
:param service_name: ubuntu service name
:type service_name: str
:param daemon_path: daemon path
:type daemon_path: str
:param args: service args
:type args: str
:param respawn: respawn
:type respawn: bool
:param pwd: chdir to pwd
:type pwd: str
:param env: environment values
:type env: dict
:param reload: reload
:type reload: bool
"""
init = self._check_init_process()
service_path = j.sal.fs.joinPaths(daemon_path, service_name)
if not j.sal.fs.exists(service_path):
raise j.exceptions.Value("Service daemon doesn't exist: %s" % service_path)
if init == "systemd":
cmd = """
[Unit]
Description={servicename}
Wants=network-online.target
After=network-online.target
[Service]
ExecStart={daemonpath} {args}
Restart=always
WorkingDirectory={pwd}
Environment={env}
[Install]
WantedBy=multi-user.target
""".format(
servicename=service_name, daemonpath=service_path, args=args, pwd=pwd, env=env
)
path = "/etc/systemd/system/%s.service" % service_name
else:
cmd = """\
#!/bin/sh
set -e
cd {pwd}
rm -f {logdir}/{servicename}.log
exec {demonpath} {args} >> {logdir}/{servicename}.log 2>&1
""".format(
pwd=pwd, servicename=service_name, demonpath=service_path, args=args, logdir=j.dirs.LOGDIR
)
path = "/etc/service/%s/run" % service_name
if not j.sal.fs.exists(path):
dir_path = j.sal.fs.getDirName(path)
if not j.sal.fs.exists(dir_path):
j.sal.fs.createDir(dir_path)
j.sal.fs.createEmptyFile(path)
j.sal.fs.writeFile(path, cmd)
if init == "my_init":
j.sal.unix.chmod(path, 0o755)
if reload and init == "systemd":
j.sal.process.execute("systemctl daemon-reload;systemctl enable %s" % service_name, useShell=True)
def service_uninstall(self, service_name, reload=True):
"""remove an ubuntu service.
:param service_name: ubuntu service name
:type service_name: str
"""
self.service_stop(service_name)
init = self._check_init_process()
if init == "systemd":
if reload:
j.sal.process.execute("systemctl daemon-reload; systemctl disable %s" % service_name, useShell=True)
path = "/etc/systemd/system/%s.service" % service_name
else:
path = "/etc/service/%s/run" % service_name
j.sal.fs.remove(path)
def _service_command(self, service_name, command):
init = self._check_init_process()
if init == "my_init":
cmd = "sv %s %s" % (command, service_name)
else:
cmd = "systemctl %s %s" % (command, service_name)
return j.sal.process.execute(cmd, die=False)
def service_start(self, service_name):
"""start an ubuntu service.
:param service_name: ubuntu service name
:type service_name: str
:return: start service output
:rtype: bool
"""
if self.service_status(service_name):
return
return self._service_command(service_name, "start")
def service_stop(self, service_name):
"""stop an ubuntu service.
:param service_name: ubuntu service name
:type service_name: str
:return: start service output
:rtype: bool
"""
return self._service_command(service_name, "stop")
def service_restart(self, service_name):
"""restart an ubuntu service.
:param service_name: ubuntu service name
:type service_name: str
:return: start service output
:rtype: bool
"""
return self._service_command(service_name, "restart")
def service_status(self, service_name):
"""check service status.
:param service_name: ubuntu service name
:type service_name: str
:return: True if service is running
:rtype: bool
"""
exitcode, output, error = self._service_command(service_name, "status")
return "run:" in output or "active (running)" in output
def service_disable_start_boot(self, service_name):
"""remove all links for a script
:param service_name: ubuntu service name
:type service_name: str
"""
j.sal.process.execute("update-rc.d -f %s remove" % service_name)
def service_enable_start_boot(self, service_name):
"""it makes links named /etc/rcrunlevel.d/[SK]NNname that point to the script /etc/init.d/name.
:param service_name: ubuntu service name
:type service_name: str
"""
j.sal.process.execute("update-rc.d -f %s defaults" % service_name)
def apt_update(self):
"""it is used to resynchronize the package index files from their sources
"""
self.check()
if self._cache_ubuntu:
self._cache_ubuntu.update()
self._cache_ubuntu.open()
self._cache_ubuntu.commit()
else:
j.sal.process.execute("apt-get update", False)
def apt_upgrade(self):
"""upgrade is used to install the newest versions of all packages currently installed on the system.
"""
self.check()
self.apt_update()
self._cache_ubuntu.upgrade(dist_upgrade=True)
self._cache_ubuntu.commit()
def apt_get_cache_keys(self):
"""get all cached keys.
:return: list of cache keys
:type: list
"""
return list(self._cache_ubuntu.keys())
def apt_get_installed(self):
"""get all the installed packages.
:return: list of installed list
:rtype: list
"""
if self._installed_pkgs is None:
self._installed_pkgs = []
for p in self._cache_ubuntu:
if p.is_installed:
self._installed_pkgs.append(p.name)
return self._installed_pkgs
def apt_find_all(self, package_name):
"""find all packages match with the package_name
:param package_name: ubuntu package name
:type package_name: str
:return: list of package names
:rtype: list
"""
        # strip both "-" and "_" so the match is tolerant of naming variants
        # (the original duplicated "_" replace looked unintended)
        package_name = package_name.lower().strip().replace("-", "").replace("_", "")
result = []
for item in self._cache_ubuntu.keys():
if item.replace("_", "").replace("_", "").lower().find(package_name) != -1:
result.append(item)
return result
def is_pkg_installed(self, package_name):
"""check if the package is installed or not.
:param package_name: package name
:type package_name: str
:return: if the package is installed, return True otherwise return False
:rtype: bool
"""
self.apt_get_installed()
return package_name in self._installed_pkgs
def apt_sources_list(self):
"""represents the full sources.list + sources.list.d file.
:return: list of apt sources
:rtype: list
"""
from aptsources import sourceslist
sources = sourceslist.SourcesList().list
return [str(source) for source in sources if not source.line.startswith("#") and source.line != "\n"]
def apt_sources_uri_add(self, url):
"""add a new apt source url.
:param url: source url
:type: str
"""
url = url.replace(";", ":")
name = url.replace("\\", "/").replace("http://", "").replace("https://", "").split("/")[0]
path = j.tools.path.get("/etc/apt/sources.list.d/%s.list" % name)
path.write_text("deb %s\n" % url)
def whoami(self):
"""get the user name associated with the current effective user ID.
:return: the user name associated with the current effective user ID.
:rtype: str
"""
rc, out, err = j.sal.process.execute("whoami", useShell=True)
return out.strip()
def checkroot(self):
"""check if the current user is root.
:raise j.exceptions.Input: only support root
"""
if self.whoami() != "root":
raise j.exceptions.Input("only support root")
def sshkey_generate(self, passphrase="", ssh_type="rsa", overwrite=False, path="/root/.ssh/id_rsa"):
"""generate a new ssh key.
:param passphrase: ssh key passphrase
:type: str
:param ssh_type: ssh key type (rsa or dsa)
:type: str
:param overwrite: overwrite the existing ssh key, default is (false)
:type: bool
:param path: ssh key path, default is (/root/.ssh/id_rsa)
:type: str
"""
path = j.tools.path.get(path)
if overwrite and path.exists():
path.rmtree_p()
if not path.exists():
if ssh_type not in ["rsa", "dsa"]:
raise j.exceptions.Input("only support rsa or dsa for now")
cmd = "ssh-keygen -t %s -b 4096 -P '%s' -f %s" % (ssh_type, passphrase, path)
j.sal.process.execute(cmd)
def _test(self, name=""):
"""Run tests under tests
:param name: basename of the file to run, defaults to "".
:type name: str, optional
"""
self._test_run(name=name, obj_key="main")
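    # --- Editor's note: a minimal usage sketch based only on the methods above.
    # --- It assumes a Jumpscale environment where this SAL is reachable at
    # --- j.sal.ubuntu (per __jslocation__); package and service names are
    # --- illustrative.
    #
    #     ubuntu = j.sal.ubuntu
    #     ubuntu.check()                             # verify Ubuntu/Mint 14+
    #     ubuntu.apt_update()
    #     ubuntu.apt_install_check("curl", "curl")   # install only if the binary is missing
    #     if not ubuntu.service_status("nginx"):
    #         ubuntu.service_start("nginx")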
| 32.661355 | 116 | 0.591486 |
0b8cdb8f38003ccc4ef20d1b5786e6b6af2b65ad | 8,178 | py | Python | PedicleScrewPlanner/PedicleScrewPlanner.py | jumbojing/PedicleScrewSimulator | cbb84cd84cd5617693f5ff29593bc396ecb1cb8a | ["MIT"] | 3 | 2021-06-10T05:40:18.000Z | 2022-03-23T05:51:08.000Z | PedicleScrewPlanner/PedicleScrewPlanner.py | jumbojing/PedicleScrewSimulator | cbb84cd84cd5617693f5ff29593bc396ecb1cb8a | ["MIT"] | null | null | null | PedicleScrewPlanner/PedicleScrewPlanner.py | jumbojing/PedicleScrewSimulator | cbb84cd84cd5617693f5ff29593bc396ecb1cb8a | ["MIT"] | 1 | 2021-09-16T02:06:45.000Z | 2021-09-16T02:06:45.000Z |
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import PedicleScrewSimulatorWizard
import PedicleScrewPlannerWizard
#from PedicleScrewPlannerWizard import *
#
# PedicleScrewPlanner
#
class PedicleScrewPlanner(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Pedicle Screw Planner"
self.parent.categories = ["Training"]
self.parent.dependencies = []
self.parent.contributors = ["Brendan Polley (University of Toronto)",
"Stewart McLachlin (Sunnybrook Research Institute)",
"Cari Whyne (Sunnybrook Research Institute)",
"Jumbo Jing"]
self.parent.helpText = """
Pedicle Screw Simulator. See more details here: https://github.com/lassoan/PedicleScrewSimulator
"""
self.parent.acknowledgementText = """
Orthopaedic Biomechanics Laboratory, Sunnybrook Health Sciences Centre.
""" # replace with organization, grant and thanks.
#
# PedicleScrewPlannerWidget
#
class PedicleScrewPlannerWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
# Instantiate and connect widgets ...
self.workflow = ctk.ctkWorkflow()
workflowWidget = ctk.ctkWorkflowStackedWidget()
workflowWidget.setWorkflow( self.workflow )
# create all wizard steps
self.loadDataStep = PedicleScrewSimulatorWizard.LoadDataStep( 'LoadData' )
self.defineROIStep = PedicleScrewSimulatorWizard.DefineROIStep( 'DefineROI')#, showSidesSelector=True )
self.measurementsStep = PedicleScrewPlannerWizard.PlanningMeasurementsStep( 'Measurements' )
self.landmarksStep = PedicleScrewPlannerWizard.PlanningLandmarksStep( 'Landmarks' )
# self.screwStep = PedicleScrewSimulatorWizard.ScrewStep( 'Screw' )
self.gradeStep = PedicleScrewPlannerWizard.PlanningGradeStep( 'Grade' )
self.endStep = PedicleScrewSimulatorWizard.EndStep( 'Final' )
# add the wizard steps to an array for convenience
allSteps = []
allSteps.append( self.loadDataStep )
allSteps.append( self.defineROIStep )
allSteps.append( self.landmarksStep)
allSteps.append( self.measurementsStep )
# allSteps.append( self.screwStep)
allSteps.append( self.gradeStep)
allSteps.append( self.endStep )
# Add transition
# Check if volume is loaded
self.workflow.addTransition( self.loadDataStep, self.defineROIStep )
self.workflow.addTransition( self.defineROIStep, self.landmarksStep, 'pass', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.defineROIStep, self.loadDataStep, 'fail', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.landmarksStep, self.measurementsStep, 'pass', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.landmarksStep, self.measurementsStep, 'fail', ctk.ctkWorkflow.Bidirectional )
# self.workflow.addTransition( self.measurementsStep, self.screwStep, 'pass', ctk.ctkWorkflow.Bidirectional )
# self.workflow.addTransition( self.measurementsStep, self.screwStep, 'fail', ctk.ctkWorkflow.Bidirectional )
#
# self.workflow.addTransition( self.screwStep, self.gradeStep, 'pass', ctk.ctkWorkflow.Bidirectional )
# self.workflow.addTransition( self.screwStep, self.gradeStep, 'fail', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.measurementsStep, self.gradeStep, 'pass', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.measurementsStep, self.gradeStep, 'fail', ctk.ctkWorkflow.Bidirectional )
self.workflow.addTransition( self.gradeStep, self.endStep )
nNodes = slicer.mrmlScene.GetNumberOfNodesByClass('vtkMRMLScriptedModuleNode')
self.parameterNode = None
for n in range(nNodes):
compNode = slicer.mrmlScene.GetNthNodeByClass(n, 'vtkMRMLScriptedModuleNode')
nodeid = None
if compNode.GetModuleName() == 'PedicleScrewPlanner':
self.parameterNode = compNode
logging.debug('Found existing PedicleScrewPlanner parameter node')
break
if self.parameterNode == None:
self.parameterNode = slicer.vtkMRMLScriptedModuleNode()
self.parameterNode.SetModuleName('PedicleScrewPlanner')
slicer.mrmlScene.AddNode(self.parameterNode)
for s in allSteps:
s.setParameterNode (self.parameterNode)
# restore workflow step
currentStep = self.parameterNode.GetParameter('currentStep')
if currentStep != '':
logging.debug('Restoring workflow step to ' + currentStep)
if currentStep == 'LoadData':
self.workflow.setInitialStep(self.loadDataStep)
if currentStep == 'DefineROI':
self.workflow.setInitialStep(self.defineROIStep)
if currentStep == 'Measurements':
self.workflow.setInitialStep(self.measurementsStep)
if currentStep == 'Landmarks':
self.workflow.setInitialStep(self.landmarksStep)
# if currentStep == 'Screw':
# self.workflow.setInitialStep(self.screwStep)
if currentStep == 'Grade':
self.workflow.setInitialStep(self.gradeStep)
if currentStep == 'Final':
self.workflow.setInitialStep(self.endStep)
else:
logging.debug('currentStep in parameter node is empty')
# start the workflow and show the widget
self.workflow.start()
workflowWidget.visible = True
self.layout.addWidget( workflowWidget )
# compress the layout
#self.layout.addStretch(1)
def cleanup(self):
pass
def onReload(self):
logging.debug("Reloading PedicleScrewPlanner")
packageName='PedicleScrewSimulatorWizard'
submoduleNames=['PedicleScrewSimulatorStep',
'DefineROIStep',
'EndStep',
'GradeStep',
'Helper',
'LandmarksStep',
'LoadDataStep',
'MeasurementsStep']
import imp
f, filename, description = imp.find_module(packageName)
package = imp.load_module(packageName, f, filename, description)
for submoduleName in submoduleNames:
f, filename, description = imp.find_module(submoduleName, package.__path__)
try:
imp.load_module(packageName+'.'+submoduleName, f, filename, description)
finally:
f.close()
ScriptedLoadableModuleWidget.onReload(self)
class PedicleScrewPlannerTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_PedicleScrewPlanner1()
def test_PedicleScrewPlanner1(self):
""" Ideally you should have several levels of tests. At the lowest level
tests should exercise the functionality of the logic with different inputs
(both valid and invalid). At higher levels your tests should emulate the
way the user would interact with your code and confirm that it still works
the way you intended.
One of the most important features of the tests is that it should alert other
developers when their changes will have an impact on the behavior of your
module. For example, if a developer removes a feature that you depend on,
your test should break so they know that the feature is needed.
"""
self.delayDisplay('No test is implemented.')
| 39.892683 | 116 | 0.712155 |
3249df6559615291e638ca0f91134e28fd50f122 | 5,289 | py | Python | beakerx/beakerx/plot/tests/test_plot.py | ssadedin/beakerx | 34479b07d2dfdf1404692692f483faf0251632c3 | ["Apache-2.0"] | 1 | 2020-08-04T15:30:19.000Z | 2020-08-04T15:30:19.000Z | beakerx/beakerx/plot/tests/test_plot.py | ssadedin/beakerx | 34479b07d2dfdf1404692692f483faf0251632c3 | ["Apache-2.0"] | null | null | null | beakerx/beakerx/plot/tests/test_plot.py | ssadedin/beakerx | 34479b07d2dfdf1404692692f483faf0251632c3 | ["Apache-2.0"] | null | null | null |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from beakerx import LegendPosition
from ..chart import Plot
from ..plotitem import StrokeType, Color, Crosshair, Points, ShapeType, YAxis, Text, ConstantLine, ConstantBand
class TestPlot(unittest.TestCase):
def test_plot(self):
# given
# when
plot = Plot(title="Title",
xLabel="Horizontal",
yLabel="Vertical",
initWidth=500,
initHeight=200)
# then
model = plot.model
self.assertEqual(model['chart_title'], "Title")
self.assertEqual(model['domain_axis_label'], "Horizontal")
self.assertEqual(model['y_label'], "Vertical")
self.assertEqual(len(model['rangeAxes']), 1)
self.assertEqual(len(model['texts']), 0)
self.assertEqual(len(model['constant_lines']), 0)
self.assertEqual(len(model['constant_bands']), 0)
self.assertEqual(len(model['graphics_list']), 0)
self.assertFalse('crosshair' in plot.model)
def test_add_YAxis_to_plot(self):
# given
plot = Plot()
# when
plot.add(YAxis(label="Right yAxis"))
# then
self.assertEqual(len(plot.model['rangeAxes']), 2)
def test_add_Text_to_plot(self):
# given
plot = Plot()
# when
plot.add(Text(text="Hello"))
# then
self.assertEqual(len(plot.model['texts']), 1)
def test_add_ConstantLine_to_plot(self):
# given
plot = Plot()
# when
plot.add(ConstantLine(x=0.65))
# then
self.assertEqual(len(plot.model['constant_lines']), 1)
def test_add_ConstantBand_to_plot(self):
# given
plot = Plot()
# when
plot.add(ConstantBand(x=[1, 2]))
# then
self.assertEqual(len(plot.model['constant_bands']), 1)
def test_add_list_of_ConstantBand_to_plot(self):
# given
plot = Plot()
list_of_constant_bands = [ConstantBand(x=[1, 2]), ConstantBand(x=[3, 4])]
# when
plot.add(list_of_constant_bands)
# then
self.assertEqual(len(plot.model['constant_bands']), len(list_of_constant_bands))
def test_should_setXBound(self):
# given
plot = Plot()
# when
plot.setXBound([-2, 10])
# then
self.assertEqual(plot.model['x_lower_bound'], -2)
self.assertEqual(plot.model['x_upper_bound'], 10)
def test_should_setYBound(self):
# given
plot = Plot()
# when
plot.setYBound([2, 6])
# then
self.assertEqual(plot.model['y_lower_bound'], 2)
self.assertEqual(plot.model['y_upper_bound'], 6)
def test_should_rise_ValueError_when_setXBound(self):
# given
plot = Plot()
# when
try:
plot.setXBound([-2, 10, 11])
except ValueError as ex:
# then
self.assertEqual(ex.args[0], "to set the x bound, the list needs to be of size=2.")
def test_should_rise_ValueError_when_setYBound(self):
# given
plot = Plot()
# when
try:
plot.setYBound([-2, 10, 11])
except ValueError as ex:
# then
self.assertEqual(ex.args[0], "to set the y bound, the list needs to be of size=2.")
def test_should_setShowLegend(self):
# given
plot = Plot()
# when
plot.setShowLegend(True)
# then
self.assertEqual(plot.model['show_legend'], True)
def test_should_set_crosshair(self):
# given
ch = Crosshair(color=Color.black, width=2)
# when
plot = Plot(crosshair=ch)
# then
self.assertTrue('crosshair' in plot.model)
def test_should_set_stroke_type(self):
# given
ch = Crosshair(color=Color.black, width=2, style=StrokeType.DOT)
# when
plot = Plot(crosshair=ch)
# then
self.assertEqual(plot.model['crosshair']['style'], 'DOT')
def test_set_shape_type(self):
# given
plot = Plot()
# when
plot.add(Points(y=[1, 3, 6, 3, 1],
x=[1, 2, 3, 4, 5],
size=10,
shape=ShapeType.DIAMOND))
# then
item = plot.model['graphics_list'][0]
self.assertEqual(item['shape'], "DIAMOND")
def test_should_set_legend_position(self):
# given
# when
plot = Plot(legendPosition=LegendPosition.LEFT)
# then
self.assertEqual(plot.model['legend_position']['type'], "LegendPosition")
self.assertEqual(plot.model['legend_position']['position'], "LEFT")
| 31.111765 | 111 | 0.591038 |
e582082afdb456c26faad1b92c3e8f58218c9436 | 1,251 | py | Python | bleu/bleu.py | visualbuffer/mcq | 37ca25885969628107879c1f47f8dc840902250a | ["BSD-3-Clause"] | null | null | null | bleu/bleu.py | visualbuffer/mcq | 37ca25885969628107879c1f47f8dc840902250a | ["BSD-3-Clause"] | null | null | null | bleu/bleu.py | visualbuffer/mcq | 37ca25885969628107879c1f47f8dc840902250a | ["BSD-3-Clause"] | 2 | 2020-01-28T07:15:46.000Z | 2020-08-14T13:23:53.000Z |
#!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
from bleu.bleu_scorer import BleuScorer
class Bleu:
def __init__(self, n=4):
# default compute Blue score up to 4
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
assert(gts.keys() == res.keys())
imgIds = gts.keys()
bleu_scorer = BleuScorer(n=self._n)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
bleu_scorer += (hypo[0], ref)
#score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=0)
#score, scores = bleu_scorer.compute_score(option='average', verbose=1)
# return (bleu, bleu_info)
return score, scores
def method(self):
return "Bleu"
| 26.0625 | 79 | 0.585132 |
f62d861669db17df867347d955dd167ade92f929 | 7,346 | py | Python | audioset_models.py | vasulkalivearchive/audio | 6359f2a906b29f5aaa15f4397d87d0e5e81b9056 | ["MIT"] | null | null | null | audioset_models.py | vasulkalivearchive/audio | 6359f2a906b29f5aaa15f4397d87d0e5e81b9056 | ["MIT"] | null | null | null | audioset_models.py | vasulkalivearchive/audio | 6359f2a906b29f5aaa15f4397d87d0e5e81b9056 | ["MIT"] | null | null | null |
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_utils import do_mixup
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding}
return output_dict
class TransferCNN14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, freeze_base):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(TransferCNN14, self).__init__()
audioset_classes_num = 527
self.base = Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, audioset_classes_num)
# Transfer to another task layer
self.fc_transfer = nn.Linear(2048, classes_num, bias=True)
if freeze_base:
# Freeze AudioSet pretrained layers
for param in self.base.parameters():
param.requires_grad = False
self.init_weights()
def init_weights(self):
init_layer(self.fc_transfer)
def load_from_pretrain(self, pretrained_checkpoint_path):
checkpoint = torch.load(pretrained_checkpoint_path)
self.base.load_state_dict(checkpoint['model'])
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, data_length)
"""
output_dict = self.base(input, mixup_lambda)
embedding = output_dict['embedding']
clipwise_output = torch.log_softmax(self.fc_transfer(embedding), dim=-1)
output_dict['clipwise_output'] = clipwise_output
return output_dict
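    # --- Editor's note: a minimal usage sketch. The hyperparameters mirror the
    # --- common 32 kHz AudioSet Cnn14 configuration and the checkpoint path is
    # --- hypothetical; only the constructor/forward signatures above are assumed.
    #
    #     model = TransferCNN14(sample_rate=32000, window_size=1024, hop_size=320,
    #                           mel_bins=64, fmin=50, fmax=14000,
    #                           classes_num=10, freeze_base=True)
    #     model.load_from_pretrain("Cnn14_mAP=0.431.pth")   # hypothetical checkpoint file
    #     batch = torch.zeros(2, 32000)                     # (batch_size, data_length)
    #     log_probs = model(batch)["clipwise_output"]       # (2, 10) log-softmax scores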
| 36.547264 | 108 | 0.574871 |
308144977fcd0a184d6078e84a43b78b6c810ddb | 272 | py | Python | general_classes/user.py | gustavonaldoni/command-line-e-comerce | 9ac2ee15783ff993b822698921fa36378f67bca3 | ["MIT"] | 2 | 2020-12-27T01:55:03.000Z | 2020-12-28T02:34:12.000Z | general_classes/user.py | gustavonaldoni/command-line-e-commerce | 9ac2ee15783ff993b822698921fa36378f67bca3 | ["MIT"] | 1 | 2020-12-27T04:37:00.000Z | 2020-12-27T04:44:51.000Z | general_classes/user.py | gustavonaldoni/command-line-e-comerce | 9ac2ee15783ff993b822698921fa36378f67bca3 | ["MIT"] | 1 | 2020-12-27T02:07:10.000Z | 2020-12-27T02:07:10.000Z |
class User:
def __init__(self, id, first_name, last_name, email, account_creation_date):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.email = email
        self.account_creation_date = account_creation_date
| 34 | 80 | 0.680147 |
f28fba0891c9aeb247dc7b8c485e33a0e9e8d8e8 | 1,220 | py | Python | db.py | Abdulla603/WorkoutApp | 4f68458f3d8b65573166cd2e6d653fb37e2746b5 | ["MIT"] | null | null | null | db.py | Abdulla603/WorkoutApp | 4f68458f3d8b65573166cd2e6d653fb37e2746b5 | ["MIT"] | null | null | null | db.py | Abdulla603/WorkoutApp | 4f68458f3d8b65573166cd2e6d653fb37e2746b5 | ["MIT"] | null | null | null |
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
'''
association_table = db.Table(
'association',
db.Model.metadata,
db.Column('workouts_id', db.Integer, db.ForeignKey('workouts.id')),
db.Column('exercise_id', db.Integer, db.ForeignKey("exercise.id"))
)
'''
class Workouts(db.Model):
__tablename__ = "workouts"
id = db.Column(db.Integer, primary_key = True)
date = db.Column(db.String(50))
notes = db.Column(db.Text)
def __init__(self, **kwargs):
self.date = kwargs.get("date", ' ')
self.notes = kwargs.get('notes', False)
def serialize(self):
return {
'id': self.id,
'date': self.date,
'notes': self.notes
}
class Exercise(db.Model):
__tablename__ = 'exercise'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(50))
def __init__(self, **kwargs):
self.name = kwargs.get("name", ' ')
def serialize(self):
return {
'id': self.id,
            'name': self.name  # Exercise defines self.name; self.description does not exist
}
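# --- Editor's note: a minimal usage sketch (assumes the Flask app has been bound
# --- to this db instance, e.g. via db.init_app(app), and the tables created;
# --- field values are illustrative).
#
#     workout = Workouts(date="2021-01-01", notes="Leg day")
#     db.session.add(workout)
#     db.session.commit()
#     workout.serialize()   # {'id': 1, 'date': '2021-01-01', 'notes': 'Leg day'}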
class Sets(db.Model):
__tablename__ = 'sets'
id = db.Column(db.Integer, primary_key = True)
| 25.416667 | 72 | 0.569672 |
333a539762e58299b126750e4db545108891ade8 | 382 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/gis/db/backends/postgis/features.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/gis/db/backends/postgis/features.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/contrib/gis/db/backends/postgis/features.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null |
from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import \
DatabaseFeatures as Psycopg2DatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_3d_storage = True
supports_3d_functions = True
supports_left_right_lookups = True
supports_raster = True
| 382 | 382 | 0.824607 |
d76e859d4fa07a34bcce6a5b2b8bd21d7b3e8301 | 6,268 | py | Python | 6-coding/ct_reconstruction-skimage.py | effepivi/gvxr-demos | 37423d4cc57e324d200475454ffdeaba4cb4c808 | ["BSD-3-Clause"] | null | null | null | 6-coding/ct_reconstruction-skimage.py | effepivi/gvxr-demos | 37423d4cc57e324d200475454ffdeaba4cb4c808 | ["BSD-3-Clause"] | null | null | null | 6-coding/ct_reconstruction-skimage.py | effepivi/gvxr-demos | 37423d4cc57e324d200475454ffdeaba4cb4c808 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
import os, copy
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
import math
from skimage.transform import iradon, iradon_sart
import SimpleITK as sitk
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import gvxrPython3 as gvxr
import inp2stl
# Define the NoneType
NoneType = type(None);
# Print the libraries' version
print (gvxr.getVersionOfSimpleGVXR())
print (gvxr.getVersionOfCoreGVXR())
# Create an OpenGL context
print("Create an OpenGL context")
gvxr.createWindow();
gvxr.setWindowSize(512, 512);
# Set up the beam
print("Set up the beam")
gvxr.setSourcePosition(-40.0, 0.0, 0.0, "cm");
#gvxr.usePointSource();
gvxr.useParallelBeam();
gvxr.setMonoChromatic(33, "keV", 1);
# Set up the detector
print("Set up the detector");
gvxr.setDetectorPosition(40.0, 0.0, 0.0, "cm");
gvxr.setDetectorUpVector(0, 0, -1);
gvxr.setDetectorNumberOfPixels(641, 320);
spacing_in_mm = 0.5;
gvxr.setDetectorPixelSize(spacing_in_mm, spacing_in_mm, "mm");
# Load the data
print("Load the data from the INP file");
vertex_set, triangle_index_set, material_set = inp2stl.readInpFile('male_model.inp', True);
inp2stl.writeStlFile("male_model.stl", vertex_set, triangle_index_set[0]);
# Get the bounding box
min_corner = None;
max_corner = None;
vertex_set = np.array(vertex_set).astype(np.float32);
for triangle in triangle_index_set[0]:
for vertex_id in triangle:
if isinstance(min_corner, NoneType):
min_corner = copy.deepcopy(vertex_set[vertex_id]);
else:
min_corner[0] = min(min_corner[0], vertex_set[vertex_id][0]);
min_corner[1] = min(min_corner[1], vertex_set[vertex_id][1]);
min_corner[2] = min(min_corner[2], vertex_set[vertex_id][2]);
if isinstance(max_corner, NoneType):
max_corner = copy.deepcopy(vertex_set[vertex_id]);
else:
max_corner[0] = max(max_corner[0], vertex_set[vertex_id][0]);
max_corner[1] = max(max_corner[1], vertex_set[vertex_id][1]);
max_corner[2] = max(max_corner[2], vertex_set[vertex_id][2]);
# Compute the bounding box
bbox_range = [max_corner[0] - min_corner[0],
max_corner[1] - min_corner[1],
max_corner[2] - min_corner[2]];
# print("X Range:", min_corner[0], "to", max_corner[0], "(delta:", bbox_range[0], ")")
# print("Y Range:", min_corner[1], "to", max_corner[1], "(delta:", bbox_range[1], ")")
# print("Z Range:", min_corner[2], "to", max_corner[2], "(delta:", bbox_range[2], ")")
# Centre the mesh
for vertex_id in range(len(vertex_set)):
vertex_set[vertex_id][0] -= min_corner[0] + bbox_range[0] / 2.0;
vertex_set[vertex_id][1] -= min_corner[1] + bbox_range[1] / 2.0;
vertex_set[vertex_id][2] -= min_corner[2] + bbox_range[2] / 2.0;
gvxr.makeTriangularMesh("male_model",
np.array(vertex_set).astype(np.float32).flatten(),
np.array(triangle_index_set).astype(np.int32).flatten(),
"m");
# The model is made of silicon carbide
# Silicon carbide total mass attenuation at 33 keV is 0.855 cm2.g-1.
# Its density is 3.2 g.cm-3.
# The theoretical linear attenuation coefficient is obtained as follows: 0.855 x 3.2 = 2.736 cm-1.
gvxr.setCompound("male_model", "SiC");
gvxr.setDensity("male_model",
3.2,
"g/cm3");
# Add the mesh to the simulation
gvxr.addPolygonMeshAsInnerSurface("male_model");
# Compute an X-ray image, update the 3D visualisation, and rotate the object
#gvxr.renderLoop();
projections = [];
theta = [];
number_of_angles = 360;
rotation_angle = 180 / number_of_angles;
for i in range(number_of_angles):
# Compute an X-ray image and add it to the list of projections
projections.append(gvxr.computeXRayImage());
# Update the 3D visualisation
gvxr.displayScene();
    # Rotate the model by rotation_angle degrees (180 / number_of_angles) about the Z-axis
gvxr.rotateNode("male_model", rotation_angle, 0, 0, -1);
theta.append(i * rotation_angle);
# Convert the projections as a Numpy array
projections = np.array(projections);
# Retrieve the total energy
energy_bins = gvxr.getEnergyBins("MeV");
photon_count_per_bin = gvxr.getPhotonCountEnergyBins();
total_energy = 0.0;
for energy, count in zip(energy_bins, photon_count_per_bin):
print(energy, count)
total_energy += energy * count;
# Perform the flat-field correction of raw data
dark = np.zeros(projections.shape);
flat = np.ones(projections.shape) * total_energy;
projections = (projections - dark) / (flat - dark);
# Calculate -log(projections) to linearize transmission tomography data
projections = -np.log(projections)
volume = sitk.GetImageFromArray(projections);
sitk.WriteImage(volume, 'projections-skimage.mhd');
# Resample as a sinogram stack
sinograms = np.swapaxes(projections, 0, 1);
# Perform the reconstruction
# Process slice by slice
recon_fbp = [];
recon_sart = [];
slice_id = 0;
for sinogram in sinograms:
slice_id+=1;
print("Reconstruct slice #", slice_id, "/", number_of_angles);
recon_fbp.append(iradon(sinogram.T, theta=theta, circle=True));
# Two iterations of SART
# recon_sart.append(iradon_sart(sinogram.T, theta=theta));
# recon_sart[-1] = iradon_sart(sinogram.T, theta=theta, image=recon_sart[-1]);
recon_fbp = np.array(recon_fbp);
# recon_sart = np.array(recon_sart);
# Plot the slice in the middle of the volume
plt.figure();
plt.title("FBP")
plt.imshow(recon_fbp[int(projections.shape[1]/2), :, :])
# plt.figure();
# plt.title("SART")
# plt.imshow(recon_sart[int(projections.shape[1]/2), :, :])
plt.show()
# Save the volume
volume = sitk.GetImageFromArray(recon_fbp);
volume.SetSpacing([spacing_in_mm, spacing_in_mm, spacing_in_mm]);
sitk.WriteImage(volume, 'recon-fbp-skimage.mhd');
# volume = sitk.GetImageFromArray(recon_sart);
# volume.SetSpacing([spacing_in_mm, spacing_in_mm, spacing_in_mm]);
# sitk.WriteImage(volume, 'recon-sart-skimage.mhd');
# Display the 3D scene (no event loop)
# Run an interactive loop
# (can rotate the 3D scene and zoom-in)
# Keys are:
# Q/Escape: to quit the event loop (does not close the window)
# B: display/hide the X-ray beam
# W: display the polygon meshes in solid or wireframe
# N: display the X-ray image in negative or positive
# H: display/hide the X-ray detector
gvxr.renderLoop();
| 30.427184 | 99 | 0.710115 |
1f9339d06522e3c497ba053d5df7b9853fab2acf | 5,338 | py | Python | saspy/sasdecorator.py | hahewlet/saspy | 8949d931146b0cfdccfe40fedc0013c028741328 | ["Apache-2.0"] | 317 | 2016-04-01T17:50:29.000Z | 2022-03-04T14:06:38.000Z | saspy/sasdecorator.py | oehs7/saspy | 47adeb5b9e298e6b9ec017f850245e318f2faa57 | ["Apache-2.0"] | 361 | 2016-04-25T16:04:06.000Z | 2022-03-30T13:52:03.000Z | saspy/sasdecorator.py | oehs7/saspy | 47adeb5b9e298e6b9ec017f850245e318f2faa57 | ["Apache-2.0"] | 167 | 2016-04-01T18:45:38.000Z | 2022-02-24T21:24:26.000Z |
import logging
import inspect
import sys
from functools import wraps
import warnings
from .sasproccommons import SASProcCommons
# from pdb import set_trace as bp
class procDecorator:
def __init__(self):
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.WARN)
if sys.version_info[0] < 3 or (sys.version_info[0] >= 3 and sys.version_info[1] < 4):
warnings.warn('Python 3.4+ is required to get correct tab complete and docstring '
'information for methods')
def proc_decorator(req_set):
"""
Decorator that provides the wrapped function with an attribute 'actual_kwargs'
containing just those keyword arguments actually passed in to the function.
"""
def decorator(func):
@wraps(func)
def inner(self, *args, **kwargs):
proc = func.__name__.lower()
inner.proc_decorator = kwargs
self.logger.debug("processing proc:{}".format(func.__name__))
self.logger.debug(req_set)
self.logger.debug("kwargs type: " + str(type(kwargs)))
if proc in ['hplogistic', 'hpreg']:
kwargs['ODSGraphics'] = kwargs.get('ODSGraphics', False)
if proc == 'hpcluster':
proc = 'hpclus'
# read the signature for the proc and use that as the legal set - kwargs and args
# legal_set = set(kwargs.keys())
legal_set = set(inspect.signature(self.__getattribute__(proc)).parameters.keys() - {'kwargs', 'args'})
self.logger.debug(legal_set)
return SASProcCommons._run_proc(self, proc, req_set, legal_set, **kwargs)
return inner
return decorator
def doc_convert(ls, proc: str = '') -> dict:
"""
The `doc_convert` method takes two arguments: a list of the valid statements and the proc name.
It returns a dictionary with two keys, method_stmt and markup_stmt.
These outputs can be copied into the appropriate product file.
:param proc: str
:return: dict with two keys method_stmt and markup_stmt
"""
generic_terms = ['procopts', 'stmtpassthrough']
assert isinstance(ls, set)
ls_list = [x.lower() for x in ls]
doc_list = []
doc_markup = []
for i in [j for j in ls_list if j not in generic_terms]:
if i.lower() == 'class':
i = 'cls'
doc_mstr = ''.join([':parm ', i, ': The {} variable can only be a string type.'.format(i)])
doc_str = ': str = None,'
if i.lower() in ['target', 'input']:
doc_mstr = ''.join([':parm ', i,
': The {} variable can be a string, list or dict type. It refers to the dependent, y, or label variable.'.format(i)])
doc_str = ': [str, list, dict] = None,'
if i.lower() == 'score':
doc_str = ": [str, bool, 'SASdata' ] = True,"
if i.lower() in ['output', 'out']:
doc_str = ": [str, bool, 'SASdata' ] = None,"
doc_mstr = ''.join([':parm ', i,
': The {} variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".'.format(i)])
if i.lower() in ['cls']:
doc_mstr = ''.join([':parm ', i,
': The {} variable can be a string or list type. It refers to the categorical, or nominal variables.'.format(i)])
doc_str = ': [str, list] = None,'
if i.lower() in ['id', 'by']:
doc_mstr = ''.join([':parm ', i, ': The {} variable can be a string or list type. '.format(i)])
doc_str = ': [str, list] = None,'
if i.lower() in ['level', 'irregular', 'slope', 'estimate']:
doc_str = ": [str, bool] = True,"
doc_list.append(''.join([i, doc_str, '\n']))
doc_markup.append(''.join([doc_mstr, '\n']))
doc_list.sort()
doc_markup.sort()
# add procopts and stmtpassthrough last for each proc
for j in generic_terms:
            doc_list.append(''.join([j, ': str = None,', '\n']))
doc_mstr = ''.join([':parm ', j,
': The {} variable is a generic option available for advanced use. It can only be a string type.'.format(j)])
doc_markup.append(''.join([doc_mstr, '\n']))
doc_markup.insert(0, ''.join([':param data: SASdata object or string. This parameter is required..', '\n']))
first_line = ''.join(["data: ['SASdata', str] = None,", '\n'])
if len(proc) > 0:
first_line = ''.join(["def {}(self, data: ['SASdata', str] = None,".format(proc), '\n'])
doc_markup.insert(0, ''.join(['Python method to call the {} procedure.\n'.format(proc.upper()),
'\n', 'Documentation link:', '\n\n']))
doc_list.insert(0, first_line)
doc_list.append("**kwargs: dict) -> 'SASresults':")
doc_markup.append(''.join([':return: SAS Result Object', '\n']))
return {'method_stmt' : ''.join(doc_list), 'markup_stmt' : ''.join(doc_markup)}
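# Hypothetical usage sketch (names are illustrative only, not part of this module):
#
#   stmts = {'data', 'cls', 'target', 'procopts', 'stmtpassthrough'}
#   out = procDecorator.doc_convert(stmts, 'hpsplit')
#   print(out['method_stmt'])   # signature lines to paste into the product file
#   print(out['markup_stmt'])   # docstring lines to paste into the product file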
| 49.88785 | 153 | 0.541027 |
76470965557059dc62f6a920d5bcf1ea319f2dc4 | 9,834 | py | Python | scipy/fft/tests/test_helper.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | scipy/fft/tests/test_helper.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | scipy/fft/tests/test_helper.py | ikamensh/scipy | d645404be21b7c0b1e7ba24bf8d525b624aeb848 | [
"BSD-3-Clause"
] | null | null | null | from scipy.fft._helper import next_fast_len, _init_nd_shape_and_axes
from numpy.testing import assert_equal, assert_array_equal
from pytest import raises as assert_raises
import pytest
import numpy as np
import sys
_5_smooth_numbers = [
2, 3, 4, 5, 6, 8, 9, 10,
2 * 3 * 5,
2**3 * 3**5,
2**3 * 3**3 * 5**2,
]
def test_next_fast_len():
for n in _5_smooth_numbers:
assert_equal(next_fast_len(n), n)
def _assert_n_smooth(x, n):
x_orig = x
if n < 2:
assert False
while True:
q, r = divmod(x, 2)
if r != 0:
break
x = q
for d in range(3, n+1, 2):
while True:
q, r = divmod(x, d)
if r != 0:
break
x = q
assert x == 1, \
f'x={x_orig} is not {n}-smooth, remainder={x}'
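# Note: an integer is "n-smooth" when all of its prime factors are <= n,
# e.g. 360 = 2**3 * 3**2 * 5 is 5-smooth, while 14 = 2 * 7 is not.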
class TestNextFastLen(object):
def test_next_fast_len(self):
np.random.seed(1234)
def nums():
for j in range(1, 1000):
yield j
yield 2**5 * 3**5 * 4**5 + 1
for n in nums():
m = next_fast_len(n)
_assert_n_smooth(m, 11)
assert m == next_fast_len(n, False)
m = next_fast_len(n, True)
_assert_n_smooth(m, 5)
def test_np_integers(self):
ITYPES = [np.int16, np.int32, np.int64, np.uint16, np.uint32, np.uint64]
for ityp in ITYPES:
x = ityp(12345)
testN = next_fast_len(x)
assert_equal(testN, next_fast_len(int(x)))
def testnext_fast_len_small(self):
hams = {
1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 8, 8: 8, 14: 15, 15: 15,
16: 16, 17: 18, 1021: 1024, 1536: 1536, 51200000: 51200000
}
for x, y in hams.items():
assert_equal(next_fast_len(x, True), y)
@pytest.mark.xfail(sys.maxsize < 2**32,
reason="Hamming Numbers too large for 32-bit",
raises=ValueError, strict=True)
def testnext_fast_len_big(self):
hams = {
510183360: 510183360, 510183360 + 1: 512000000,
511000000: 512000000,
854296875: 854296875, 854296875 + 1: 859963392,
196608000000: 196608000000, 196608000000 + 1: 196830000000,
8789062500000: 8789062500000, 8789062500000 + 1: 8796093022208,
206391214080000: 206391214080000,
206391214080000 + 1: 206624260800000,
470184984576000: 470184984576000,
470184984576000 + 1: 470715894135000,
7222041363087360: 7222041363087360,
7222041363087360 + 1: 7230196133913600,
# power of 5 5**23
11920928955078125: 11920928955078125,
11920928955078125 - 1: 11920928955078125,
# power of 3 3**34
16677181699666569: 16677181699666569,
16677181699666569 - 1: 16677181699666569,
# power of 2 2**54
18014398509481984: 18014398509481984,
18014398509481984 - 1: 18014398509481984,
# above this, int(ceil(n)) == int(ceil(n+1))
19200000000000000: 19200000000000000,
19200000000000000 + 1: 19221679687500000,
288230376151711744: 288230376151711744,
288230376151711744 + 1: 288325195312500000,
288325195312500000 - 1: 288325195312500000,
288325195312500000: 288325195312500000,
288325195312500000 + 1: 288555831593533440,
}
for x, y in hams.items():
assert_equal(next_fast_len(x, True), y)
def test_keyword_args(self):
assert next_fast_len(11, real=True) == 12
assert next_fast_len(target=7, real=False) == 7
class Test_init_nd_shape_and_axes(object):
def test_py_0d_defaults(self):
x = np.array(4)
shape = None
axes = None
shape_expected = np.array([])
axes_expected = np.array([])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_0d_defaults(self):
x = np.array(7.)
shape = None
axes = None
shape_expected = np.array([])
axes_expected = np.array([])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_py_1d_defaults(self):
x = np.array([1, 2, 3])
shape = None
axes = None
shape_expected = np.array([3])
axes_expected = np.array([0])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_1d_defaults(self):
x = np.arange(0, 1, .1)
shape = None
axes = None
shape_expected = np.array([10])
axes_expected = np.array([0])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_py_2d_defaults(self):
x = np.array([[1, 2, 3, 4],
[5, 6, 7, 8]])
shape = None
axes = None
shape_expected = np.array([2, 4])
axes_expected = np.array([0, 1])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_2d_defaults(self):
x = np.arange(0, 1, .1).reshape(5, 2)
shape = None
axes = None
shape_expected = np.array([5, 2])
axes_expected = np.array([0, 1])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_defaults(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = None
axes = None
shape_expected = np.array([6, 2, 5, 3, 4])
axes_expected = np.array([0, 1, 2, 3, 4])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_shape(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = [10, -1, -1, 1, 4]
axes = None
shape_expected = np.array([10, 2, 5, 1, 4])
axes_expected = np.array([0, 1, 2, 3, 4])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_axes(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = None
axes = [4, 1, 2]
shape_expected = np.array([4, 2, 5])
axes_expected = np.array([4, 1, 2])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_np_5d_set_shape_axes(self):
x = np.zeros([6, 2, 5, 3, 4])
shape = [10, -1, 2]
axes = [1, 0, 3]
shape_expected = np.array([10, 6, 2])
axes_expected = np.array([1, 0, 3])
shape_res, axes_res = _init_nd_shape_and_axes(x, shape, axes)
assert_equal(shape_res, shape_expected)
assert_equal(axes_res, axes_expected)
def test_shape_axes_subset(self):
x = np.zeros((2, 3, 4, 5))
shape, axes = _init_nd_shape_and_axes(x, shape=(5, 5, 5), axes=None)
assert_array_equal(shape, [5, 5, 5])
assert_array_equal(axes, [1, 2, 3])
def test_errors(self):
x = np.zeros(1)
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=None, axes=[[1, 2], [3, 4]])
with assert_raises(ValueError, match="axes must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=None, axes=[1., 2., 3., 4.])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
_init_nd_shape_and_axes(x, shape=None, axes=[1])
with assert_raises(ValueError,
match="axes exceeds dimensionality of input"):
_init_nd_shape_and_axes(x, shape=None, axes=[-2])
with assert_raises(ValueError,
match="all axes must be unique"):
_init_nd_shape_and_axes(x, shape=None, axes=[0, 0])
with assert_raises(ValueError, match="shape must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=[[1, 2], [3, 4]], axes=None)
with assert_raises(ValueError, match="shape must be a scalar or "
"iterable of integers"):
_init_nd_shape_and_axes(x, shape=[1., 2., 3., 4.], axes=None)
with assert_raises(ValueError,
match="when given, axes and shape arguments"
" have to be of the same length"):
_init_nd_shape_and_axes(np.zeros([1, 1, 1, 1]),
shape=[1, 2, 3], axes=[1])
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[0\]\) specified"):
_init_nd_shape_and_axes(x, shape=[0], axes=None)
with assert_raises(ValueError,
match="invalid number of data points"
r" \(\[-2\]\) specified"):
_init_nd_shape_and_axes(x, shape=-2, axes=None)
| 32.562914 | 80 | 0.570775 |
965d54140bd3fc369881fe15071d5121ce45dbce | 4,121 | py | Python | custom_components/fpl/sensor_KWHSensor.py | flyboy013/hass-fpl | 5eec090c2c49dc07ba0e2fe6bbb341c917a281d1 | [
"MIT"
] | 1 | 2022-02-15T17:42:00.000Z | 2022-02-15T17:42:00.000Z | custom_components/fpl/sensor_KWHSensor.py | zoopster/hass-fpl | 8e3681070127a91074eb05a21d29d22b8ceb6758 | [
"MIT"
] | null | null | null | custom_components/fpl/sensor_KWHSensor.py | zoopster/hass-fpl | 8e3681070127a91074eb05a21d29d22b8ceb6758 | [
"MIT"
] | null | null | null | from .fplEntity import FplEntity
class ProjectedKWHSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Projected")
@property
def state(self):
try:
self._state = self.getData("projectedKWH")
except:
pass
return self._state
@property
def icon(self):
return "mdi:flash"
def defineAttributes(self):
"""Return the state attributes."""
attributes = {}
attributes["friendly_name"] = "Projected KWH"
attributes["device_class"] = "energy"
attributes["state_class"] = "total"
attributes["unit_of_measurement"] = "kWh"
return attributes
class DailyAverageKWHSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Daily Average")
@property
def state(self):
try:
self._state = self.getData("dailyAverageKWH")
except:
pass
return self._state
@property
def icon(self):
return "mdi:flash"
def defineAttributes(self):
"""Return the state attributes."""
attributes = {}
attributes["friendly_name"] = "Daily Average"
attributes["device_class"] = "energy"
attributes["state_class"] = "total"
attributes["unit_of_measurement"] = "kWh"
return attributes
class BillToDateKWHSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Bill To Date")
@property
def state(self):
try:
self._state = self.getData("billToDateKWH")
except:
pass
return self._state
@property
def icon(self):
return "mdi:flash"
def defineAttributes(self):
"""Return the state attributes."""
attributes = {}
attributes["friendly_name"] = "Billing Usage"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if self.getData("billStartDate") is not None:
attributes["last_reset"] = self.getData("billStartDate")
return attributes
class NetReceivedKWHSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Received Reading")
@property
def state(self):
try:
self._state = self.getData("recMtrReading")
except:
pass
return self._state
@property
def icon(self):
return "mdi:flash"
def defineAttributes(self):
"""Return the state attributes."""
attributes = {}
attributes["friendly_name"] = "Meter Return to Grid"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if self.getData("billStartDate") is not None:
attributes["last_reset"] = self.getData("billStartDate")
return attributes
class NetDeliveredKWHSensor(FplEntity):
def __init__(self, coordinator, config, account):
super().__init__(coordinator, config, account, "Delivered Reading")
@property
def state(self):
try:
self._state = self.getData("delMtrReading")
except:
try:
self._state = self.getData("billToDateKWH")
except:
pass
return self._state
@property
def icon(self):
return "mdi:flash"
def defineAttributes(self):
"""Return the state attributes."""
attributes = {}
attributes["friendly_name"] = "Meter Consumption"
attributes["device_class"] = "energy"
attributes["state_class"] = "total_increasing"
attributes["unit_of_measurement"] = "kWh"
if self.getData("billStartDate") is not None:
attributes["last_reset"] = self.getData("billStartDate")
return attributes
| 28.818182 | 75 | 0.609561 |
9b00d7326222ce39f4041581152ee9906e605a47 | 702 | py | Python | prplatform/exercises/migrations/0005_question.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | 3 | 2018-10-07T18:50:01.000Z | 2020-07-29T14:43:51.000Z | prplatform/exercises/migrations/0005_question.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | 9 | 2019-08-26T11:55:00.000Z | 2020-05-04T13:56:06.000Z | prplatform/exercises/migrations/0005_question.py | piehei/prplatform | f3248b66019f207bb06a4681a62057e175408b3e | [
"MIT"
] | null | null | null | # Generated by Django 2.0.4 on 2018-05-17 07:02
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exercises', '0004_auto_20180511_0850'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=200)),
('exercise', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='exercises.ReviewExercise')),
],
),
]
| 30.521739 | 150 | 0.623932 |
25836a56a6bbdd9bcc18bac8817c4cd368f76453 | 391 | py | Python | api/src/producer/migrations/0006_product_disabled.py | asermax/msa | 2d74167f5da661517e7deaea05a5a29ba677c206 | [
"MIT"
] | null | null | null | api/src/producer/migrations/0006_product_disabled.py | asermax/msa | 2d74167f5da661517e7deaea05a5a29ba677c206 | [
"MIT"
] | null | null | null | api/src/producer/migrations/0006_product_disabled.py | asermax/msa | 2d74167f5da661517e7deaea05a5a29ba677c206 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-10-05 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('producer', '0005_auto_20180930_0113'),
]
operations = [
migrations.AddField(
model_name='product',
name='disabled',
field=models.BooleanField(default=False),
),
]
| 20.578947 | 53 | 0.603581 |
b50cdf9c8745b493191391f00a33d708c7fd8125 | 41 | py | Python | plugins/admin/exec.py | Cha14ka/KBot5 | 883b0607b64566c92366d7e6b7a6984b2dadb3cf | [
"MIT"
] | null | null | null | plugins/admin/exec.py | Cha14ka/KBot5 | 883b0607b64566c92366d7e6b7a6984b2dadb3cf | [
"MIT"
] | 1 | 2019-06-28T17:23:23.000Z | 2019-06-28T17:23:23.000Z | plugins/admin/exec.py | Cha14ka/KBot5 | 883b0607b64566c92366d7e6b7a6984b2dadb3cf | [
"MIT"
] | 2 | 2019-07-28T20:48:33.000Z | 2019-09-20T11:17:57.000Z | exec(pack['user_text'].replace('»',' '))
| 20.5 | 40 | 0.585366 |
891e22abd6525461dbbea103feb3aa1e8b378526 | 534 | py | Python | python3/1221(accepted).py | lhmongelos/dev-uri-problems | eb20789178563ddcdc2d3d2243e7205b92c788f5 | [
"MIT"
] | null | null | null | python3/1221(accepted).py | lhmongelos/dev-uri-problems | eb20789178563ddcdc2d3d2243e7205b92c788f5 | [
"MIT"
] | 1 | 2019-08-06T01:42:49.000Z | 2019-08-06T01:46:51.000Z | python3/1221(accepted).py | lhmongelos/dev-uri-problems | eb20789178563ddcdc2d3d2243e7205b92c788f5 | [
"MIT"
] | 1 | 2019-10-01T01:24:56.000Z | 2019-10-01T01:24:56.000Z | ############################################################
### Celso "Shaggy" Antonio - Nov 2017
############################################################
def isPrime(x):
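    # Trial division: 1 is not prime, 2 is prime, other even numbers are rejected,
    # then only odd divisors up to sqrt(x) are tested.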
if(x == 1):
return 0
if(x == 2):
return 1
if(x % 2 == 0):
return 0
num = 3
while(num * num <= x):
if(x % num == 0):
return 0
num += 2
return 1
n = int(input())
for i in range(n):
y = int(input())
if(isPrime(y)):
print('Prime')
else:
print('Not Prime') | 21.36 | 60 | 0.333333 |
350d0d20f205f77d2a240eee0c41fb17d437e206 | 4,536 | py | Python | src/data/make_dataset.py | cdipaolo/inflection | 2e667bc351196db3c12f8e13b150455483040c82 | [
"MIT"
] | 2 | 2019-07-15T14:26:45.000Z | 2019-10-14T13:11:29.000Z | src/data/make_dataset.py | cdipaolo/inflection | 2e667bc351196db3c12f8e13b150455483040c82 | [
"MIT"
] | null | null | null | src/data/make_dataset.py | cdipaolo/inflection | 2e667bc351196db3c12f8e13b150455483040c82 | [
"MIT"
] | 2 | 2017-04-08T07:22:59.000Z | 2020-05-20T22:50:05.000Z | # make_dataset.py
#
# Takes the raw Yelp Academic Dataset in
# /data/raw and uploads it into an sqlite3
# database.
from math import floor
import sqlite3
import json
# whether to upload which sections of the data
# into the sqlite3 db
BUS, USER, CHECKIN, REVIEW = True, True, True, True
# lengths of the different files for
# progress reports
lengths = {
"business": 77445,
"user": 552339,
"checkin": 55569,
"review": 2225213
}
conn = sqlite3.connect('data/yelp.db')
c = conn.cursor()
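# The INSERT/UPDATE statements below assume the tables already exist.
# A minimal schema consistent with the columns used in this script
# (the column types are assumptions) would be:
c.execute('''CREATE TABLE IF NOT EXISTS business
             (business_id TEXT PRIMARY KEY, name TEXT, city TEXT, state TEXT,
              longitude REAL, latitude REAL, stars REAL, review_count INTEGER,
              categories TEXT, attributes TEXT, type TEXT, checkin_info TEXT)''')
c.execute('''CREATE TABLE IF NOT EXISTS users
             (user_id TEXT PRIMARY KEY, name TEXT, review_count INTEGER,
              average_stars REAL)''')
c.execute('''CREATE TABLE IF NOT EXISTS review
             (user_id TEXT, business_id TEXT, stars REAL, text TEXT, timestamp TEXT)''')
conn.commit()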
# read through json dataset line by
# line, uploading data to sqlite3.
if BUS:
print('==> Uploading business data to sqlite')
with open('data/raw/yelp_academic_dataset_business.json', 'r') as f:
i = 0
total = lengths["business"]
for business in f:
# convert from string json object
# to a map
b = json.loads(business)
# insert the business into the
# database
c.execute('''INSERT INTO business
(business_id, name, city, state,
longitude, latitude, stars, review_count,
categories, attributes, type)
VALUES
(?,?,?,?,?,?,?,?,?,?,?)''',
(b['business_id'], b['name'], b['city'],
b['state'], b['longitude'], b['latitude'],
b['stars'], b['review_count'], json.dumps(b['categories']),
json.dumps(b['attributes']), b['type']
)
)
i += 1
if i%500 == 0:
conn.commit()
if i % floor(total/10) == 0:
print('-- uploaded business {}/{} = {:0.2f}'.format(i,total, i/total))
print('==> Finished uploading business data')
if USER:
print('==> Uploading user data to sqlite')
with open('data/raw/yelp_academic_dataset_user.json', 'r') as f:
i = 0
total = lengths["user"]
for user in f:
# convert user from json string to map
u = json.loads(user)
# insert user into db
c.execute('''INSERT INTO users
(user_id, name, review_count,
average_stars)
VALUES
(?,?,?,?)''',
(u['user_id'], u['name'], u['review_count'],
u['average_stars'])
)
i += 1
if i%500 == 0:
conn.commit()
if i % floor(total/10) == 0:
print('-- uploaded user {}/{} = {:0.2f}'.format(i,total, i/total))
print('==> Finished uploading user data')
if CHECKIN:
print('==> Uploading check-in data to business dataset')
with open('data/raw/yelp_academic_dataset_checkin.json', 'r') as f:
i = 0
total = lengths["checkin"]
for checkin in f:
# convert checkin from json to map
ch = json.loads(checkin)
# add checkin data to business table
c.execute('''UPDATE business
SET checkin_info = (?)
WHERE business_id == ?''',
(json.dumps(ch['checkin_info']), ch['business_id'])
)
i += 1
if i%500 == 0:
conn.commit()
if i % floor(total/10) == 0:
print('-- uploaded checkin {}/{} = {:0.2f}'.format(i,total, i/total))
print('==> Finished uploading checkin data')
if REVIEW:
print('==> Uploading review data to sqlite')
with open('data/raw/yelp_academic_dataset_review.json','r') as f:
i = 0
total = lengths["review"]
for review in f:
# convert review from json to map
r = json.loads(review)
# add review into db
c.execute('''INSERT INTO review
(user_id, business_id, stars, text, timestamp)
VALUES
(?,?,?,?,?)''',
(r['user_id'], r['business_id'], r['stars'],
r['text'], r['date'])
)
i += 1
if i%500 == 0:
conn.commit()
if i % floor(total/10) == 0:
print('-- uploaded review {}/{} = {:0.2f}'.format(i,total, i/total))
print('==> Finished uploading review data')
conn.commit()
conn.close()
| 34.105263 | 87 | 0.474868 |
ca5485902d49301e6371c69ed1e4342c104ef86d | 301 | py | Python | ex44c.py | shwezinoo/python-excercises | f2507ad8ec161bb1e12a3667946f2cb48c422bb4 | [
"MIT"
] | null | null | null | ex44c.py | shwezinoo/python-excercises | f2507ad8ec161bb1e12a3667946f2cb48c422bb4 | [
"MIT"
] | null | null | null | ex44c.py | shwezinoo/python-excercises | f2507ad8ec161bb1e12a3667946f2cb48c422bb4 | [
"MIT"
] | null | null | null | class Parent(object):
def altered(self):
print "PaRENT altered()"
class Child(Parent):
def altered(self):
print "Child,Before parent altered()"
super(Child,self).altered()
print "Child,Afer parent altered()"
dad = Parent()
son = Child()
dad.altered()
son.altered()
| 23.153846 | 45 | 0.641196 |
6e6c40350031a76204c1a49b0f28ecd8e8556712 | 1,561 | py | Python | datasets/util/common/jpg_to_raw.py | mariecwhite/mobile_app_open | 811bfb98d0f2f92924901d5958eeb667cf7a0c99 | [
"Apache-2.0"
] | 9 | 2021-09-03T06:27:34.000Z | 2022-02-23T04:17:06.000Z | datasets/util/common/jpg_to_raw.py | mariecwhite/mobile_app_open | 811bfb98d0f2f92924901d5958eeb667cf7a0c99 | [
"Apache-2.0"
] | 249 | 2021-08-03T13:51:32.000Z | 2022-03-31T13:11:48.000Z | datasets/util/common/jpg_to_raw.py | mariecwhite/mobile_app_open | 811bfb98d0f2f92924901d5958eeb667cf7a0c99 | [
"Apache-2.0"
] | 9 | 2021-09-03T06:01:39.000Z | 2022-03-22T04:45:02.000Z | #!/usr/bin/env python3
# Copyright (c) 2020-2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
import os
import sys
import numpy as np
from os import listdir
from os.path import isfile, join, isdir
from io import BytesIO
from PIL import Image
model_name = sys.argv[1]
DATA_PATH = model_name
RAW_DATA_PATH = model_name + "_raw"
image_list = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f))]
for image in image_list:
src = DATA_PATH + '/' + image
dst = RAW_DATA_PATH + '/' + image
with open(src, 'rb') as f:
jpeg_str = f.read()
original_im = Image.open(BytesIO(jpeg_str))
converted_image = original_im.convert('RGB')
npimage = np.asarray(converted_image).astype(np.float32)
npimage = npimage * 0.00784313771874
npimage = npimage - 1.0
img_ndarray = np.array(npimage)
tmp = dst.split(".")
tmp[-1] = "raw"
f_name = ".".join(tmp)
npimage.astype(np.float32).tofile(f_name)
| 34.688889 | 79 | 0.680333 |
9088fc46bad47fe172961b85b535b9d47beadf51 | 698 | py | Python | gemstone/plugins/__init__.py | vladcalin/pymicroservice | 325a49d17621b9d45ffd2b5eca6f0de284de8ba4 | [
"MIT"
] | 2 | 2016-12-17T13:09:14.000Z | 2016-12-31T18:38:57.000Z | gemstone/plugins/__init__.py | vladcalin/pymicroservice | 325a49d17621b9d45ffd2b5eca6f0de284de8ba4 | [
"MIT"
] | 15 | 2016-11-27T13:28:25.000Z | 2017-01-10T09:09:30.000Z | gemstone/plugins/__init__.py | vladcalin/pymicroservice | 325a49d17621b9d45ffd2b5eca6f0de284de8ba4 | [
"MIT"
] | null | null | null | """
This module provides various tools to create general use plugins for the microservice.
A plugin can override specific attributes from the :py:class:`BasePlugin` class that will
be called in specific situations.
Also, a plugin can define extra methods that can be used later inside the method calls,
as shown in the following example:
::
@gemstone.core.exposed_method()
def say_hello(self, name):
self.get_plugin("remote_logging").log_info("Somebody said hello to {}".format(name))
return True
"""
from .base import BasePlugin
from .error import MissingPluginNameError, PluginError
__all__ = [
'BasePlugin',
'MissingPluginNameError',
'PluginError'
]
| 24.068966 | 92 | 0.737822 |
e17a7840b462bfca66063b67c04cf98cc2f5186a | 2,236 | py | Python | tests/tasks/github/test_issues.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | tests/tasks/github/test_issues.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | tests/tasks/github/test_issues.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | from unittest.mock import MagicMock
import pytest
import prefect
from prefect.tasks.github import OpenGitHubIssue
from prefect.utilities.configuration import set_temporary_config
class TestOpenGithubIssueInitialization:
def test_initializes_with_nothing_and_sets_defaults(self):
task = OpenGitHubIssue()
assert task.repo is None
assert task.title is None
assert task.body is None
assert task.labels == []
assert task.token_secret == "GITHUB_ACCESS_TOKEN"
def test_additional_kwargs_passed_upstream(self):
task = OpenGitHubIssue(name="test-task", checkpoint=True, tags=["bob"])
assert task.name == "test-task"
assert task.checkpoint is True
assert task.tags == {"bob"}
@pytest.mark.parametrize(
"attr", ["repo", "body", "title", "labels", "token_secret"]
)
def test_initializes_attr_from_kwargs(self, attr):
task = OpenGitHubIssue(**{attr: "my-value"})
assert getattr(task, attr) == "my-value"
def test_repo_is_required_eventually(self):
task = OpenGitHubIssue()
with pytest.raises(ValueError) as exc:
task.run()
assert "repo" in str(exc.value)
class TestCredentialsandProjects:
def test_creds_are_pulled_from_secret_at_runtime(self, monkeypatch):
task = OpenGitHubIssue()
req = MagicMock()
monkeypatch.setattr("prefect.tasks.github.issues.requests", req)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(secrets=dict(GITHUB_ACCESS_TOKEN={"key": 42})):
task.run(repo="org/repo")
assert req.post.call_args[1]["headers"]["AUTHORIZATION"] == "token {'key': 42}"
def test_creds_secret_can_be_overwritten(self, monkeypatch):
task = OpenGitHubIssue(token_secret="MY_SECRET")
req = MagicMock()
monkeypatch.setattr("prefect.tasks.github.issues.requests", req)
with set_temporary_config({"cloud.use_local_secrets": True}):
with prefect.context(secrets=dict(MY_SECRET={"key": 42})):
task.run(repo="org/repo")
assert req.post.call_args[1]["headers"]["AUTHORIZATION"] == "token {'key': 42}"
| 35.492063 | 87 | 0.670841 |
91ab3ac3869d27ab744196cbf8657af8b2719abb | 9,129 | py | Python | sdk/python/pulumi_azure_native/network/v20160330/get_public_ip_address.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20160330/get_public_ip_address.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20160330/get_public_ip_address.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
"""
PublicIPAddress resource
"""
def __init__(__self__, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, resource_guid=None, tags=None, type=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if ip_configuration and not isinstance(ip_configuration, dict):
raise TypeError("Expected argument 'ip_configuration' to be a dict")
pulumi.set(__self__, "ip_configuration", ip_configuration)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version and not isinstance(public_ip_address_version, str):
raise TypeError("Expected argument 'public_ip_address_version' to be a str")
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
Gets or sets FQDN of the DNS record associated with the public IP address
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Gets a unique read-only string that changes whenever the resource is updated
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
Gets or sets the idle timeout of the public IP address
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> Optional['outputs.IPConfigurationResponse']:
"""
IPConfiguration
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets or sets Provisioning state of the PublicIP resource Updating/Deleting/Failed
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
Gets or sets PublicIP address version (IPv4/IPv6)
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
Gets or sets PublicIP allocation method (Static/Dynamic)
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
Gets or sets resource GUID property of the PublicIP resource
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPublicIPAddressResult(
dns_settings=self.dns_settings,
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
ip_address=self.ip_address,
ip_configuration=self.ip_configuration,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_address_version=self.public_ip_address_version,
public_ip_allocation_method=self.public_ip_allocation_method,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_public_ip_address(expand: Optional[str] = None,
public_ip_address_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
"""
PublicIPAddress resource
:param str expand: expand references resources.
:param str public_ip_address_name: The name of the subnet.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['publicIpAddressName'] = public_ip_address_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20160330:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
return AwaitableGetPublicIPAddressResult(
dns_settings=__ret__.dns_settings,
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
ip_address=__ret__.ip_address,
ip_configuration=__ret__.ip_configuration,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_address_version=__ret__.public_ip_address_version,
public_ip_allocation_method=__ret__.public_ip_allocation_method,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
| 38.682203 | 295 | 0.666667 |
a90306f769ac42c9bfa7d8a4de28716aec9b4d18 | 2,118 | py | Python | dvc/scm/tree.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:28:11.000Z | 2019-09-02T00:28:11.000Z | dvc/scm/tree.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | null | null | null | dvc/scm/tree.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | [
"Apache-2.0"
] | 1 | 2019-09-02T00:29:40.000Z | 2019-09-02T00:29:40.000Z | import os
from dvc.utils import dvc_walk
from dvc.utils.compat import open
class BaseTree(object):
"""Abstract class to represent access to files"""
@property
def tree_root(self):
pass
def open(self, path, binary=False):
"""Open file and return a stream."""
def exists(self, path):
"""Test whether a path exists."""
def isdir(self, path):
"""Return true if the pathname refers to an existing directory."""
def isfile(self, path):
"""Test whether a path is a regular file"""
def walk(self, top, topdown=True, dvcignore=None):
"""Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument
"""
class WorkingTree(BaseTree):
"""Proxies the repo file access methods to working tree files"""
def __init__(self, repo_root=os.getcwd()):
self.repo_root = repo_root
@property
def tree_root(self):
return self.repo_root
def open(self, path, binary=False):
"""Open file and return a stream."""
if binary:
return open(path, "rb")
return open(path, encoding="utf-8")
def exists(self, path):
"""Test whether a path exists."""
return os.path.exists(path)
def isdir(self, path):
"""Return true if the pathname refers to an existing directory."""
return os.path.isdir(path)
def isfile(self, path):
"""Test whether a path is a regular file"""
return os.path.isfile(path)
def walk(self, top, topdown=True, dvcignore=None):
"""Directory tree generator.
See `os.walk` for the docs. Differences:
- no support for symlinks
- it could raise exceptions, there is no onerror argument
"""
assert dvcignore
def onerror(e):
raise e
for root, dirs, files in dvc_walk(
os.path.abspath(top), dvcignore, topdown=topdown, onerror=onerror
):
yield os.path.normpath(root), dirs, files
| 26.475 | 77 | 0.609065 |
dc4ef0b2273aa7cff23f286493417b48d7fbbe6d | 262 | py | Python | dealer/dealer/doctype/cuotas_de_pago/cuotas_de_pago.py | josmeldiaz21/dealer | 16fb46db6d1b23c666fab9a8c708a9a7fd033736 | [
"MIT"
] | null | null | null | dealer/dealer/doctype/cuotas_de_pago/cuotas_de_pago.py | josmeldiaz21/dealer | 16fb46db6d1b23c666fab9a8c708a9a7fd033736 | [
"MIT"
] | null | null | null | dealer/dealer/doctype/cuotas_de_pago/cuotas_de_pago.py | josmeldiaz21/dealer | 16fb46db6d1b23c666fab9a8c708a9a7fd033736 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Josmel Diaz and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class CuotasdePago(Document):
pass
| 23.818182 | 50 | 0.774809 |
049e5c83cb0e2e8eeb0cd014e702ec7c1e8c90ae | 8,716 | py | Python | plugins/modules/oci_key_management_decrypted_data.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_key_management_decrypted_data.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_key_management_decrypted_data.py | sohwaje/oci-ansible-collection | 9e6b8cf55e596a96560710a457a7df05886fc59c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_key_management_decrypted_data
short_description: Manage a DecryptedData resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create a DecryptedData resource in Oracle Cloud Infrastructure
- For I(state=present), decrypts data using the given L(DecryptDataDetails,https://docs.cloud.oracle.com/api/#/en/key/latest/datatypes/DecryptDataDetails)
resource.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
associated_data:
description:
- Information that can be used to provide an encryption context for the encrypted data.
The length of the string representation of the associated data must be fewer than 4096 characters.
type: dict
ciphertext:
description:
- The encrypted data to decrypt.
type: str
required: true
key_id:
description:
- The OCID of the key used to encrypt the ciphertext.
type: str
required: true
logging_context:
description:
- Information that provides context for audit logging. You can provide this additional
data as key-value pairs to include in audit logs when audit logging is enabled.
type: dict
key_version_id:
description:
- The OCID of the key version used to encrypt the ciphertext.
type: str
encryption_algorithm:
description:
- The encryption algorithm to use to encrypt or decrypt data with a customer-managed key.
`AES_256_GCM` indicates that the key is a symmetric key that uses the Advanced Encryption Standard (AES) algorithm and
that the mode of encryption is the Galois/Counter Mode (GCM). `RSA_OAEP_SHA_1` indicates that the
key is an asymmetric key that uses the RSA encryption algorithm and uses Optimal Asymmetric Encryption Padding (OAEP).
`RSA_OAEP_SHA_256` indicates that the key is an asymmetric key that uses the RSA encryption algorithm with a SHA-256 hash
and uses OAEP.
type: str
choices:
- "AES_256_GCM"
- "RSA_OAEP_SHA_1"
- "RSA_OAEP_SHA_256"
service_endpoint:
description:
- The endpoint of the service to call using this client. For example 'https://kms.{region}.{secondLevelDomain}'.
type: str
required: true
state:
description:
- The state of the DecryptedData.
- Use I(state=present) to create a DecryptedData.
type: str
required: false
default: 'present'
choices: ["present"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource ]
"""
EXAMPLES = """
- name: Create decrypted_data
oci_key_management_decrypted_data:
ciphertext: "AAwgpauIe9AAAM6dU7pS7AKwmDFyXOqNh0uAvNY9a3E95rw7Ae3LZNBnDtHWdkB1l/pIDBfg"
service_endpoint: "https://xxx.kms.{region}.oraclecloud.com"
"""
RETURN = """
decrypted_data:
description:
- Details of the DecryptedData resource acted upon by the current operation
returned: on success
type: complex
contains:
plaintext:
description:
- The decrypted data, expressed as a base64-encoded value.
returned: on success
type: str
sample: plaintext_example
plaintext_checksum:
description:
- The checksum of the decrypted data.
returned: on success
type: str
sample: plaintext_checksum_example
key_id:
description:
- The OCID of the key used to encrypt the ciphertext.
returned: on success
type: str
sample: "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx"
key_version_id:
description:
- The OCID of the key version used to encrypt the ciphertext.
returned: on success
type: str
sample: "ocid1.keyversion.oc1..xxxxxxEXAMPLExxxxxx"
encryption_algorithm:
description:
- The encryption algorithm to use to encrypt and decrypt data with a customer-managed key
`AES_256_GCM` indicates that the key is a symmetric key that uses the Advanced Encryption Standard (AES) algorithm and
that the mode of encryption is the Galois/Counter Mode (GCM). `RSA_OAEP_SHA_1` indicates that the
key is an asymmetric key that uses the RSA encryption algorithm and uses Optimal Asymmetric Encryption Padding (OAEP).
`RSA_OAEP_SHA_256` indicates that the key is an asymmetric key that uses the RSA encryption algorithm with a SHA-256 hash
and uses OAEP.
returned: on success
type: str
sample: AES_256_GCM
sample: {
"plaintext": "plaintext_example",
"plaintext_checksum": "plaintext_checksum_example",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx",
"key_version_id": "ocid1.keyversion.oc1..xxxxxxEXAMPLExxxxxx",
"encryption_algorithm": "AES_256_GCM"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.key_management import KmsCryptoClient
from oci.key_management.models import DecryptDataDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DecryptedDataHelperGen(OCIResourceHelperBase):
"""Supported operations: create"""
def get_module_resource_id(self):
return None
# There is no idempotency for this module (no get or list ops)
def get_matching_resource(self):
return None
def get_create_model_class(self):
return DecryptDataDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.decrypt,
call_fn_args=(),
call_fn_kwargs=dict(decrypt_data_details=create_details,),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_wait_for_states_for_operation(
oci_common_utils.CREATE_OPERATION_KEY,
),
)
DecryptedDataHelperCustom = get_custom_class("DecryptedDataHelperCustom")
class ResourceHelper(DecryptedDataHelperCustom, DecryptedDataHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=False
)
module_args.update(
dict(
associated_data=dict(type="dict"),
ciphertext=dict(type="str", required=True),
key_id=dict(type="str", required=True),
logging_context=dict(type="dict"),
key_version_id=dict(type="str"),
encryption_algorithm=dict(
type="str",
choices=["AES_256_GCM", "RSA_OAEP_SHA_1", "RSA_OAEP_SHA_256"],
),
service_endpoint=dict(type="str", required=True),
state=dict(type="str", default="present", choices=["present"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="decrypted_data",
service_client_class=KmsCryptoClient,
namespace="key_management",
)
result = dict(changed=False)
if resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
| 36.316667 | 158 | 0.664754 |
a518b4e4d9bd19f9aadcdbee8f80991076a2647d | 3,213 | py | Python | sme_uniforme_apps/proponentes/services.py | prefeiturasp/SME-PortalUniforme-BackEnd | d7e15dd3b9c7b9cfb621e53ec81d40f18c1fab3f | [
"MIT"
] | null | null | null | sme_uniforme_apps/proponentes/services.py | prefeiturasp/SME-PortalUniforme-BackEnd | d7e15dd3b9c7b9cfb621e53ec81d40f18c1fab3f | [
"MIT"
] | null | null | null | sme_uniforme_apps/proponentes/services.py | prefeiturasp/SME-PortalUniforme-BackEnd | d7e15dd3b9c7b9cfb621e53ec81d40f18c1fab3f | [
"MIT"
] | 1 | 2020-02-01T12:10:42.000Z | 2020-02-01T12:10:42.000Z | import logging
import requests
from django.conf import settings
from ..custom_user.models import User
from .models.lista_negra import ListaNegra
LAYERS = 'address'
BUNDARY = 'whosonfirst:locality:101965533'
API_URL = f'{settings.GEOREF_API_URL}/v1/search'
log = logging.getLogger(__name__)
def cnpj_esta_bloqueado(cnpj):
return ListaNegra.cnpj_bloqueado(cnpj)
def cria_usuario_proponentes_existentes(queryset):
for proponente in queryset.all():
if not proponente.usuario:
novo_usuario = User.objects.create_user(email=proponente.email,
first_name=proponente.responsavel.split(" ")[0],
last_name=" ".join(proponente.responsavel.split(" ")[1:]),
password="".join([n for n in proponente.cnpj if n.isdigit()])[:5])
proponente.usuario = novo_usuario
proponente.save()
def muda_status_de_proponentes(queryset, novo_status):
for proponente in queryset.all():
if proponente.status != novo_status:
proponente.status = novo_status
proponente.save()
if novo_status == "CREDENCIADO":
atualiza_coordenadas_lojas(proponente.lojas)
def envia_email_pendencias(queryset):
for proponente in queryset.all():
proponente.comunicar_pendencia()
def atualiza_coordenadas(queryset):
for proponente in queryset.all():
atualiza_coordenadas_lojas(proponente.lojas)
def atualiza_coordenadas_lojas(lojas):
log.info("Atualizando coordendas das lojas físicas")
for loja in lojas.all():
params = {
'text': f'{loja.endereco}, {loja.numero}, {loja.bairro}, {loja.cep}',
'layers': LAYERS,
'boundary.gid': BUNDARY}
try:
log.info(f"Buscando coordenadas: {params}")
response = requests.get(API_URL, params=params)
log.info(f"retorno da api: {response.json()}")
loja.latitude, loja.longitude = busca_latitude_e_longitude(response.json())
loja.save()
except Exception as e:
log.info(f"Erro ao acessar georef.sme API: {e.__str__()}")
def busca_latitude_e_longitude(payload):
if not payload['features']:
raise Exception(f"API não retornou dados válidos: {payload}")
    # The georef.sme API returns (longitude, latitude),
    # but this function returns (latitude, longitude)
return payload['features'][0]['geometry']['coordinates'][::-1]
def haversine(lat, lon):
"""
    Haversine formula used to fetch the stores ordered by distance.
    To limit the results to a maximum distance, uncomment the WHERE line.
"""
return f"""
SELECT id
FROM ( SELECT
id,
111.045 * DEGREES(ACOS(COS(RADIANS({lat}))
* COS(RADIANS(latitude))
* COS(RADIANS(longitude) - RADIANS({lon})) + SIN(RADIANS({lat}))
* SIN(RADIANS(latitude)))) AS distance_in_km
FROM proponentes_loja) as distancias
-- WHERE distancias.distance_in_km <= 10
"""
| 34.923913 | 118 | 0.617803 |
fe693358f7b9b5209aa73131d69644ed409f59a4 | 501 | py | Python | fairseq/examples/simultaneous_translation/criterions/__init__.py | VictorBeraldo/wave2vec-recognize-docker | aec0c7d718c0c670c37fdef3d5ea40396e610881 | [
"MIT"
] | null | null | null | fairseq/examples/simultaneous_translation/criterions/__init__.py | VictorBeraldo/wave2vec-recognize-docker | aec0c7d718c0c670c37fdef3d5ea40396e610881 | [
"MIT"
] | null | null | null | fairseq/examples/simultaneous_translation/criterions/__init__.py | VictorBeraldo/wave2vec-recognize-docker | aec0c7d718c0c670c37fdef3d5ea40396e610881 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.simultaneous_translation.criterions." + criterion_name
)
| 31.3125 | 77 | 0.676647 |
075870574ef36d765e03748ec558541dc06d5f90 | 2,076 | py | Python | sentry_sdk/integrations/django/asgi.py | saxenanurag/sentry-python | a67cf84e88186c3cf6e541040ddb1e94df1dba98 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/asgi.py | saxenanurag/sentry-python | a67cf84e88186c3cf6e541040ddb1e94df1dba98 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/asgi.py | saxenanurag/sentry-python | a67cf84e88186c3cf6e541040ddb1e94df1dba98 | [
"BSD-2-Clause"
] | 1 | 2021-02-18T18:43:27.000Z | 2021-02-18T18:43:27.000Z | """
Instrumentation for Django 3.0
Since this file contains `async def` it is conditionally imported in
`sentry_sdk.integrations.django` (depending on the existence of
`django.core.handlers.asgi`).
"""
from sentry_sdk import Hub
from sentry_sdk._types import MYPY
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
if MYPY:
from typing import Any
from typing import Union
from django.http.response import HttpResponse
def patch_django_asgi_handler_impl(cls):
# type: (Any) -> None
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, scope, receive, send):
# type: (Any, Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, scope, receive, send)
middleware = SentryAsgiMiddleware(
old_app.__get__(self, cls), unsafe_context_data=True
)._run_asgi3
return await middleware(scope, receive, send)
cls.__call__ = sentry_patched_asgi_handler
def patch_get_response_async(cls, _before_get_response):
# type: (Any, Any) -> None
old_get_response_async = cls.get_response_async
async def sentry_patched_get_response_async(self, request):
# type: (Any, Any) -> Union[HttpResponse, BaseException]
_before_get_response(request)
return await old_get_response_async(self, request)
cls.get_response_async = sentry_patched_get_response_async
def patch_channels_asgi_handler_impl(cls):
# type: (Any) -> None
old_app = cls.__call__
async def sentry_patched_asgi_handler(self, receive, send):
# type: (Any, Any, Any) -> Any
if Hub.current.get_integration(DjangoIntegration) is None:
return await old_app(self, receive, send)
middleware = SentryAsgiMiddleware(
lambda _scope: old_app.__get__(self, cls), unsafe_context_data=True
)
return await middleware(self.scope)(receive, send)
cls.__call__ = sentry_patched_asgi_handler
| 30.985075 | 79 | 0.72158 |
a9eee95eb778e63ccf726ce09a56226e75d66295 | 2,385 | py | Python | src/visualization/animation.py | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 3 | 2020-05-17T21:56:52.000Z | 2020-12-09T04:27:31.000Z | src/visualization/animation.py | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 2 | 2020-08-06T04:58:37.000Z | 2020-08-06T05:02:37.000Z | src/visualization/animation.py | neurodata/dos_and_donts | b49a61a8aa29dbde86651bd39c9322f0eb3c0694 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T02:29:11.000Z | 2020-08-12T02:29:11.000Z | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from matplotlib.animation import FuncAnimation
LABELS = dict(
# num_edges = "# Edges",
modularity="Modularity",
density="Density",
# total_triangles = '# Triangles',
triangle_ratio="Triangle Ratio",
# is_planar="Is Planar Graph?",
avg_shortest_path_length="Avg Shortest Path",
global_clustering_coefficient="Global Clustering",
avg_clustering_coefficient="Avg Clustering",
# square_clustering="Square Clustering",
global_efficiency="Global Efficiency",
local_efficiency="Local Efficiency",
# degree_assortativity="Degree Assortativity",
# diameter = 'Diameter',
node_connectivity="Node Connectivity",
)
POSITION = nx.circular_layout(range(0, 10))
SPACING = 0.125
FONTDICT = {"family": "monospace", "weight": "normal", "size": 30}
def make_frame(graph, data, ax):
"""
graph = nx.Graph
data = pd.Series
"""
# font = FontProperties()
# font.set_family('monospace')
# fig, ax = plt.subplots(figsize=(10, 10))
nx.draw(graph, pos=POSITION, ax=ax)
# Dealing with variable values
# values we plot are based on LABELS variable
x_pos = 1.2
loc = (data.size * SPACING) / 2
y_pos = np.linspace(loc, -loc, len(LABELS))
max_char = max([len(name) for _, name in LABELS.items()])
for idx, (key, name) in enumerate(LABELS.items()):
value = data[key]
name = name.ljust(max_char) + ": "
if not np.issubdtype(value.dtype, np.bool_):
text = name + "{: .9f}".format(value)
ax.text(x_pos, y_pos[idx], text, fontdict=FONTDICT, alpha=0.3)
ax.text(x_pos, y_pos[idx], text[:-7], fontdict=FONTDICT, alpha=1)
else:
text = f"{name} {value}"
ax.text(x_pos, y_pos[idx], text, fontdict=FONTDICT, alpha=1)
def make_gif(graphs, df, name="visualization.gif"):
indices = df.index
graphs_subset = [graphs[i] for i in indices]
fig, ax = plt.subplots(figsize=(21, 10))
def update(i):
ax.clear()
g = graphs_subset[i]
        data = df.loc[indices[i]]
make_frame(g, data, ax)
plt.tight_layout()
ani = FuncAnimation(
fig, update, interval=100, frames=range(df.shape[0]), repeat=True
)
ani.save(name, writer="imagemagick", savefig_kwargs={"facecolor": "white"}, fps=16)
plt.close()
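# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It builds a few
# small random graphs, computes the metrics referenced in LABELS with plain
# networkx calls, and renders them with make_gif. The metric definitions
# (in particular "triangle_ratio") are assumptions made for this demo, and
# saving the gif requires ImageMagick to be installed for the matplotlib
# "imagemagick" writer.
if __name__ == "__main__":
    from networkx.algorithms.community import greedy_modularity_communities, modularity

    def _graph_metrics(g):
        # Compute one value per key in LABELS for a single graph.
        communities = list(greedy_modularity_communities(g))
        n = g.number_of_nodes()
        triangles = sum(nx.triangles(g).values()) / 3.0
        possible_triangles = n * (n - 1) * (n - 2) / 6.0
        return {
            "modularity": modularity(g, communities),
            "density": nx.density(g),
            "triangle_ratio": triangles / possible_triangles,  # demo definition
            "avg_shortest_path_length": nx.average_shortest_path_length(g),
            "global_clustering_coefficient": nx.transitivity(g),
            "avg_clustering_coefficient": nx.average_clustering(g),
            "global_efficiency": nx.global_efficiency(g),
            "local_efficiency": nx.local_efficiency(g),
            "node_connectivity": nx.node_connectivity(g),
        }

    # Ten-node connected graphs so they line up with the circular POSITION layout above.
    demo_graphs = [nx.connected_watts_strogatz_graph(10, 4, 0.3, seed=i) for i in range(8)]
    demo_df = pd.DataFrame([_graph_metrics(g) for g in demo_graphs])
    make_gif(demo_graphs, demo_df, name="demo_visualization.gif")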
| 28.73494 | 87 | 0.630189 |
bae64e8481cf4afae6d12f79e46cd795d8fe2b3f | 8,510 | py | Python | Action-TSN/generate_dataset_from_FatigueView.py | FatigueView/fatigueview | eaa724e5391953a6cac757bf02decdf7ddddba2d | [
"MIT"
] | 1 | 2022-02-15T07:23:56.000Z | 2022-02-15T07:23:56.000Z | Action-TSN/generate_dataset_from_FatigueView.py | FatigueView/fatigueview | eaa724e5391953a6cac757bf02decdf7ddddba2d | [
"MIT"
] | 1 | 2021-04-08T12:07:28.000Z | 2021-04-08T12:13:08.000Z | Action-TSN/generate_dataset_from_FatigueView.py | FatigueView/fatigueview | eaa724e5391953a6cac757bf02decdf7ddddba2d | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
'''=================================================
@Author :zhenyu.yang
@Date :2020/11/5 11:37 AM
=================================================='''
import sys
sys.path.append('./')
sys.path.insert(0,'/data/zhenyu.yang/modules')
import cv2
import json
import numpy as np
import random
import copy
from multiprocessing import Process
import os
def getFiles(path, suffix,prefix):
return [os.path.join(root, file) for root, dirs, files in os.walk(path)
for file in files if file.endswith(suffix) and file.startswith(prefix)]
def get_ear(ldmk):
eps = 1e-5
get_distance = lambda x,y:((x[0]-y[0])**2 + (x[1]-y[1])**2 + eps)**0.5
w = get_distance(ldmk[0],ldmk[4])
h = get_distance(ldmk[2],ldmk[6])
ear = h/w
ear = min(ear,0.7)
return ear
def get_ear_height(ldmk):
heights = [ldmk[2][1]-ldmk[6][1],ldmk[1][1]-ldmk[7][1],ldmk[3][1]-ldmk[5][1]]
return np.mean(np.abs(heights))
def get_fea_label(img_info):
skeleton = np.zeros((17,2)) - 1
if 'skeleton' in img_info and img_info['skeleton'] is not None and len(img_info['skeleton']) > 4:
skeleton = np.array(img_info['skeleton'])
return skeleton
def get_perclose(height_list):
max_height = max(height_list)
preclose_list = [1 - v/max_height for v in height_list]
preclose_50 = sum(v > 0.5 for v in preclose_list)
preclose_70 = sum(v > 0.7 for v in preclose_list)
preclose_90 = sum(v > 0.9 for v in preclose_list)
return [preclose_50,preclose_70,preclose_90]
def get_eye_movement(height_list):
height_change = [abs(height_list[i+1] - height_list[i]) for i in range(len(height_list)-1)]
return sum(v>1 for v in height_change) / len(height_list)
def list2num(slice_list):
num_list = []
for slice in slice_list:
num_list.extend(list(range(slice[0], slice[1] + 1)))
return num_list
def is_stretch(stretch_list,left_index,right_index):
# 1 : stretch 0: normal -1 : ignore
max_union = -1
frame_len = right_index - left_index
for stretch in stretch_list:
stretch_len = abs(stretch[1] - stretch[0])
temp_left = max(left_index,stretch[0])
temp_right = min(right_index,stretch[1])
if [temp_left,temp_right] in [stretch,[left_index,right_index]]:
return 1
union = (temp_right - temp_left) /( min(stretch_len,frame_len) + 0.1)
max_union = max(max_union,union)
if max_union < 0.2:
return 0
return -1
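# Illustrative sanity check (not part of the original script) for the
# window/stretch overlap rules implemented by is_stretch() above, using
# made-up frame indices.
def _is_stretch_examples():
    annotated = [[100, 200]]
    assert is_stretch(annotated, 120, 180) == 1   # window fully inside a stretch
    assert is_stretch(annotated, 300, 400) == 0   # no overlap at all
    assert is_stretch(annotated, 150, 350) == -1  # partial overlap is ignored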
def get_batch_data(video_list,suffix,dst_dir,time_len = 10):
random.shuffle(video_list)
half_frame_len = time_len*25//2
while True:
if len(video_list) == 0:
break
video_path = video_list.pop()
video_suffix = '.mp4'
if video_path.endswith('.mp4'):
video_suffix = '.mp4'
elif video_path.endswith('.avi'):
video_suffix = '.avi'
json_path = video_path.replace(video_suffix, suffix)
if not os.path.exists(json_path):
continue
with open(json_path, 'r') as f:
big_json = f.readlines()
skeleton_list = []
for json_info in big_json:
try:
json_info = json.loads(json_info.strip())
except:
continue
skeleton_list.append(get_fea_label(json_info))
stretch_path = video_path.replace(os.path.basename(video_path), 'stretch.json')
if not os.path.exists(stretch_path):
continue
with open(stretch_path, 'r') as f:
stretch_list = json.load(f)
stretch_index_list = []
normal_index_list = []
for i in range(0,len(skeleton_list),40):
if i < half_frame_len or i >= len(skeleton_list) - (half_frame_len+1):
continue
temp_stretch = is_stretch(stretch_list,i-half_frame_len,i+half_frame_len)
if temp_stretch == 1:
stretch_index_list.append([i-half_frame_len,i+half_frame_len])
if temp_stretch == 0:
normal_index_list.append([i-half_frame_len,i+half_frame_len])
if len(stretch_index_list) == 0:
continue
random.shuffle(normal_index_list)
normal_index_list = normal_index_list[:min(len(normal_index_list),len(stretch_index_list))]
frame_info = {}
for temp_index in stretch_index_list:
temp_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
temp_name = '{}__{}__{}'.format(1, temp_name,int(sum(temp_index) // 2))
temp_dst_dir = os.path.join(dst_dir, temp_name)
for index in range(temp_index[0],temp_index[1]):
temp_list = frame_info.get(index,[])
temp_list.append(temp_dst_dir)
frame_info[index] = temp_list
for temp_index in normal_index_list:
temp_name = '_'.join(video_path.split(os.sep)[-4:]).replace(video_suffix,'')
temp_name = '{}__{}__{}'.format(0, temp_name,int(sum(temp_index) // 2))
temp_dst_dir = os.path.join(dst_dir, temp_name)
for index in range(temp_index[0],temp_index[1]):
temp_list = frame_info.get(index,[])
temp_list.append(temp_dst_dir)
frame_info[index] = temp_list
max_frame = max(frame_info.keys())
frame_id = -1
cap = cv2.VideoCapture(video_path)
while True:
frame_id += 1
if frame_id > max_frame:
break
ret,frame = cap.read()
if not ret:
break
if frame_id in frame_info:
for temp_dir in frame_info[frame_id]:
if not os.path.exists(temp_dir):
try:
os.makedirs(temp_dir)
except:
pass
cv2.imwrite(os.path.join(temp_dir,'{}.jpg'.format(str(frame_id).zfill(8))),frame)
def split(input,num=60):
random.shuffle(input)
ans = []
sep = len(input) //num
for i in range(num-1):
ans.append(input[i*sep:(i+1)*sep])
ans.append(input[(num-1)*sep:])
return ans
if __name__ == '__main__':
version = 'v0.1'
suffix = '_{}.json'.format(version)
src_dir_dict = {'train':'/data/weiyu.li/DMSData/FatigueView/raw_video',
'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
src_dir_dict = {'train':'/data/weiyu.li/DMSData/FatigueView/raw_video'
}
src_dir_dict = {'test':'/data/weiyu.li/DMSData/FatigueView/test_video'
}
camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
src_dir_dict = {'beelab_test':'/data/weiyu.li/DMSData/FatigueView/beelab_test_video',
'beelab_train': '/data/weiyu.li/DMSData/FatigueView/beelab_train_video'
}
camera_list = ['ir_down','ir_front','ir_left','ir_left_up','ir_up','rgb_down','rgb_front','rgb_left','rgb_left_up','rgb_up']
data_type = 'train'
camera_id = 0
for data_type in src_dir_dict.keys():
for camera_id in range(len(camera_list)):
src_dir = src_dir_dict[data_type]
camera_type = camera_list[camera_id]
dst_dir = './data/{}/{}'.format(data_type,camera_type)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
video_list = getFiles(src_dir, '.mp4', camera_type)
video_list += getFiles(src_dir, '.avi', camera_type)
# if data_type == 'test':
# video_list = [v for v in video_list if 'fengchunshen' not in v and 'panbijia' not in v]
#
#
# if data_type == 'train':
# video_list = [v for v in video_list if 'zhaoxinmei' not in v]
#
all_num = 60000
running_num = 32
batch_size = all_num//running_num
split_videos = split(video_list, running_num)
process_list = []
for i in range(running_num):
temp_p = Process(target=get_batch_data,args=(split_videos[i],suffix,dst_dir,))
process_list.append(temp_p)
for temp_p in process_list:
temp_p.start()
for temp_p in process_list:
temp_p.join()
print('END')
| 27.275641 | 128 | 0.583431 |
ae0cfea0753e6968698d6fa70d86850a4eadedde | 10,501 | py | Python | dataset_vol2.py | Shimaa1/group_activity_gcn | 53f86e93eb7a78d537532d48c836ce30cbf7e8d1 | [
"MIT"
] | 1 | 2022-01-04T14:18:36.000Z | 2022-01-04T14:18:36.000Z | dataset_vol2.py | Shimaa1/group_activity_gcn | 53f86e93eb7a78d537532d48c836ce30cbf7e8d1 | [
"MIT"
] | null | null | null | dataset_vol2.py | Shimaa1/group_activity_gcn | 53f86e93eb7a78d537532d48c836ce30cbf7e8d1 | [
"MIT"
] | null | null | null | import os
from sklearn.model_selection import train_test_split
import torch
import cv2
import numpy as np
from torch.utils.data import Dataset
from mypath import Path
# from network.model.graph_front.graphFront import _graphFront
from torchvision import transforms
from PIL import Image
class VolleyballDataset(Dataset):
r"""A Dataset for a folder of videos. Expects the directory structure to be
directory->[train/val/test]->[class labels]->[videos]. Initializes with a list
of all file names, along with an array of labels, with label being automatically
inferred from the respective folder names.
Args:
            dataset (str): Name of dataset. Defaults to 'volleyball'.
            split (str): Determines which folder of the directory the dataset will read from. Defaults to 'train'.
            transforms (callable): Transform applied to each cropped person image. Defaults to transforms.ToTensor().
"""
def __init__(self, dataset='volleyball', split='train', transforms=transforms.ToTensor()):
# self.root_dir, self.output_dir, self.bbox_output_dir = root_dir, output_dir, bbox_output_dir
self.root_dir, self.bbox_output_dir = Path.db_dir(dataset)
self.bbox_output = '/data/dataset/volleyball/person1/'
# dic = {'train': '1 3 6', \
# 'val': '5', \
# 'test': '4' }
dic ={'train': '1 3 6 7 10 13 15 16 18 22 23 31 32 36 38 39 40 41 42 48 50 52 53 54', \
'val': '0 2 8 12 17 19 24 26 27 28 30 33 46 49 51', \
'test': '4 5 9 11 14 20 21 25 29 34 35 37 43 44 45 47'}
label_index = {'r_set': 0, 'r_spike': 1, 'r-pass': 2, 'r_winpoint': 3, 'l_winpoint': 4, \
'l-pass': 5, 'l-spike': 6, 'l_set': 7}
video_index = dic[split].split(' ')
self.split = split
# The following three parameters are chosen as described in the paper section 4.1
self.resize_height = 192#256#112#780
self.resize_width = 112#384#112#1280
self.transform2 = transforms
# self.transform2 = transforms.Compose([
# transforms.ToTensor(),
# transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
# ])
self.fnames, self.labels, self.bboxes = self.make_dataset_sth(video_index, label_index)
def __len__(self):
return len(self.fnames)
def __getitem__(self, index):
# Loading and preprocessing.
labels = np.array(self.labels[index])
buffer, dist= self.load_frames(self.fnames[index], self.bboxes[index])
# buffer = self.transform2(buffer)
# buffer, buffer_bbox = self.crop(buffer, buffer_bbox, self.clip_len, self.crop_size)
# adjacent_matrix = self.graph.build_graph(buffer_bbox[::2,:,:])
# if self.split == 'test':
# # Perform data augmentation
# buffer = self.randomflip(buffer)
# buffer = self.normalize(buffer)
# buffer = self.to_tensor(buffer)
# return torch.from_numpy(buffer), torch.from_numpy(labels), torch.from_numpy(buffer_bbox)
# return torch.from_numpy(buffer), torch.from_numpy(buffer_bbox), \
# torch.from_numpy(labels), adjacent_matrix
return torch.from_numpy(buffer[::2,:,:,:,:]), torch.from_numpy(labels)
# torch.from_numpy(dist[::2,:,:])
def randomflip(self, buffer):
"""Horizontally flip the given image and ground truth randomly with a probability of 0.5."""
if np.random.random() < 0.5:
for i, frame in enumerate(buffer):
frame = cv2.flip(buffer[i], flipCode=1)
buffer[i] = cv2.flip(frame, flipCode=1)
return buffer
def normalize(self, buffer):
for i, frame in enumerate(buffer):
frame -= np.array([[[90.0, 98.0, 102.0]]])
buffer[i] = frame
return buffer
def to_tensor(self, buffer):
return buffer.transpose((3, 0, 1, 2))
def make_dataset_sth(self, video_index, label_index):
frame_name = []
frame_label = []
frame_bbox = []
for video in video_index:
with open(os.path.join(self.root_dir, video, 'annotations.txt'),'r') as f:
info = f.readlines()
for item in info:
item_index = item.split(' ')
frame_name.append(os.path.join(self.root_dir, video, \
item.split(' ')[0][:-4]))
frame_label.append(label_index[item_index[1]])
frame_bbox.append(os.path.join(self.bbox_output_dir, video, \
item.split(' ')[0][:-4], 'tracklets.txt'))
return frame_name, frame_label, frame_bbox
def load_frames(self, file_dir, bbox_dir):
with open(bbox_dir, 'r') as f:
det_lines = f.readlines()
det_lines = [item.strip().split('\t') for item in det_lines]
if len(det_lines) < 12:
for i in range(12-len(det_lines)):
det_lines.append(det_lines[-(i+1)]) #person number 12
frames = sorted([os.path.join(file_dir, img) for img in os.listdir(file_dir)])
frame_count = len(frames)
buffer = np.empty((frame_count, 12, 3, 109, 64), np.dtype('float32'))
dist = np.zeros((frame_count, 12, 12), np.dtype('float64'))
for i, frame_name in enumerate(frames):
frame = cv2.imread(frame_name)
seq_x = np.zeros((12), dtype="float64")
for j in range(len(det_lines)):
buffer_bbox = [int(x) for x in det_lines[j][i+1].split(' ')]
j_center_h = buffer_bbox[1]+buffer_bbox[3]/2
seq_x[j] = j_center_h
seq_index = np.argsort(seq_x)
m = 0
for item in seq_index:
buffer_bbox = [int(x) for x in det_lines[item][i+1].split(' ')]
person = frame[buffer_bbox[1]:buffer_bbox[1]+buffer_bbox[3], \
buffer_bbox[0]:buffer_bbox[0]+buffer_bbox[2]]
person = cv2.resize(person,(self.resize_width, self.resize_height))
person = Image.fromarray(cv2.cvtColor(person, cv2.COLOR_BGR2RGB))
person = self.transform2(person)
buffer[i][m][:] = person
m += 1
# for j in range(len(det_lines)):
# buffer_bbox = [int(x) for x in det_lines[j][i+1].split(' ')]
# person = frame[buffer_bbox[1]:buffer_bbox[1]+buffer_bbox[3], \
# buffer_bbox[0]:buffer_bbox[0]+buffer_bbox[2]]
# j_center_h = buffer_bbox[1]+buffer_bbox[3]/2
# person = cv2.resize(person,(self.resize_width, self.resize_height))
# person = Image.fromarray(cv2.cvtColor(person, cv2.COLOR_BGR2RGB))
#
# person = self.transform2(person)
# seq_x[j] = j_center_h
# buffer[i][j][:] = person
# seq_index = np.argsort(seq_x)
# for j in range(len(det_lines)):
# buffer_bbox = [int(x) for x in det_lines[j][i+1].split(' ')]
# person = frame[buffer_bbox[1]:buffer_bbox[1]+buffer_bbox[3], \
# buffer_bbox[0]:buffer_bbox[0]+buffer_bbox[2]]
# j_center_h = buffer_bbox[1]+buffer_bbox[3]/2
# j_center_w = buffer_bbox[0]+buffer_bbox[2]/2
# for k in range(j+1, len(det_lines)):
# buffer_bbox_k = [int(x) for x in det_lines[k][i+1].split(' ')]
# k_center_h = buffer_bbox_k[1]+buffer_bbox_k[3]/2
# k_center_w = buffer_bbox_k[0]+buffer_bbox_k[2]/2
# dist[i][j][k] = abs(j_center_h-k_center_h)+abs(j_center_w-k_center_w)
# dist[i][k][j] = dist[i][j][k]
#
# person = cv2.resize(person,(self.resize_width, self.resize_height))
# person = Image.fromarray(cv2.cvtColor(person, cv2.COLOR_BGR2RGB))
#
# person = self.transform2(person)
# buffer[i][j][:] = person
dist_index = np.argsort(dist[i], axis=1)
# for id in range(12):
# frame_show = frame
# pt_index = dist_index[id][:3]
# for jd in pt_index:
# buffer_bbox = [int(x) for x in det_lines[jd][id+1].split(" ")]
# frame_show = cv2.rectangle(frame_show, (buffer_bbox[0], buffer_bbox[1]), \
# (buffer_bbox[0]+buffer_bbox[2], buffer_bbox[1]+buffer_bbox[3]),(0,255,0),2)
# cv2.imshow("id", frame)
# cv2.waitKey(0)
for l in np.arange(12):
dist[i][l][dist_index[:, 3:][l]] = 1/12
dist[i][l][dist_index[:, :3][l]] = 1/12
return buffer, dist
def crop(self, buffer, buffer_bbox, clip_len, crop_size):
# randomly select time index for temporal jittering
# time_index = np.random.randint(buffer.shape[0] - clip_len)
#
# # Randomly select start indices in order to crop the video
# height_index = np.random.randint(buffer.shape[1] - crop_size)
# width_index = np.random.randint(buffer.shape[2] - crop_size)
time_index = 0
# Randomly select start indices in order to crop the video
height_index = 0
width_index = 0
# Crop and jitter the video using indexing. The spatial crop is performed on
# the entire array, so each frame is cropped in the same location. The temporal
# jitter takes place via the selection of consecutive frames
buffer = buffer[time_index:time_index + clip_len,
height_index:height_index + crop_size,
width_index:width_index + crop_size, :]
buffer_bbox = buffer_bbox[time_index:time_index + clip_len, :]
return buffer, buffer_bbox
if __name__ == "__main__":
from torch.utils.data import DataLoader
root_dir = '/data/dataset/volleyball/videos/'
    train_data = VolleyballDataset(dataset='volleyball', split='test')
train_loader = DataLoader(train_data, batch_size=100, shuffle=True, num_workers=0)
for i, sample in enumerate(train_loader):
inputs = sample[0]
labels = sample[1]
print(inputs.size())
print(labels)
if i == 1:
break
| 43.392562 | 114 | 0.582516 |
adbc77d6deb288e632b89a583c81e53ad5ccae5d | 1,251 | py | Python | doc/examples/filters/plot_hysteresis.py | taylor-scott/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 3 | 2019-02-28T16:05:36.000Z | 2020-04-03T17:29:07.000Z | doc/examples/filters/plot_hysteresis.py | taylor-scott/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 1 | 2021-06-25T15:22:36.000Z | 2021-06-25T15:22:36.000Z | doc/examples/filters/plot_hysteresis.py | taylor-scott/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 3 | 2019-12-31T23:21:40.000Z | 2020-04-03T17:29:08.000Z | """
=======================
Hysteresis thresholding
=======================
*Hysteresis* is the lagging of an effect---a kind of inertia. In the
context of thresholding, it means that areas above some *low* threshold
are considered to be above the threshold *if* they are also connected
to areas above a higher, more stringent, threshold. They can thus be
seen as continuations of these high-confidence areas.
Below, we compare normal thresholding to hysteresis thresholding.
Notice how hysteresis allows one to ignore "noise" outside of the coin
edges.
"""
import matplotlib.pyplot as plt
from skimage import data, filters
fig, ax = plt.subplots(nrows=2, ncols=2)
image = data.coins()
edges = filters.sobel(image)
low = 0.1
high = 0.35
lowt = (edges > low).astype(int)
hight = (edges > high).astype(int)
hyst = filters.apply_hysteresis_threshold(edges, low, high)
ax[0, 0].imshow(image, cmap='gray')
ax[0, 0].set_title('Original image')
ax[0, 1].imshow(edges, cmap='magma')
ax[0, 1].set_title('Sobel edges')
ax[1, 0].imshow(lowt, cmap='magma')
ax[1, 0].set_title('Low threshold')
ax[1, 1].imshow(hight + hyst, cmap='magma')
ax[1, 1].set_title('Hysteresis threshold')
for a in ax.ravel():
a.axis('off')
plt.tight_layout()
plt.show()
| 25.02 | 71 | 0.69944 |
b4fa861e00e19b3e8284a9bebb369adf726e88d2 | 6,358 | py | Python | python/ee/featurecollection.py | Elgyii/earthengine-api | 8650c1f58f3abc502ea5296d1f628b69bc295243 | [
"Apache-2.0"
] | 1 | 2020-09-09T11:22:17.000Z | 2020-09-09T11:22:17.000Z | python/ee/featurecollection.py | Elgyii/earthengine-api | 8650c1f58f3abc502ea5296d1f628b69bc295243 | [
"Apache-2.0"
] | 1 | 2021-02-23T13:42:32.000Z | 2021-02-23T13:42:32.000Z | python/ee/featurecollection.py | Elgyii/earthengine-api | 8650c1f58f3abc502ea5296d1f628b69bc295243 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Representation of an Earth Engine FeatureCollection."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-long-lambda
from . import apifunction
from . import collection
from . import computedobject
from . import data
from . import deprecation
from . import ee_exception
from . import ee_list
from . import ee_types
from . import feature
from . import geometry
class FeatureCollection(collection.Collection):
"""A representation of a FeatureCollection."""
_initialized = False
def __init__(self, args, opt_column=None):
"""Constructs a collection features.
Args:
args: constructor argument. One of:
1) A string - assumed to be the name of a collection.
2) A geometry.
3) A feature.
4) An array of features.
5) A GeoJSON FeatureCollection.
6) A computed object - reinterpreted as a collection.
opt_column: The name of the geometry column to use. Only useful with the
string constructor.
Raises:
EEException: if passed something other than the above.
"""
self.initialize()
# Wrap geometries with features.
if isinstance(args, geometry.Geometry):
args = feature.Feature(args)
# Wrap single features in an array.
if isinstance(args, feature.Feature):
args = [args]
if ee_types.isString(args):
# An ID.
actual_args = {'tableId': args}
if opt_column:
actual_args['geometryColumn'] = opt_column
super(FeatureCollection, self).__init__(
apifunction.ApiFunction.lookup('Collection.loadTable'), actual_args)
elif isinstance(args, (list, tuple)):
# A list of features.
super(FeatureCollection, self).__init__(
apifunction.ApiFunction.lookup('Collection'), {
'features': [feature.Feature(i) for i in args]
})
elif isinstance(args, ee_list.List):
# A computed list of features.
super(FeatureCollection, self).__init__(
apifunction.ApiFunction.lookup('Collection'), {
'features': args
})
elif isinstance(args, dict) and args.get('type') == 'FeatureCollection':
# A GeoJSON FeatureCollection
super(FeatureCollection, self).__init__(
apifunction.ApiFunction.lookup('Collection'),
{'features': [feature.Feature(i) for i in args.get('features', [])]})
elif isinstance(args, computedobject.ComputedObject):
# A custom object to reinterpret as a FeatureCollection.
super(FeatureCollection, self).__init__(
args.func, args.args, args.varName)
else:
raise ee_exception.EEException(
'Unrecognized argument type to convert to a FeatureCollection: %s' %
args)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
super(FeatureCollection, cls).initialize()
apifunction.ApiFunction.importApi(
cls, 'FeatureCollection', 'FeatureCollection')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class."""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
def getMapId(self, vis_params=None):
"""Fetch and return a map id and token, suitable for use in a Map overlay.
Args:
vis_params: The visualization parameters. Currently only one parameter,
'color', containing a hex RGB color string is allowed.
Returns:
A map ID dictionary as described in ee.data.getMapId, including an
additional 'image' field containing Collection.draw image wrapping a
FeatureCollection containing this feature.
"""
painted = apifunction.ApiFunction.apply_('Collection.draw', {
'collection': self,
'color': (vis_params or {}).get('color', '000000')
})
return painted.getMapId({})
def getDownloadURL(self, filetype=None, selectors=None, filename=None):
"""Get a download URL for this feature collection.
Args:
filetype: The filetype of download, either CSV or JSON. Defaults to CSV.
selectors: The selectors that should be used to determine which attributes
will be downloaded.
filename: The name of the file to be downloaded.
Returns:
A URL to download the specified feature collection.
"""
request = {}
if data._use_cloud_api: # pylint: disable=protected-access
request['table'] = self
else:
request['table'] = self.serialize()
if filetype is not None:
request['format'] = filetype.upper()
if filename is not None:
request['filename'] = filename
if selectors is not None:
if isinstance(selectors, (list, tuple)):
selectors = ','.join(selectors)
request['selectors'] = selectors
return data.makeTableDownloadUrl(data.getTableDownloadId(request))
# Deprecated spelling to match the JS library.
getDownloadUrl = deprecation.Deprecated('Use getDownloadURL().')(
getDownloadURL)
def select(self, propertySelectors, newProperties=None,
retainGeometry=True, *args):
"""Select properties from each feature in a collection.
Args:
propertySelectors: An array of names or regexes specifying the properties
to select.
newProperties: An array of strings specifying the new names for the
selected properties. If supplied, the length must match the number
of properties selected.
retainGeometry: A boolean. When false, the result will have no geometry.
*args: Selector elements as varargs.
Returns:
The feature collection with selected properties.
"""
if len(args) or ee_types.isString(propertySelectors):
args = list(args)
if not isinstance(retainGeometry, bool):
args.insert(0, retainGeometry)
if newProperties is not None:
args.insert(0, newProperties)
args.insert(0, propertySelectors)
return self.map(lambda feat: feat.select(args, None, True))
else:
return self.map(
lambda feat: feat.select(
propertySelectors, newProperties, retainGeometry))
@staticmethod
def name():
return 'FeatureCollection'
@staticmethod
def elementType():
return feature.Feature
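# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): a few of the construction and
# query patterns described in the docstrings above. It assumes the caller has
# Earth Engine credentials available; the table asset id is hypothetical.
def _example_usage():
  import ee

  ee.Initialize()
  # 1) From the id of an existing table asset (hypothetical id).
  table = ee.FeatureCollection('users/some_user/some_table')
  # 2) From client-side geometries wrapped in features.
  points = ee.FeatureCollection([
      ee.Feature(ee.Geometry.Point(-62.54, -27.32), {'label': 'A'}),
      ee.Feature(ee.Geometry.Point(-69.18, -10.64), {'label': 'B'}),
  ])
  # Keep selected properties and build a CSV download URL.
  labelled = points.select(['label'])
  url = table.getDownloadURL(filetype='CSV', selectors=['label'])
  return labelled, url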
| 34 | 80 | 0.673482 |
1588029d4b3c74e294f92f3178753c8e0e2279fc | 1,125 | py | Python | data/utils/init_net.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | 1 | 2022-03-29T06:32:34.000Z | 2022-03-29T06:32:34.000Z | data/utils/init_net.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | null | null | null | data/utils/init_net.py | DLWK/CGRNet | a9a65fa192cc9888e7861755313b8b3ac80fa512 | [
"MIT"
] | null | null | null | import torch.nn as nn
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, gain)
nn.init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) | 46.875 | 102 | 0.580444 |
4d670535a6248f579378e13919e3e203a0e023e5 | 22,037 | py | Python | e3/Run.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | e3/Run.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | e3/Run.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import json
import logging
import logging.config
import os
import sys
import time
from threading import Thread
import click
import requests
import spur
from spur.results import ExecutionResult
from common import Utils
from common.Config import run_config
from common.E3 import e3
from common.Utils import LogWrapper
class Run:
"""
    This class runs a run as defined in a run file.
"""
def __init__(self, run_name):
self._log = logging.getLogger('run')
self._run_config = e3.load_run(run_name)
self._run_name = run_name
self.number_stages()
self.denormalize_config()
def denormalize_config(self):
"""
        This method goes through the run configuration and de-normalises all the stage configurations into
        independent stages. This means that each stage should have all the information it needs to run without having to
        rely on any information from the parent config. If a stage does not have a required attribute then that property is
        copied from the top level run configuration.
        :return: a run configuration dict comprising fully denormalized stages
"""
self.denormalize_attribute("duration")
self.denormalize_attribute("workload")
def denormalize_attribute(self, attribute_name):
"""
Denormalizes the supplied attribute in the run configuration
:param attribute_name: the name of the attribute to denormalize
:return: a set of attribute values
"""
threads = self._run_config["threads"]
attributes = set()
if attribute_name in self._run_config:
attribute = self._run_config[attribute_name]
attributes.add(attribute)
self._log.debug("Found a top level attribute %s (%s) in configuration, applying to all %s-less stages",
attribute_name, attribute, attribute_name)
for thread in threads:
for stage in thread["stages"]:
if attribute_name not in stage:
stage[attribute_name] = attribute
attributes.add(stage[attribute_name])
else:
self._log.debug("No top level [%s] attribute found, checking that each stage contains one", attribute_name)
for thread in threads:
for stage in thread["stages"]:
if attribute_name not in stage:
raise Exception("Stage [%s] does not have attribute [%s] and no top level instance defined" %
(stage, attribute_name))
return attributes
def run(self):
"""
Iterate through the run map and start a thread of run execution for each key
:return:
"""
run_threads = []
thread_index = 1
for thread in self._run_config["threads"]:
run_thread = RunThread(thread, self._run_name, thread_index)
thread_index += 1
run_threads.append(run_thread)
run_thread.start()
for run_thread in run_threads:
run_thread.join()
self._log.info("Finished running %s" % self._run_name)
return self._run_name
def number_stages(self):
for thread in self._run_config["threads"]:
stage_key = 0
for stage in thread["stages"]:
stage['key'] = '%03d' % stage_key
stage_key += 1
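# Illustrative sketch (not part of the original module): the shape of a run
# configuration before and after denormalization. The field names mirror the
# ones read by Run above ("threads", "stages", "duration", "workload"); the
# concrete values are made up.
def _denormalization_example():
    run_config = {
        "duration": 60000,
        "workload": "mixed",
        "threads": [
            {"stages": [{"clients": 10}, {"clients": 20, "workload": "git-only"}]}
        ]
    }
    # After Run.number_stages() and Run.denormalize_config() each stage carries
    # its own copy of the top level attributes, e.g.
    #   stage 0 -> {"clients": 10, "duration": 60000, "workload": "mixed", "key": "000"}
    #   stage 1 -> {"clients": 20, "duration": 60000, "workload": "git-only", "key": "001"}
    return run_config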
class RunThread(Thread):
"""
This class is responsible for setting up and executing a run thread.
The following activities make up the execution of a stage.
1. Worker nodes are sanitized. This means that all git processes are killed. Certain java processes are also
terminated
2. The data, execution/grinder, and execution/lib directories are distributed to all worker nodes
3. The grinder console is started on one of the worker nodes. Typically the worker node that was provisioned first
4. The grinder agents are started on all worker nodes
5. Stage is started
6. Wait until run duration has elapsed
7. Repeat with the next stage on the queue
"""
def __init__(self, thread, run_name, thread_index):
self._log = logging.getLogger("run")
self._run_name = run_name
self._thread = thread
# list of workers that have already had the e3 distribution synced to them
self._synced_workers = set()
Thread.__init__(self, name='RunThread:%d' % thread_index)
self._run_config = None
def run(self):
for stage in self._thread["stages"]:
self.run_stage(self._thread, stage)
def run_stage(self, thread, stage):
worker_stage = WorkerStage(thread, stage)
self._log.info("Running Stage - worker: %s, workload: %s, instance %s, clients: %d, worker-nodes: %d, "
"clients-per-worker: %d, duration %s ms",
worker_stage.worker['stack']['Name'],
worker_stage.workload,
worker_stage.instance['stack']['Name'],
worker_stage.clients,
len(worker_stage.worker_nodes),
worker_stage.clients_per_worker,
worker_stage.duration)
# TODO refactor run_config into class E3
self._run_config = run_config(worker_stage.worker['stack']['Name'])
self.sanitize_workers(worker_stage)
self.restart_bitbucket(worker_stage)
self.distribute_grinder(worker_stage)
self.start_console(worker_stage)
self.start_agents(worker_stage)
self.start_stage(worker_stage)
self.wait_finish(worker_stage)
def sanitize_workers(self, worker_stage):
self._log.info("Sanitizing workers")
for worker in worker_stage.worker_nodes:
self._log.debug("Sanitizing worker [%s] " % worker.hostname)
while True:
try:
# Kill all orphaned git processes
if self._remotely_kill_process(worker, "TERM", process="git"):
# If we could kill processes we wait to see that they exited cleanly
time.sleep(10)
if self._remotely_kill_process(worker, "0", process="git"):
# If they did not, we get forceful
self._remotely_kill_process(worker, "KILL", process="git")
# Attempt to kill the worker and agent
find_grinder_processes = [
"sh",
"-c",
"/usr/sbin/lsof -n -i TCP:6372 -i TCP:6373 -i TCP:3333 | "
"grep java | awk '{print $2}' | sort -u"
]
grinder_processes_result = self.run_command(worker, find_grinder_processes)
if grinder_processes_result.return_code == 0:
grinder_pids_arr = grinder_processes_result.output.split("\n")
grinder_pids = []
for grinder_pid in grinder_pids_arr:
if grinder_pid.isdigit():
grinder_pids.append(int(grinder_pid))
grinders_killed = 0
for pid in grinder_pids:
if self._remotely_kill_process(worker, "TERM", pid=int(pid)):
grinders_killed += 1
if grinders_killed > 0:
time.sleep(10)
for pid in grinder_pids:
if self._remotely_kill_process(worker, "0", pid=int(pid)):
self._remotely_kill_process(worker, "KILL", pid=int(pid))
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self._log.info("Could not sanitize worker node %s (%s: %s), retrying" %
(worker.user_host, exc_type, exc_value))
time.sleep(5)
else:
break
stage_dir = "%s/%s/stage-%s" % (self._run_config['worker_run_dir'], self._run_name, worker_stage.key)
self.clean_folder(stage_dir, worker)
tmp_dir = "%s/tmp" % self._run_config['data_dir']
self.clean_folder(tmp_dir, worker)
def dir_exists(self, server, remote_file_name):
return self.run_command(server, [
'sudo', 'sh', '-c', 'test -d "%s"' % remote_file_name
], is_sudo=True).return_code == 0
def clean_folder(self, folder_to_clean, worker):
self._log.debug("Cleaning folder %s " % folder_to_clean)
if not self.dir_exists(worker, folder_to_clean):
return
to_be_deleted = self.do_run_command(worker, [
'sudo', 'sh', '-c', 'find %s -maxdepth 1 -type d' % folder_to_clean
], is_sudo=True).output
if len(to_be_deleted) > 0:
            to_be_deleted = list(filter(lambda name: len(name) > 1 and name != folder_to_clean, to_be_deleted.split('\r\n')))
total = len(to_be_deleted)
if total > 0:
for delete_me in to_be_deleted:
self.run_command(worker, ['sudo', 'rm', '-rf', delete_me], is_sudo=True)
def distribute_grinder(self, worker_stage):
self._log.info("Distributing grinder to workers")
for worker_node in worker_stage.worker_nodes:
if worker_node.user_host in self._synced_workers:
self._log.debug("Skipping grinder rsync. %s already has grinder" % worker_node.user_host)
return
self._log.debug("Distributing grinder to instance: %s, user_host: %s",
worker_node.instance, worker_node.user_host)
remote_run_dir = '%s/stage-%s' % (self._run_config['worker_run_dir'], worker_stage.key)
remote_grinder_lib_dir = '%s/execution/lib/grinder-3.11/lib' % self._run_config['worker_e3_dir']
instances_dir = '%s/data/instances' % self._run_config['worker_e3_dir']
remote_tmp_dir = '%s/tmp' % self._run_config['data_dir']
remote_site_packages = '%s/site-packages' % self._run_config['data_dir']
self.run_command(worker_node, [
'mkdir', '-p', remote_run_dir, remote_grinder_lib_dir,
remote_tmp_dir, instances_dir, remote_site_packages
])
key_file = '%s/%s/%s.pem' % (e3.get_e3_home(), "instances", worker_node.instance)
remote_data_dir = os.path.join(self._run_config['worker_e3_dir'], 'data')
for directory in ['keys', 'snapshots', 'workloads']:
local_data_dir = os.path.join(e3.get_e3_home(), directory)
Utils.rsync(worker_node.user_host, key_file, remote_data_dir, local_data_dir)
remote_execution_dir = os.path.join(self._run_config['worker_e3_dir'], 'execution')
local_execution_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'execution'))
for directory in ['grinder', 'lib']:
local_execution_subdir = os.path.join(local_execution_dir, directory)
Utils.rsync(worker_node.user_host, key_file, remote_execution_dir, local_execution_subdir)
local_site_packages = os.path.abspath(os.path.join(e3.get_e3_home(), "site-packages"))
Utils.rsync(worker_node.user_host, key_file, remote_site_packages, local_site_packages)
instance_files = set()
instance_files.add(worker_stage.instance['stack']['Name'])
instance_files.add(worker_stage.instance['stack']['RunConfig']['network'])
remote_instance_dir = os.path.join(self._run_config['worker_e3_dir'], 'data', 'instances')
local_instance_dir = os.path.join(e3.get_e3_home(), 'instances')
for inst in instance_files:
remote_instance_file = os.path.join(remote_instance_dir, inst + '.json')
local_instance_file = os.path.join(local_instance_dir, inst + '.json')
Utils.rsync(worker_node.user_host, key_file, remote_instance_file, local_instance_file)
# Add worker to synced workers when synced
self._synced_workers.add(worker_node.user_host)
def start_console(self, worker_stage):
self._log.info("Starting grinder console on node %s", worker_stage.console.user_host)
self.spawn_command(worker_stage.console, [
"java",
"-cp",
"%s/execution/lib/grinder-3.11/lib/grinder.jar" % self._run_config['worker_e3_dir'],
"-Dgrinder.console.httpHost=%s" % worker_stage.console.hostname,
"-Dgrinder.logLevel=info",
"net.grinder.Console",
"-headless"
])
Utils.poll_url("http://%s:6373/version" % worker_stage.console.hostname, 600,
lambda response: response.text == 'The Grinder 3.11')
def start_agents(self, worker_stage):
self._log.info("Starting grinder agents on workers")
for worker in worker_stage.worker_nodes:
self.spawn_command(worker, [
"java",
"-cp",
"%s/execution/lib/grinder-3.11/lib/grinder.jar" % self._run_config['worker_e3_dir'],
"-Dcom.sun.management.jmxremote",
"-Dcom.sun.management.jmxremote.port=3333",
"-Dcom.sun.management.jmxremote.authenticate=false",
"-Dcom.sun.management.jmxremote.ssl=false",
"-Dgrinder.consoleHost=%s" % worker_stage.console.hostname,
'-Dgrinder.jvm.arguments=-Droot=%s -Dinstance=%s -Dworkload=%s -DagentCount=%s' % (
self._run_config['data_dir'],
worker_stage.instance['stack']['Name'],
worker_stage.workload,
len(worker_stage.worker_nodes)
),
"net.grinder.Grinder",
"-daemon", "2"
], cwd='%s/execution/grinder' % self._run_config['worker_e3_dir'])
Utils.poll_url("http://%s:6373/agents/status" % worker_stage.console.hostname, 600,
lambda response: len(json.loads(response.text)) == len(worker_stage.worker_nodes))
def start_stage(self, worker_stage):
self._log.info("Beginning execution of load (%s/stage-%s)" % (self._run_name, worker_stage.key))
requests.post('http://%s:6373/agents/start-workers' % worker_stage.console.hostname, timeout=60,
data=json.dumps({
"grinder.duration": "%d" % worker_stage.duration,
"grinder.logDirectory": "../../../runs/%s/stage-%s" % (self._run_name, worker_stage.key),
"grinder.numberOfOldLogs": "0",
"grinder.processes": "1",
"grinder.runs": "0",
"grinder.script": "TestRunner.py",
"grinder.threads": "%d" % worker_stage.clients_per_worker
}),
headers={"Content-Type": "application/json"})
def wait_finish(self, worker_stage):
wait_seconds = worker_stage.duration / 1000.0
self._log.info("Waiting %d seconds for load execution to complete", wait_seconds)
time.sleep(wait_seconds)
Utils.poll_url("http://%s:6373/agents/status" % worker_stage.console.hostname, 600, self.workers_finished)
requests.post('http://%s:6373/agents/stop' % worker_stage.console.hostname, timeout=600)
Utils.poll_url("http://%s:6373/agents/status" % worker_stage.console.hostname, 600, self.workers_stopped)
self._log.info("Waiting another 10 seconds for agents to gracefully exit")
time.sleep(10)
@staticmethod
def workers_stopped(response):
for node in json.loads(response.text):
if len(node['workers']) > 0:
return False
return True
@staticmethod
def workers_finished(response):
for node in json.loads(response.text):
for worker in node['workers']:
if 'state' in worker and worker['state'] != 'FINISHED':
return False
return True
def run_command(self, worker_node, cmd, cwd=None, is_sudo=False):
stdout = LogWrapper(worker_node.hostname, LogWrapper.stdout)
stderr = LogWrapper(worker_node.hostname, LogWrapper.stderr)
return self.do_run_command(worker_node, cmd, cwd, is_sudo, stdout, stderr)
def do_run_command(self, worker_node, cmd, cwd=None, is_sudo=False, stdout=None, stderr=None):
"""
Executes the specified command on a remote node
:param worker_node: The node on which to execute the command
:param cmd: The command to execute
:param cwd: The working folder from which to launch the command
        :param is_sudo: Does the command include sudo
:param stdout: The output from stdout will be written here
:param stderr: The output from stderr will be written here
        :return: The result of the command execution (exit code, stdout and stderr)
:rtype: ExecutionResult
"""
if type(cmd) is str:
run_command = cmd.split(" ")
else:
run_command = cmd
args = {
"allow_error": True,
"cwd": cwd,
"stderr": stderr,
"stdout": stdout
}
if is_sudo:
args["use_pty"] = True
result = worker_node.shell.run(run_command, **args)
self._log.debug("%s -- cwd: %s, exit code: %d, instance: %s, user_host: %s, stdout: \"%s\", stderr: \"%s\"",
" ".join(run_command), cwd, result.return_code, worker_node.instance, worker_node.user_host,
result.output.rstrip(), result.stderr_output.rstrip())
return result
def spawn_command(self, worker_node, cmd, cwd=None):
stdout = LogWrapper(worker_node.hostname, LogWrapper.stdout)
stderr = LogWrapper(worker_node.hostname, LogWrapper.stderr)
if type(cmd) is str:
run_command = cmd.split(" ")
else:
run_command = cmd
result = worker_node.shell.spawn(run_command, allow_error=True, cwd=cwd, stdout=stdout, stderr=stderr)
self._log.debug("%s -- cwd: %s running: %d, instance: %s, user_host: %s", " ".join(run_command), cwd,
result.is_running(), worker_node.instance, worker_node.user_host)
def restart_bitbucket(self, worker_stage):
for instance in worker_stage.instance_nodes:
self.run_command(instance, "sudo service atlbitbucket restart")
self._log.info("Sleeping for two minutes to allow bitbucket server time to restart")
time.sleep(120)
def _remotely_kill_process(self, node, signal, pid=None, process=None):
"""
Kill a process on a remote node using the provided signal number and process id (PID)
:param node: The node on which the process should be killed
:type node: Node
:param signal: The signal number to send to the process (TERM, KILL, HUP, 0)
:type signal: str
:param pid: The PID of the process to kill
:type pid: int
:return: True on success else False
:rtype: bool
"""
if pid or process:
if pid:
return self.run_command(node, "kill -s %s %d" % (signal, pid)).return_code == 0
else:
return self.run_command(node, "killall -s %s %s" % (signal, process)).return_code == 0
else:
logging.warn("You must specify either a process name or a pid to kill")
return False
class WorkerStage:
def __init__(self, thread, stage):
self._log = logging.getLogger("run")
self.clients = stage['clients']
self.instance = thread['instance']
self.worker = thread['worker']
self.duration = stage['duration']
self.workload = stage['workload']
self.key = stage['key']
self.worker_nodes = self.make_nodes(self.worker['stack']['Name'])
self.instance_nodes = self.make_nodes(self.instance['stack']['Name'])
self.console = self.worker_nodes[0]
self.clients_per_worker = float(stage['clients']) / len(self.worker_nodes)
def make_nodes(self, instance_id):
nodes = []
self._log.debug("Loading instance config for instance %s ", instance_id)
instance_config = e3.load_instance(instance_id)
self.instance_nodes = []
for user_host in instance_config['ClusterNodes']:
if user_host == 'localhost':
shell = spur.LocalShell()
else:
(username, hostname) = user_host.split("@")
shell = spur.SshShell(
hostname=hostname,
username=username,
private_key_file='%s/%s/%s.pem' % (e3.get_e3_home(), "instances", instance_id),
missing_host_key=spur.ssh.MissingHostKey.accept,
connect_timeout=3600
)
nodes.append(Node(instance_id, user_host, shell))
return nodes
class Node:
def __init__(self, instance, user_host, shell):
self.instance = instance
self.user_host = user_host
if '@' in user_host:
self.hostname = user_host.split('@')[1]
else:
self.hostname = user_host
self.shell = shell
@click.command()
@click.option('-r', '--run', required=True, help='The experiment run you want to execute',
type=click.Choice(e3.get_runs()), default=e3.get_single_run())
def command(run):
e3.setup_logging()
run_inst = Run(run)
run_inst.run()
if __name__ == '__main__':
command()
| 45.719917 | 120 | 0.597087 |
aa0f303a820f42b00ed0ca2b52910cb1595ed93f | 2,346 | py | Python | src/data_collector.py | synapse-wireless-labs/raspi-aws-iot-sensor | c3334e77e05762c55fdb57153d76bfd99a6dfa0c | [
"Apache-2.0"
] | null | null | null | src/data_collector.py | synapse-wireless-labs/raspi-aws-iot-sensor | c3334e77e05762c55fdb57153d76bfd99a6dfa0c | [
"Apache-2.0"
] | null | null | null | src/data_collector.py | synapse-wireless-labs/raspi-aws-iot-sensor | c3334e77e05762c55fdb57153d76bfd99a6dfa0c | [
"Apache-2.0"
] | null | null | null | import time
import datetime
from spi_sensor import SpiSensor
def create_collector(sensor_config):
def create_spi_sensor_data(channel, config):
sensor = SpiSensor(channel=channel,
name=(config.get("name", "error")),
type_str=(config.get("type", "error")),
description=(config.get("description", None)),
avg_length=(config.get("avg_length", 0)),
offset=(config.get("cal_offset", 0)),
max_value=(config.get("sensor_max", 0)))
return DataCollector.SensorData(sensor)
# create list of SpiSensors
sensor_data_objects = [create_spi_sensor_data(c, s) for c, s in enumerate(sensor_config)]
# return new object
return DataCollector(sensor_data_objects)
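# Illustrative sketch (not part of the original module): the configuration shape
# create_collector() expects, inferred from the config.get(...) keys above. The
# field values are made up, and constructing the collector will instantiate
# SpiSensor objects, so the SPI hardware/driver is assumed to be present.
def example_collector():
    sensor_config = [
        {"name": "tank_pressure", "type": "pressure", "description": "tank pressure sensor",
         "avg_length": 5, "cal_offset": 0.0, "sensor_max": 100},
        {"name": "flow_rate", "type": "flow",
         "avg_length": 3, "cal_offset": 0.0, "sensor_max": 60},
    ]
    return create_collector(sensor_config)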
class DataCollector:
class SensorData:
def __init__(self, sensor):
self._values = []
self._sensor = sensor
self._start_time = 0
def store_sensor_value(self):
if not self._values:
self._start_time = time.mktime(datetime.datetime.now().timetuple())
# Save latest reading
self._values.append(self._sensor.get_sensor_reading())
def reset(self):
del self._values[:]
def get_payload(self):
return {'timestamp': self._start_time,
'values': self._values,
'sensor': self._sensor.get_sensor_name(),
'type': self._sensor.get_sensor_type(),
'description': self._sensor.get_sensor_description()
}
def __init__(self, sensor_data_list=None, max_readings_per_sensor=60):
self._sensors = []
self._readings = 0
self._max_values = max_readings_per_sensor
if isinstance(sensor_data_list, list):
self._sensors = sensor_data_list
def is_full(self):
return self._readings >= self._max_values
def read_all_sensors(self):
self._readings += 1
        # Iterate explicitly so the side effects also run under Python 3,
        # where map() is lazy and would never call store_sensor_value().
        for sensor_data in self._sensors:
            sensor_data.store_sensor_value()
def reset(self):
        for sensor_data in self._sensors:
            sensor_data.reset()
self._readings = 0
def get_sensor_payload_list(self):
return [s.get_payload() for s in self._sensors] | 33.514286 | 93 | 0.58994 |
9fb27c66ca0cfe855a718745e27accb036e3f1c7 | 544 | py | Python | crds/tests/compare_pickles.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/tests/compare_pickles.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | crds/tests/compare_pickles.py | nden/crds | b72f14cf07531ca70b61daa6b58e762e5899afa4 | [
"BSD-3-Clause"
] | null | null | null | import crds
from crds.python23 import pickle
from crds import log
def main():
p = crds.get_cached_mapping("hst.pmap")
s = pickle.dumps(p)
q = pickle.loads(s)
p._trace_compare(q)
log.divider("p == q --> " + repr(p == q))
log.divider("__getstate__ --> " + repr(p.__getstate__() == q.__getstate__()))
log.divider("rmap __getstate__ --> " + repr(p.get_imap("acs").get_rmap("biasfile").__getstate__() == q.get_imap("acs").get_rmap("biasfile").__getstate__()))
if __name__ == "__main__":
main()
| 25.904762 | 160 | 0.617647 |
c6bea373d81bfdaa798908fdb45fd1fdc7c4d066 | 762 | py | Python | python/en/archive/books/udemy-AutomateTheBoringStuffWithPythonProgramming/Ch03-Functions-02-return_statements.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | python/en/archive/books/udemy-AutomateTheBoringStuffWithPythonProgramming/Ch03-Functions-02-return_statements.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | python/en/archive/books/udemy-AutomateTheBoringStuffWithPythonProgramming/Ch03-Functions-02-return_statements.py | aimldl/coding | 70ddbfaa454ab92fd072ee8dc614ecc330b34a70 | [
"MIT"
] | null | null | null | # Ch03-Functions-02-return_statements.py
#
# https://automatetheboringstuff.com/chapter3/
# Functions: Lesson 9 - def Statements, arguments, and the None value
#
# Changes from the original code
# - I reduced the number of if-else statements.
# Return Values and return Statements
import random
def getAnswer( answerNumber ):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidedly so'
elif answerNumber == 3:
return 'Yes'
else:
return 'Out of the answer range of 1~3'
for i in range(1,11):
    # Run this ten times
rand_num = random.randint(1,4)
print('rand_num = ', rand_num)
answer = getAnswer( rand_num )
print( answer )
| 26.275862 | 70 | 0.645669 |
ebfe20c15b76755ccf50803b98c7e86463d0d233 | 24,913 | py | Python | detectron2/config/defaults.py | alliedel/detectron2 | f3a641da26a467f32d04325372d0d2da51670cf5 | [
"Apache-2.0"
] | null | null | null | detectron2/config/defaults.py | alliedel/detectron2 | f3a641da26a467f32d04325372d0d2da51670cf5 | [
"Apache-2.0"
] | null | null | null | detectron2/config/defaults.py | alliedel/detectron2 | f3a641da26a467f32d04325372d0d2da51670cf5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
_C.VERSION = 2
_C.MODEL = CN()
_C.MODEL.LOAD_PROPOSALS = False
_C.MODEL.MASK_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
# If the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in ModelCatalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHTS = ""
# Values to be used for image normalization (BGR order)
# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
# When using pre-trained models in Detectron1 or any MSRA models,
# std has been absorbed into its conv1 weights, so the std needs to be set 1.
# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,)
# Sample size of smallest side by choice or random selection from range given by
# INPUT.MIN_SIZE_TRAIN
_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# `True` if cropping is used for data augmentation during training
_C.INPUT.CROP = CN({"ENABLED": False})
# Cropping type:
# - "relative" crop (H * CROP.SIZE[0], W * CROP.SIZE[1]) part of an input of size (H, W)
# - "relative_range" uniformly sample relative crop size from between [CROP.SIZE[0], [CROP.SIZE[1]].
# and [1, 1] and use it as in "relative" scenario.
# - "absolute" crop part of an input with absolute size: (CROP.SIZE[0], CROP.SIZE[1]).
_C.INPUT.CROP.TYPE = "relative_range"
# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
# pixels if CROP.TYPE is "absolute"
_C.INPUT.CROP.SIZE = [0.9, 0.9]
# Whether the model needs RGB, YUV, HSV etc.
# Should be one of the modes defined here, as we use PIL to read the image:
# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
# with BGR being the one exception. One can set image format to BGR, we will
# internally use RGB for conversion and flip the channels over
_C.INPUT.FORMAT = "BGR"
_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training. Must be registered in DatasetCatalog
_C.DATASETS.TRAIN = ()
# List of the pre-computed proposal files for training, which must be consistent
# with datasets listed in DATASETS.TRAIN.
_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
# Number of top scoring precomputed proposals to keep for training
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
# List of the dataset names for testing. Must be registered in DatasetCatalog
_C.DATASETS.TEST = ()
# List of the pre-computed proposal files for test, which must be consistent
# with datasets listed in DATASETS.TEST.
_C.DATASETS.PROPOSAL_FILES_TEST = ()
# Number of top scoring precomputed proposals to keep for test
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# Options: TrainingSampler, RepeatFactorTrainingSampler
_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
# Repeat threshold for RepeatFactorTrainingSampler
_C.DATALOADER.REPEAT_THRESHOLD = 0.0
# if True, the dataloader will filter out images that have no associated
# annotations at train time.
_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.BACKBONE.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
# Names of the input feature maps to be used by FPN
# They must have contiguous power of 2 strides
# e.g., ["res2", "res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = []
_C.MODEL.FPN.OUT_CHANNELS = 256
# Options: "" (no norm), "GN"
_C.MODEL.FPN.NORM = ""
# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
_C.MODEL.FPN.FUSE_TYPE = "sum"
# ---------------------------------------------------------------------------- #
# Proposal generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.PROPOSAL_GENERATOR = CN()
# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
# Proposal height and width both need to be greater than MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
# ---------------------------------------------------------------------------- #
# Anchor generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.ANCHOR_GENERATOR = CN()
# The generator can be any name in the ANCHOR_GENERATOR registry
_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
# Anchor sizes, given in absolute pixels w.r.t. the scaled network input.
# Format: list of lists of sizes. SIZES[i] specifies the list of sizes to use
# for IN_FEATURES[i]; either len(SIZES) == len(IN_FEATURES) must hold, or
# len(SIZES) == 1 and the single size list SIZES[0] is used for all
# IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
# Anchor aspect ratios.
# Format: list of lists of aspect ratios. ASPECT_RATIOS[i] specifies the list of
# aspect ratios to use for IN_FEATURES[i]; either len(ASPECT_RATIOS) == len(IN_FEATURES)
# must hold, or len(ASPECT_RATIOS) == 1 and the single list ASPECT_RATIOS[0] is used
# for all IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
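# Illustration of the two conventions above (a sketch, assuming an FPN whose
# IN_FEATURES are ["p2", "p3", "p4", "p5", "p6"]): one could set a per-level
# size list such as SIZES = [[32], [64], [128], [256], [512]], while keeping a
# single ASPECT_RATIOS list like [[0.5, 1.0, 2.0]], which is then broadcast to
# every level because len(ASPECT_RATIOS) == 1.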
# Anchor angles.
# list[float], the angle in degrees, for each input feature map.
# ANGLES[i] specifies the list of angles for IN_FEATURES[i].
_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
# Names of the input feature maps to be used by RPN
# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
_C.MODEL.RPN.IN_FEATURES = ["res4"]
# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.BOUNDARY_THRESH = -1
# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example: 1)
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example: 0)
# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
# are ignored (-1)
_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
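# Worked example (illustrative only, using the default thresholds above): an
# anchor whose best IoU with any ground-truth box is 0.25 gets label 0
# (negative), one with IoU 0.5 gets label -1 (ignored), and one with IoU 0.8
# gets label 1 (positive); i.e. IOU_LABELS[i] labels the i-th interval defined
# by IOU_THRESHOLDS.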
# Total number of RPN examples per image
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
_C.MODEL.RPN.LOSS_WEIGHT = 1.0
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
# When FPN is used, this limit is applied per level and then again to the union
# of proposals from all levels
# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
# It means per-batch topk in Detectron1, but per-image topk here.
# See "modeling/rpn/rpn_outputs.py" for details.
_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
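# Illustration (a sketch of the defaults above, assuming a 5-level FPN at train
# time): up to 12000 top-scoring proposals are kept *per level* before NMS;
# after NMS the POST_NMS_TOPK_TRAIN = 2000 cap is applied per level and then
# once more to the per-image union of proposals from all levels.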
# NMS threshold used on RPN proposals
_C.MODEL.RPN.NMS_THRESH = 0.7
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS = CN()
_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
# Number of foreground classes
_C.MODEL.ROI_HEADS.NUM_CLASSES = 80
# Names of the input feature maps to be used by ROI heads
# Currently all heads (box, mask, ...) use the same input feature map list
# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
# IOU overlap ratios [IOU_THRESHOLD]
# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
# RoI minibatch size *per image* (number of regions of interest [ROIs])
# Total number of RoIs per training minibatch =
# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
# E.g., a common configuration is: 512 * 16 = 8192
_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
# Only used on test mode
# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
# balance obtaining high recall with not having too many low-precision
# detections that will slow down inference post-processing steps (like NMS).
# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
# inference.
_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
# If True, augment proposals with ground-truth boxes before sampling proposals to
# train ROI heads.
_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
# ---------------------------------------------------------------------------- #
# Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_HEAD = CN()
# The C4 backbone does not use the head name option.
# Options for non-C4 models: FastRCNNConvFCHead
_C.MODEL.ROI_BOX_HEAD.NAME = ""
# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
# These are empirically chosen to approximately lead to unit variance targets
_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
# Hidden layer dimension for FC layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
# Channel dimension for Conv layers in the RoI box head
_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_BOX_HEAD.NORM = ""
# Whether to use class-agnostic bbox regression
_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
# ---------------------------------------------------------------------------- #
# Cascaded Box Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
# The number of cascade stages is implicitly defined by the length of the following two configs.
_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
(10.0, 10.0, 5.0, 5.0),
(20.0, 20.0, 10.0, 10.0),
(30.0, 30.0, 15.0, 15.0),
)
_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
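# Illustration: with the defaults above, len(BBOX_REG_WEIGHTS) == len(IOUS) == 3,
# so the cascade head runs three refinement stages with foreground IoU
# thresholds 0.5, 0.6 and 0.7 respectively.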
# ---------------------------------------------------------------------------- #
# Mask Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD = CN()
# The following options were added by APD
_C.MODEL.ROI_MASK_HEAD.CUSTOM_NAME = "CustomMaskRCNNConvUpsampleHeadAPD"
_C.MODEL.ROI_MASK_HEAD.INIT_ACTIVATED_MASK_HEAD = "standard"
_C.MODEL.ROI_MASK_HEAD.N_MASKS_PER_ROI = 1
_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head
_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
# Normalization method for the convolution layers.
# Options: "" (no norm), "GN", "SyncBN".
_C.MODEL.ROI_MASK_HEAD.NORM = ""
# Whether to use class-agnostic mask prediction
_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Keypoint Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_KEYPOINT_HEAD = CN()
_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO.
# Images with too few (or no) keypoints are excluded from training.
_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
# Normalize by the total number of visible keypoints in the minibatch if True.
# Otherwise, normalize by the total number of keypoints that could ever exist
# in the minibatch.
# The keypoint softmax loss is only calculated on visible keypoints.
# Since the number of visible keypoints can vary significantly between
# minibatches, this has the effect of up-weighting the importance of
# minibatches with few visible keypoints. (Imagine the extreme case of
# only one visible keypoint versus N: in the case of N, each one
# contributes 1/N to the gradient compared to the single keypoint
# determining the gradient direction). Instead, we can normalize the
# loss by the total number of keypoints, if it were the case that all
# keypoints were visible in a full minibatch. (Returning to the example,
# this means that the one visible keypoint contributes as much as each
# of the N keypoints.)
_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
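# Worked example (illustrative numbers, assuming the COCO setting of 17
# keypoints per instance): suppose a minibatch contains two person instances
# with 3 and 17 visible keypoints. With NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS set
# to True the summed keypoint loss is divided by 20 (the visible count); with
# False it is divided by the number of keypoints that could exist in the
# minibatch (2 * 17 = 34), which down-weights minibatches with few visible
# keypoints.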
# Multi-task loss weight to use for keypoints
# Recommended values:
# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
# Type of pooling operation applied to the incoming feature map for each RoI
_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
# ---------------------------------------------------------------------------- #
# Semantic Segmentation Head
# ---------------------------------------------------------------------------- #
_C.MODEL.SEM_SEG_HEAD = CN()
_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
# the corresponding pixel.
_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
# Number of classes in the semantic segmentation head
_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
# Number of channels in the 3x3 convs inside semantic-FPN heads.
_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
# Normalization method for the convolution layers. Options: "" (no norm), "GN".
_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
_C.MODEL.PANOPTIC_FPN = CN()
# Scaling of all losses from instance detection / segmentation head.
_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
# options when combining instance & semantic segmentation outputs
_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True})
_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
# ---------------------------------------------------------------------------- #
# RetinaNet Head
# ---------------------------------------------------------------------------- #
_C.MODEL.RETINANET = CN()
# This is the number of foreground classes.
_C.MODEL.RETINANET.NUM_CLASSES = 80
_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
# Convolutions to use in the cls and bbox tower
# NOTE: this doesn't include the last conv for logits
_C.MODEL.RETINANET.NUM_CONVS = 4
# IoU overlap ratio [bg, fg] for labeling anchors.
# Anchors with < bg are labeled negative (0)
# Anchors with >= bg and < fg are ignored (-1)
# Anchors with >= fg are labeled positive (1)
_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
# Prior prob for rare case (i.e. foreground) at the beginning of training.
# This is used to set the bias for the logits layer of the classifier subnet.
# This improves training stability in the case of heavy class imbalance.
_C.MODEL.RETINANET.PRIOR_PROB = 0.01
# Inference cls score threshold, only anchors with score > INFERENCE_TH are
# considered for inference (to improve speed)
_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Loss parameters
_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
# ---------------------------------------------------------------------------- #
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS = CN()
_C.MODEL.RESNETS.DEPTH = 50
_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Options: "FrozenBN", "GN", "SyncBN", "BN"
_C.MODEL.RESNETS.NORM = "FrozenBN"
# Baseline width of each group.
# Scaling this parameter will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
# Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet.
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# Apply Deformable Convolution in stages
# Specify if apply deform_conv on Res2, Res3, Res4, Res5
_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
# Use False for DeformableV1.
_C.MODEL.RESNETS.DEFORM_MODULATED = False
# Number of groups in deformable conv.
_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
# See detectron2/solver/build.py for LR scheduler options
_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0001
# The weight decay that's applied to parameters of normalization layers
# (typically the affine transformation)
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000,)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of images per batch across all machines.
# If we have 16 GPUs and IMS_PER_BATCH = 32,
# each GPU will see 2 images per batch.
_C.SOLVER.IMS_PER_BATCH = 16
# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
# biases. This is not useful (at least for recent models). You should avoid
# changing these and they exist only to reproduce Detectron v1 training if
# desired.
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = _C.SOLVER.WEIGHT_DECAY
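# Note: the assignment above copies the current float value of WEIGHT_DECAY
# (0.0001) at definition time; overriding SOLVER.WEIGHT_DECAY later from a
# config file or the command line does not automatically update
# WEIGHT_DECAY_BIAS.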
# ---------------------------------------------------------------------------- #
# Specific test options
# ---------------------------------------------------------------------------- #
_C.TEST = CN()
# For end-to-end tests to verify the expected accuracy.
# Each item is [task, metric, value, tolerance]
# e.g.: [['bbox', 'AP', 38.5, 0.2]]
_C.TEST.EXPECTED_RESULTS = []
# The period (in terms of steps) to evaluate the model during training.
# Set to 0 to disable.
_C.TEST.EVAL_PERIOD = 0
# The sigmas used to calculate keypoint OKS.
# When empty it will use the defaults in COCO.
# Otherwise it should have the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
_C.TEST.KEYPOINT_OKS_SIGMAS = []
# Maximum number of detections to return per image during inference (100 is
# based on the limit established for the COCO dataset).
_C.TEST.DETECTIONS_PER_IMAGE = 100
_C.TEST.AUG = CN({"ENABLED": False})
_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
_C.TEST.AUG.MAX_SIZE = 4000
_C.TEST.AUG.FLIP = True
_C.TEST.PRECISE_BN = CN({"ENABLED": False})
_C.TEST.PRECISE_BN.NUM_ITER = 200
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# Directory where output files are written
_C.OUTPUT_DIR = "./output"
# Set seed to negative to fully randomize everything.
# Set seed to positive to use a fixed seed. Note that a fixed seed does not
# guarantee fully deterministic behavior.
_C.SEED = -1
# Benchmark different cudnn algorithms. It has large overhead for about 10k
# iterations. It usually hurts total time, but can benefit for certain models.
_C.CUDNN_BENCHMARK = False
# global config is for quick hack purposes.
# You can set them in command line or config files,
# and access it with:
#
# from detectron2.config import global_cfg
# print(global_cfg.HACK)
#
# Do not commit any configs into it.
_C.GLOBAL = CN()
_C.GLOBAL.HACK = 1.0
| 43.707018 | 100 | 0.657649 |
01926d7b8f54545bf8128551cabca87802dcde4c | 495 | py | Python | rooms/admin.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | ["MIT"] | 24 | 2020-02-01T17:22:47.000Z | 2020-10-24T19:49:36.000Z | rooms/admin.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | ["MIT"] | 16 | 2020-02-01T14:30:15.000Z | 2020-08-13T20:49:56.000Z | rooms/admin.py | josylad/RoomScout | a3d067dd67dfdd43702ea2e89064213dbd469157 | ["MIT"] | 6 | 2020-02-01T22:07:46.000Z | 2021-03-05T14:05:27.000Z |
from django.contrib import admin
from .models import Room, Inquiry, RoomLike
class RoomAdmin(admin.ModelAdmin):
list_display = ('name', 'house', 'price', 'is_available', 'created_at', 'updated_at')
class InquiryAdmin(admin.ModelAdmin):
list_display = ('user', 'room', 'status')
class RoomLikeAdmin(admin.ModelAdmin):
list_display = ('user', 'room')
admin.site.register(Room, RoomAdmin)
admin.site.register(Inquiry, InquiryAdmin)
admin.site.register(RoomLike, RoomLikeAdmin)
| 23.571429 | 89 | 0.737374 |
c5cefff8f9c4ccbc6d33f80fe4580fb68a686279 | 5,950 | py | Python | dpmhm/datasets/ims/ims.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | dpmhm/datasets/ims/ims.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | dpmhm/datasets/ims/ims.py | yanncalec/dpmhm | 0a242bc8add0ba1463bb2b63b2c15abb80b83fa7 | [
"MIT"
] | null | null | null | """ims dataset."""
import os
import pathlib
import itertools
import json
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import pandas as pd
# from scipy.io import loadmat
# TODO(ims): Markdown description that will appear on the catalog page.
_DESCRIPTION = """
IMS Bearing Data Set.
Test-to-failure experiments on bearings. The data set was provided by the Center for Intelligent Maintenance Systems (IMS), University of Cincinnati.
Description
===========
An AC motor, coupled by a rub belt, keeps the rotation speed constant. The four bearings are mounted on the same shaft and are force-lubricated by a circulation system that regulates the flow and the temperature. The “Readme Document for IMS Bearing Data” provided with the download states that the test was stopped when the accumulation of debris on a magnetic plug exceeded a certain level, indicating the possibility of an impending failure. The four bearings are all of the same type: double-row pillow-block rolling element bearings.
Three (3) data sets are included in the data packet. Each data set describes a test-to-failure experiment. Each data set consists of individual files that are 1-second vibration signal snapshots recorded at specific intervals. Each file consists of 20,480 points with the sampling rate set at 20 kHz. The file name indicates when the data was collected. Each record (row) in the data file is a data point. Data collection was facilitated by NI DAQ Card 6062E. Larger intervals between time stamps (shown in the file names) indicate that the experiment was resumed on the next working day.
For more details, see the descriptions in
- `Readme Document for IMS Bearing Data.pdf` included in the downloaded data.
Homepage
--------
http://imscenter.net/
http://ti.arc.nasa.gov/project/prognostic-data-repository
Download
--------
https://ti.arc.nasa.gov/c/3/
Original Data
=============
Format: text
Sampling rate: 20480 Hz
Size: 6.1 Gb
Notes
=====
- There is no `label` in this package since this is a run-to-failure dataset.
- The original data has a single `.7z` file which extracts to three subsets.
- The extracted subfolder named `4th_test` actually corresponds to the 3rd test.
"""
_CHARACTERISTICS_BEARING = {
'Pitch diameter': 71.5, # mm
'Rolling element diameter': 8.4, # mm
'Number of rolling element per row': 16,
'Contact angle': 15.17, # degree
'Static load': 26690 # N
}
_CHARACTERISTICS_TESTRIG = {
'Shaft frequency': 33.3, # Hz
'Ball Pass Frequency Outer race (BPFO)': 236, # Hz
'Ball Pass Frequency Inner race (BPFI)': 297, # Hz
'Ball Spin Frequency (BSF)': 278, # Hz
'Fundamental Train Frequency (FTF)': 15 # Hz
}
_CITATION = """
- Hai Qiu, Jay Lee, Jing Lin. “Wavelet Filter-based Weak Signature Detection Method and its Application on Roller Bearing Prognostics.” Journal of Sound and Vibration 289 (2006) 1066-1090
- J. Lee, H. Qiu, G. Yu, J. Lin, and Rexnord Technical Services (2007). IMS, University of Cincinnati. "Bearing Data Set", NASA Ames Prognostics Data Repository (http://ti.arc.nasa.gov/project/prognostic-data-repository), NASA Ames Research Center, Moffett Field, CA
"""
class IMS(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for ims dataset."""
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
MANUAL_DOWNLOAD_INSTRUCTIONS = """
  Due to unsupported archive formats (7zip, rar), automatic download & extraction is not supported in this package. Please download all data from
  https://ti.arc.nasa.gov/c/3/
  extract all files, then proceed with the installation manually.
"""
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
# TODO(ims): Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'signal': tfds.features.Tensor(shape=(None,None,1), encoding='zlib', dtype=tf.float64),
# {
# 'dataset1': tfds.features.Tensor(shape=(None,8), dtype=tf.float64),
# 'dataset2': tfds.features.Tensor(shape=(None,4), dtype=tf.float64),
# 'dataset3': tfds.features.Tensor(shape=(None,4), dtype=tf.float64),
# },
# Note: there is no 'label' here since this is a run-to-failure dataset.
'metadata': {
# 'SamplingRate': tf.uint32,
# 'RotatingSpeed': tf.float32,
'OriginalSplit': tf.string,
'FileName': tf.string,
},
}),
# If there's a common (input, target) tuple from the
# features, specify them here. They'll be used if
# `as_supervised=True` in `builder.as_dataset`.
supervised_keys=None,
homepage='http://imscenter.net/',
citation=_CITATION,
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
if dl_manager._manual_dir.exists(): # prefer to use manually downloaded data
datadir = dl_manager._manual_dir
else: # automatically download data
# datadir = dl_manager.download_and_extract(_URL)
raise FileNotFoundError(self.MANUAL_DOWNLOAD_INSTRUCTIONS)
return {
'dataset1': self._generate_examples(datadir / '1st_test', 'dataset1'),
'dataset2': self._generate_examples(datadir / '2nd_test', 'dataset2'),
'dataset3': self._generate_examples(datadir / '4th_test' / 'txt', 'dataset3'),
}
def _generate_examples(self, path, mode):
"""Yields examples."""
for fp in path.glob('*'):
x = pd.read_csv(fp, sep='\t',header=None).values[:,:,np.newaxis]
metadata = {
# 'SamplingRate': 20480,
# 'RotatingSpeed': 33.3,
'OriginalSplit': mode,
'FileName': fp.name,
}
yield hash(frozenset(metadata.items())), {
'signal': x,
'metadata': metadata,
}
| 39.666667 | 577 | 0.690924 |
b574fc25a709c1889ddaeb32c7d4a60de85f8029 | 4,651 | py | Python | myvenv1/lib/python3.7/site-packages/dash_core_components/Checklist.py | tangcc35/MarsWeather | 8b7f26e23eedaa4dc78c82799b664be9016981b9 | [
"MIT"
] | 3 | 2020-02-04T21:39:20.000Z | 2020-11-17T19:07:07.000Z | myvenv1/lib/python3.7/site-packages/dash_core_components/Checklist.py | tangcc35/MarsWeather | 8b7f26e23eedaa4dc78c82799b664be9016981b9 | [
"MIT"
] | 12 | 2020-06-06T01:22:26.000Z | 2022-03-12T00:13:42.000Z | myvenv1/lib/python3.7/site-packages/dash_core_components/Checklist.py | tangcc35/MarsWeather | 8b7f26e23eedaa4dc78c82799b664be9016981b9 | [
"MIT"
] | 2 | 2021-04-14T20:15:27.000Z | 2021-05-10T18:16:35.000Z | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Checklist(Component):
"""A Checklist component.
Checklist is a component that encapsulates several checkboxes.
The values and labels of the checklist are specified in the `options`
property and the checked items are specified with the `value` property.
Each checkbox is rendered as an input with a surrounding label.
Keyword arguments:
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- options (dict; optional): An array of options. options has the following type: list of dicts containing keys 'label', 'value', 'disabled'.
Those keys have the following types:
- label (string | number; required): The checkbox's label
- value (string | number; required): The value of the checkbox. This value
corresponds to the items specified in the
`value` property.
- disabled (boolean; optional): If true, this checkbox is disabled and can't be clicked on.
- value (list of string | numbers; optional): The currently selected value
- className (string; optional): The class of the container (div)
- style (dict; optional): The style of the container (div)
- inputStyle (dict; optional): The style of the <input> checkbox element
- inputClassName (string; default ''): The class of the <input> checkbox element
- labelStyle (dict; optional): The style of the <label> that wraps the checkbox input
and the option's label
- labelClassName (string; default ''): The class of the <label> that wraps the checkbox input
and the option's label
- loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
Those keys have the following types:
- is_loading (boolean; optional): Determines if the component is loading or not
- prop_name (string; optional): Holds which property is loading
- component_name (string; optional): Holds the name of the component that is loading
- persistence (boolean | string | number; optional): Used to allow user interactions in this component to be persisted when
the component - or the page - is refreshed. If `persisted` is truthy and
hasn't changed from its previous value, a `value` that the user has
changed while using the app will keep that change, as long as
the new `value` also matches what was given originally.
Used in conjunction with `persistence_type`.
- persisted_props (list of a value equal to: 'value's; default ['value']): Properties whose user interactions will persist after refreshing the
component or the page. Since only `value` is allowed this prop can
normally be ignored.
- persistence_type (a value equal to: 'local', 'session', 'memory'; default 'local'): Where persisted user changes will be stored:
memory: only kept in memory, reset on page refresh.
local: window.localStorage, data is kept after the browser quits.
session: window.sessionStorage, data is cleared once the browser quits."""
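    # Minimal usage sketch (assuming dash_core_components is imported as dcc in
    # a Dash app layout):
    #
    #   dcc.Checklist(
    #       id='my-checklist',
    #       options=[{'label': 'New York', 'value': 'NYC'},
    #                {'label': 'Montreal', 'value': 'MTL'}],
    #       value=['NYC'],
    #   )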
@_explicitize_args
def __init__(self, id=Component.UNDEFINED, options=Component.UNDEFINED, value=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, inputStyle=Component.UNDEFINED, inputClassName=Component.UNDEFINED, labelStyle=Component.UNDEFINED, labelClassName=Component.UNDEFINED, loading_state=Component.UNDEFINED, persistence=Component.UNDEFINED, persisted_props=Component.UNDEFINED, persistence_type=Component.UNDEFINED, **kwargs):
self._prop_names = ['id', 'options', 'value', 'className', 'style', 'inputStyle', 'inputClassName', 'labelStyle', 'labelClassName', 'loading_state', 'persistence', 'persisted_props', 'persistence_type']
self._type = 'Checklist'
self._namespace = 'dash_core_components'
self._valid_wildcard_attributes = []
self.available_properties = ['id', 'options', 'value', 'className', 'style', 'inputStyle', 'inputClassName', 'labelStyle', 'labelClassName', 'loading_state', 'persistence', 'persisted_props', 'persistence_type']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Checklist, self).__init__(**args)
| 66.442857 | 450 | 0.742636 |
216803e7c58be7f82622fc59f08b1d43ba229d5d | 2,570 | py | Python | src/pydocstyle/cli.py | Nurdok/pep257 | cdce1da5acc2f16e118c808e88929508107c414b | [
"MIT"
] | 1 | 2018-12-26T03:02:11.000Z | 2018-12-26T03:02:11.000Z | src/pydocstyle/cli.py | Nurdok/pep257 | cdce1da5acc2f16e118c808e88929508107c414b | [
"MIT"
] | 3 | 2018-02-08T21:01:01.000Z | 2018-02-08T21:04:14.000Z | src/pydocstyle/cli.py | Nurdok/pep257 | cdce1da5acc2f16e118c808e88929508107c414b | [
"MIT"
] | 3 | 2020-02-03T13:24:54.000Z | 2020-02-03T13:45:37.000Z | """Command line interface for pydocstyle."""
import logging
import sys
from .utils import log
from .violations import Error
from .config import ConfigurationParser, IllegalConfiguration
from .checker import check
__all__ = ('main', )
class ReturnCode(object):
no_violations_found = 0
violations_found = 1
invalid_options = 2
def run_pydocstyle():
log.setLevel(logging.DEBUG)
conf = ConfigurationParser()
setup_stream_handlers(conf.get_default_run_configuration())
try:
conf.parse()
except IllegalConfiguration:
return ReturnCode.invalid_options
run_conf = conf.get_user_run_configuration()
# Reset the logger according to the command line arguments
setup_stream_handlers(run_conf)
log.debug("starting in debug mode.")
Error.explain = run_conf.explain
Error.source = run_conf.source
errors = []
try:
for filename, checked_codes, ignore_decorators in \
conf.get_files_to_check():
errors.extend(check((filename,), select=checked_codes,
ignore_decorators=ignore_decorators))
except IllegalConfiguration as error:
# An illegal configuration file was found during file generation.
log.error(error.args[0])
return ReturnCode.invalid_options
count = 0
for error in errors:
if hasattr(error, 'code'):
sys.stdout.write('%s\n' % error)
count += 1
if count == 0:
exit_code = ReturnCode.no_violations_found
else:
exit_code = ReturnCode.violations_found
if run_conf.count:
print(count)
return exit_code
def main():
"""Run pydocstyle as a script."""
try:
sys.exit(run_pydocstyle())
except KeyboardInterrupt:
pass
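# Typical invocation (a sketch; available flags depend on the installed version):
#
#   $ pydocstyle --count src/
#
# The process exits with 0 when no violations are found, 1 when violations are
# reported, and 2 when the options or configuration are invalid (see ReturnCode).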
def setup_stream_handlers(conf):
"""Setup logging stream handlers according to the options."""
class StdoutFilter(logging.Filter):
def filter(self, record):
return record.levelno in (logging.DEBUG, logging.INFO)
log.handlers = []
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.WARNING)
stdout_handler.addFilter(StdoutFilter())
if conf.debug:
stdout_handler.setLevel(logging.DEBUG)
elif conf.verbose:
stdout_handler.setLevel(logging.INFO)
else:
stdout_handler.setLevel(logging.WARNING)
log.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
log.addHandler(stderr_handler)
| 27.052632 | 73 | 0.684436 |
be2f3b33cb98c3c7b3ff5ee0f771b63684da569b | 3,081 | py | Python | fonts/font5var.py | darkgrue/dc26-fur-scripts | 000e81f8dc3bdac9420634efaf42251c64e28250 | [
"MIT"
] | null | null | null | fonts/font5var.py | darkgrue/dc26-fur-scripts | 000e81f8dc3bdac9420634efaf42251c64e28250 | [
"MIT"
] | null | null | null | fonts/font5var.py | darkgrue/dc26-fur-scripts | 000e81f8dc3bdac9420634efaf42251c64e28250 | [
"MIT"
] | null | null | null | font = {
' ':bytearray([0x00,0x00]),
'!':bytearray([0x17,0x17]),
'"':bytearray(),
'#':bytearray(),
'$':bytearray(),
'%':bytearray(),
'&':bytearray(),
'\'':bytearray(),
'(':bytearray(),
')':bytearray(),
'*':bytearray(),
'+':bytearray(),
',':bytearray([0x10,0x18]),
'-':bytearray(),
'.':bytearray([0x10,0x10]),
'/':bytearray(),
'0':bytearray(),
'1':bytearray(),
'2':bytearray(),
'3':bytearray(),
'4':bytearray(),
'5':bytearray(),
'6':bytearray(),
'7':bytearray(),
'8':bytearray(),
'9':bytearray(),
':':bytearray([0x0a]),
';':bytearray([0x10,0x0a]),
'<':bytearray(),
'=':bytearray(),
'>':bytearray(),
'?':bytearray([0x01,0x15,0x02]),
'@':bytearray(),
'A':bytearray([0x1e,0x05,0x05,0x1e]),
'B':bytearray([0x1f,0x15,0x15,0x0e]),
'C':bytearray([0x0e,0x11,0x11,0x11]),
'D':bytearray([0x1f,0x11,0x11,0x0e]),
'E':bytearray([0x1f,0x15,0x15,0x11]),
'F':bytearray([0x1f,0x05,0x05,0x01]),
'G':bytearray([0x1f,0x11,0x15,0x1d]),
'H':bytearray([0x1f,0x04,0x04,0x1f]),
'I':bytearray([0x11,0x1f,0x11]),
'J':bytearray([0x18,0x11,0x11,0x1f]),
'K':bytearray([0x1f,0x04,0x07,0x1c]),
'L':bytearray([0x1f,0x10,0x10,0x10]),
'M':bytearray([0x1f,0x01,0x1f,0x01,0x1f]),
'N':bytearray([0x1f,0x01,0x02,0x1f]),
'O':bytearray([0x0e,0x11,0x11,0x0e]),
'P':bytearray([0x1f,0x05,0x05,0x07]),
'Q':bytearray([0x1f,0x15,0x3d,0x11,0x1f]),
'R':bytearray([0x1f,0x05,0x1d,0x07]),
'S':bytearray([0x16,0x15,0x15,0x1d]),
'T':bytearray([0x01,0x01,0x1f,0x01,0x01]),
'U':bytearray([0x0f,0x10,0x10,0x0f]),
'V':bytearray([0x1f,0x10,0x08,0x07]),
'W':bytearray([0x0f,0x10,0x1c,0x10,0x0f]),
'X':bytearray([0x1c,0x04,0x1f,0x04,0x07]),
'Y':bytearray([0x03,0x04,0x1c,0x04,0x03]),
'Z':bytearray([0x19,0x15,0x13,0x11]),
'[':bytearray(),
'\\':bytearray(),
']':bytearray(),
'^':bytearray(),
'_':bytearray(),
'`':bytearray(),
'a':bytearray([0x18,0x14,0x1c]),
'b':bytearray([0x1f,0x14,0x1c]),
'c':bytearray([0x08,0x14,0x14]),
'd':bytearray([0x18,0x14,0x1f]),
'e':bytearray([0x0c,0x1a,0x14]),
'f':bytearray([0x1e,0x05,0x01]),
'g':bytearray([0x16,0x1d,0x0f]),
'h':bytearray([0x1f,0x04,0x18]),
'i':bytearray([0x05,0x0d]),
'j':bytearray([0x11,0x0d]),
'k':bytearray([0x1f,0x0c,0x14]),
'l':bytearray([0x1f]),
'm':bytearray([0x1c,0x04,0x18,0x04,0x18]),
'n':bytearray([0x1c,0x04,0x18]),
'o':bytearray([0x08,0x14,0x08]),
'p':bytearray([0x3e,0x0a,0x06]),
'q':bytearray([0x0c,0x0a,0x3e,0x10]),
'r':bytearray([0x1c,0x04,0x04]),
's':bytearray([0x14,0x12,0x0a]),
't':bytearray([0x04,0x1e,0x04]),
'u':bytearray([0x0c,0x10,0x1c]),
'v':bytearray([0x1c,0x10,0x0c]),
'w':bytearray([0x1c,0x10,0x0c,0x10,0x0c]),
'x':bytearray([0x10,0x0c,0x18,0x04]),
'y':bytearray([0x12,0x14,0x0e]),
'z':bytearray([0x1a,0x16,0x12]),
'{':bytearray(),
'|':bytearray(),
'}':bytearray(),
'~':bytearray(),
}
| 31.762887 | 46 | 0.562804 |