hexsha stringlengths 40 40 | size int64 5 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 236 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses sequence | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 236 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses sequence | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 236 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses sequence | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 1.04M | avg_line_length float64 1.3 664k | max_line_length int64 1 1.01M | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b61ff226cc8bc5c97a79f089b48d89c585563de5 | 10,549 | py | Python | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1CertificateSigningRequestCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'last_update_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'last_update_time': 'lastUpdateTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1CertificateSigningRequestCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._last_update_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if last_update_time is not None:
self.last_update_time = last_update_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
:return: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1CertificateSigningRequestCondition.
lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
:param last_transition_time: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def last_update_time(self):
"""Gets the last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
lastUpdateTime is the time of the last update to this condition # noqa: E501
:return: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""Sets the last_update_time of this V1CertificateSigningRequestCondition.
lastUpdateTime is the time of the last update to this condition # noqa: E501
:param last_update_time: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
:type: datetime
"""
self._last_update_time = last_update_time
@property
def message(self):
"""Gets the message of this V1CertificateSigningRequestCondition. # noqa: E501
message contains a human readable message with details about the request state # noqa: E501
:return: The message of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1CertificateSigningRequestCondition.
message contains a human readable message with details about the request state # noqa: E501
:param message: The message of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1CertificateSigningRequestCondition. # noqa: E501
reason indicates a brief reason for the request state # noqa: E501
:return: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1CertificateSigningRequestCondition.
reason indicates a brief reason for the request state # noqa: E501
:param reason: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1CertificateSigningRequestCondition. # noqa: E501
status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
:return: The status of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CertificateSigningRequestCondition.
status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
:param status: The status of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1CertificateSigningRequestCondition. # noqa: E501
type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
:return: The type of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1CertificateSigningRequestCondition.
type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
:param type: The type of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CertificateSigningRequestCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CertificateSigningRequestCondition):
return True
return self.to_dict() != other.to_dict()
| 39.807547 | 675 | 0.661864 |
ef0ecc7c942476be2cb92d1d1a13b7b292ce8a16 | 1,538 | py | Python | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pathlib import Path
import subprocess
from versions import get_versions_data, write_versions_json
INDEX_HTML = r'''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Refresh" content="0; url={default_branch}" />
</head>
<body>
<p>Go to <a href="{default_branch}">default documentation</a>.</p>
</body>
</html>
'''
def write_index_html(default_branch):
"""Write an index.html that redirects to the DEFAULT_BRANCH."""
with open("index.html", "w") as out_fh:
out_fh.write(INDEX_HTML.format(default_branch=default_branch))
subprocess.run(['git', 'add', 'index.html'], check=True)
def find_downloads(folder):
"""Find files in the 'download' subfolder of the given `folder`."""
downloads = []
for filename in Path(folder).glob(r'download/*'):
label = "".join(filename.suffixes).replace('.', '').lower()
if len(label) > 0:
downloads.append((label, str(filename)))
return downloads
def main():
"""Main function."""
print("Post-processing documentation on gh-pages")
print("Gather versions info")
versions_data = get_versions_data(find_downloads=find_downloads)
latest_release = versions_data['latest_release']
if latest_release is None:
latest_release = 'master'
print("Write index.html")
write_index_html(latest_release)
print("Write versions.json")
write_versions_json(versions_data, outfile='versions.json')
print("DONE post-processing")
if __name__ == "__main__":
main()
| 29.018868 | 71 | 0.678153 |
eb688de90834d6518898aa2ec1fef74fddf96e40 | 404 | gyp | Python | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cr_components_resources',
'type': 'none',
'dependencies': [
'chromeos/compiled_resources2.gyp:*',
'certificate_manager/compiled_resources2.gyp:*',
],
},
]
}
| 25.25 | 72 | 0.636139 |
7811bebd99ec25f433c0a6ed27e8a627c3b61246 | 1,982 | py | Python | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | 1 | 2021-09-23T09:42:32.000Z | 2021-09-23T09:42:32.000Z | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import json
import sys
from os import path
import rospy
from geometry_msgs.msg import PoseStamped
from rospkg import RosPack
def main(args, goal_pub):
global config
msg = PoseStamped()
if args['point']:
x, y, z, w = args['point']
else:
try:
x, y, z, w = config[args['loc']]
except KeyError:
raise KeyError(f'Location {args["loc"]} does not exist')
msg.header.frame_id = 'map'
msg.pose.position.x = x
msg.pose.position.y = y
msg.pose.orientation.z = z
msg.pose.orientation.w = w
while rospy.get_param('/status_monitor/status_code') != 0:
goal_pub.publish(msg)
if args['wait_until_end']:
while rospy.get_param('/status_monitor/status_code') != 3:
continue
base = RosPack().get_path('rcj_pcms_base')
config = json.load(open(path.join(base, 'config/points.json')))
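# Assumed shape of config/points.json (illustrative example, not taken from the repo):
#   {"door": [0.5, 1.2, 0.0, 1.0]}   # location name -> [x, y, z, w]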
if __name__ == '__main__':
rospy.init_node('go_to_point', anonymous=True)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--point', nargs=4,
type=float,
help='point for robot to go, (x, y, z. w)')
parser.add_argument('-l', '--loc', type=str,
help='Publish point based config file "points.json"'
)
parser.add_argument('--wait-until-end', action='store_true',
help="Wait until the slam has end")
args = vars(parser.parse_args())
goal_pub = rospy.Publisher(
'/move_base_simple/goal',
PoseStamped,
queue_size=1
)
try:
if not (args['point'] or args['loc']):
raise Exception('Must specify -p or -l')
elif args['point'] and args['loc']:
raise Exception('Can only specify one of them')
main(args, goal_pub)
sys.exit(0)
except Exception as e:
print(f'Program ended due to: {e}')
sys.exit(1)
| 29.147059 | 76 | 0.584763 |
edb80171102d708b01a20f8f8778a9ca45f6e6e6 | 4,318 | py | Python | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import subprocess
import os
FS_TYPE = "ext4"
BLOCKSIZE = 4096
def args_parse(argv):
parser = argparse.ArgumentParser(description='mkextimage.py')
parser.add_argument("src_dir", help="The source file for sload.")
parser.add_argument("device", help="The deivce for mkfs.")
parser.add_argument("mount_point", help="The filesystem mountpoint.")
parser.add_argument("fs_size", help="The size of filesystem.")
parser.add_argument("--fs_type", help="The filesystem type.")
parser.add_argument("--dac_config",
help="The path of dac config to e2fsdroid.")
parser.add_argument("--inode_size", help="The inode size to mke2fs.")
parser.add_argument("--file_context",
help="The path of file_context to e2fsdroid.")
parser.add_argument("--root_dir", help="The root dir for root image.")
parser.add_argument("--journal_size", help="The journal_size for mke2fs.")
parser.add_argument("--reserve_percent",
help="The reserve_percent for mke2fs.")
parser.add_argument("--extend_opts", nargs='+',
help="The extend opt for mke2fs.")
args = parser.parse_known_args(argv)[0]
return args
def run_cmd(cmd):
res = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sout, serr = res.communicate()
return res.pid, res.returncode, sout, serr
def build_run_mke2fs(args):
mke2fs_opts = ""
mke2fs_cmd = ""
is_data = False
if "data" in args.mount_point:
is_data = True
if args.extend_opts:
mke2fs_opts += " -E " + ",".join(args.extend_opts)
if args.inode_size:
mke2fs_opts += " -I " + args.inode_size
else:
mke2fs_opts += " -I " + "256"
if args.journal_size:
mke2fs_opts += " -J size=" + args.journal_size
elif not is_data:
mke2fs_opts += " -O ^has_journal"
if args.reserve_percent:
mke2fs_opts += " -m " + args.reserve_percent
elif not is_data:
mke2fs_opts += " -m 0"
mke2fs_opts += " -L " + args.mount_point + " -M " + args.mount_point
blocks = int(int(args.fs_size) / BLOCKSIZE)
mke2fs_cmd += ("mke2fs " + str(mke2fs_opts) + " -t " + FS_TYPE + " -b "
+ str(BLOCKSIZE) + " " + args.device + " " + str(blocks))
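    # The assembled command looks roughly like (illustrative values only):
    #   mke2fs -I 256 -O ^has_journal -m 0 -L system -M system -t ext4 -b 4096 <device> <blocks>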
res = run_cmd(mke2fs_cmd)
if res[1] != 0:
print("info: " + mke2fs_cmd)
print("pid " + str(res[0]) + " ret " + str(res[1]) + "\n" +
res[2].decode() + res[3].decode())
return res[1]
def build_run_e2fsdroid(args):
e2fsdroid_opts = ""
e2fsdroid_cmd = ""
if not args.extend_opts or not "android_sparse" in args.extend_opts:
e2fsdroid_opts += " -e"
if args.dac_config:
e2fsdroid_opts += " -C " + args.dac_config
if args.file_context:
e2fsdroid_opts += " -S " + args.file_context
e2fsdroid_cmd += ("e2fsdroid" + e2fsdroid_opts + " -f " +
args.src_dir + " -a " + args.mount_point + " " + args.device)
res = run_cmd(e2fsdroid_cmd)
if res[1] != 0:
print("info: " + e2fsdroid_cmd)
print("pid " + str(res[0]) + " ret " + str(res[1]) + "\n" +
res[2].decode() + res[3].decode())
return res[1]
def build(args):
args = args_parse(args)
res = build_run_mke2fs(args)
if res != 0:
print("error run mke2fs errno: " + str(res))
sys.exit(1)
res = build_run_e2fsdroid(args)
if res != 0:
print("error run e2fsdroid errno: " + str(res))
os.remove(args.device)
sys.exit(2)
if __name__ == '__main__':
build(sys.argv[1:])
| 33.734375 | 78 | 0.619037 |
ee70cabd99219dee1472a3a0e566c6b2960ba004 | 163 | py | Python | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | 1 | 2022-02-09T16:02:17.000Z | 2022-02-09T16:02:17.000Z | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | 321 | 2021-07-16T15:22:20.000Z | 2021-07-19T20:57:51.000Z | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.apps import AppConfig
class TermsAndConditionsConfig(AppConfig):
name = "modules.terms_and_conditions"
verbose_name = "Terms and Conditions"
| 23.285714 | 42 | 0.785276 |
d9ead0c192eb7fafb7e0d312f76149804e905a54 | 7,786 | py | Python | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | import pgautofailover_utils as pgautofailover
import ssl_cert_utils as cert
import subprocess
import os
cluster = None
monitor = None
node1 = None
node2 = None
def setup_module():
global cluster
cluster = pgautofailover.Cluster()
def teardown_module():
cluster.destroy()
# remove client side setup for certificates too
client_top_directory = os.path.join(os.getenv("HOME"), ".postgresql")
p = subprocess.Popen(["sudo", "-E", '-u', os.getenv("USER"),
'env', 'PATH=' + os.getenv("PATH"),
"rm", "-rf", client_top_directory])
assert(p.wait() == 0)
# also remove certificates we created for the servers
p = subprocess.run(["sudo", "-E", '-u', os.getenv("USER"),
'env', 'PATH=' + os.getenv("PATH"),
"rm", "-rf", "/tmp/certs"])
assert(p.returncode == 0)
def test_000_create_monitor():
global monitor
monitor = cluster.create_monitor("/tmp/enable/monitor")
monitor.run()
monitor.wait_until_pg_is_running()
monitor.check_ssl("off", "prefer")
def test_001_init_primary():
global node1
node1 = cluster.create_datanode("/tmp/enable/node1")
node1.create()
node1.run()
assert node1.wait_until_state(target_state="single")
node1.wait_until_pg_is_running()
node1.check_ssl("off", "prefer", primary=True)
def test_002_create_t1():
node1.run_sql_query("CREATE TABLE t1(a int)")
node1.run_sql_query("INSERT INTO t1 VALUES (1), (2)")
def test_003_init_secondary():
global node2
node2 = cluster.create_datanode("/tmp/enable/node2")
node2.create()
node2.run()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
node2.check_ssl("off", "prefer")
def test_004_maintenance():
print()
print("Enabling maintenance on node2")
node2.enable_maintenance()
assert node2.wait_until_state(target_state="maintenance")
def test_005_enable_ssl_monitor():
monitor.enable_ssl(sslSelfSigned=True, sslMode="require")
monitor.sleep(2) # we signaled, wait some time
monitor.check_ssl("on", "require")
def test_006_enable_ssl_primary():
# we stop pg_autoctl to make it easier for the test to be reliable
# without too much delay/sleep hacking; when doing the `pg_autoctl
# enable ssl` online we need to make sure the signal made it to the
# running process and then was acted upon
node1.stop_pg_autoctl()
node1.enable_ssl(sslSelfSigned=True, sslMode="require")
node1.run()
node1.wait_until_pg_is_running()
node1.check_ssl("on", "require", primary=True)
def test_007_enable_ssl_secondary():
node2.stop_pg_autoctl()
node2.enable_ssl(sslSelfSigned=True, sslMode="require")
node2.run()
node2.wait_until_pg_is_running()
node2.check_ssl("on", "require")
def test_008_disable_maintenance():
print("Disabling maintenance on node2")
node2.disable_maintenance()
assert node2.wait_until_pg_is_running()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
# upgrade to verify full
def test_009_enable_maintenance():
print()
print("Enabling maintenance on node2")
node2.enable_maintenance()
assert node2.wait_until_state(target_state="maintenance")
def test_010_enable_ssl_verify_ca_monitor():
client_top_directory = os.path.join(os.getenv("HOME"), ".postgresql")
print()
print("Creating cluster root certificate")
cluster.create_root_cert(client_top_directory,
basename = "root",
CN = "/CN=root.pgautofailover.ca")
p = subprocess.run(["ls", "-ld",
client_top_directory,
cluster.cert.crt, cluster.cert.csr, cluster.cert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
# now create and sign the CLIENT certificate
print("Creating cluster client certificate")
clientCert = cert.SSLCert(client_top_directory,
basename = "postgresql",
CN = "/CN=autoctl_node")
clientCert.create_signed_certificate(cluster.cert)
p = subprocess.run(["ls", "-ld",
client_top_directory,
clientCert.crt, clientCert.csr, clientCert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
# the root user also needs the certificates, tests are connecting with it
subprocess.run(["ln", "-s", client_top_directory, "/root/.postgresql"])
assert(p.returncode == 0)
p = subprocess.run(["ls", "-l", "/root/.postgresql"],
text=True,
capture_output=True)
print("%s" % p.stdout)
# now create and sign the SERVER certificate for the monitor
print("Creating monitor server certificate")
monitorCert = cert.SSLCert("/tmp/certs/monitor", "server",
"/CN=monitor.pgautofailover.ca")
monitorCert.create_signed_certificate(cluster.cert)
p = subprocess.run(["ls", "-ld",
client_top_directory,
cluster.cert.crt, cluster.cert.csr, cluster.cert.key,
clientCert.crt, clientCert.csr, clientCert.key,
monitorCert.crt, monitorCert.csr, monitorCert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
monitor.enable_ssl(sslCAFile=cluster.cert.crt,
sslServerKey=monitorCert.key,
sslServerCert=monitorCert.crt,
sslMode="verify-ca")
monitor.sleep(2) # we signaled, wait some time
monitor.check_ssl("on", "verify-ca")
def test_011_enable_ssl_verify_ca_primary():
node1Cert = cert.SSLCert("/tmp/certs/node1", "server",
"/CN=node1.pgautofailover.ca")
node1Cert.create_signed_certificate(cluster.cert)
node1.stop_pg_autoctl()
node1.enable_ssl(sslCAFile = cluster.cert.crt,
sslServerKey = node1Cert.key,
sslServerCert = node1Cert.crt,
sslMode="verify-ca")
node1.run()
node1.wait_until_pg_is_running()
node1.check_ssl("on", "verify-ca", primary=True)
def test_012_enable_ssl_verify_ca_secondary():
node2Cert = cert.SSLCert("/tmp/certs/node2", "server",
"/CN=node2.pgautofailover.ca")
node2Cert.create_signed_certificate(cluster.cert)
node2.stop_pg_autoctl()
node2.enable_ssl(sslCAFile = cluster.cert.crt,
sslServerKey = node2Cert.key,
sslServerCert = node2Cert.crt,
sslMode="verify-ca")
node2.run()
node2.wait_until_pg_is_running()
node2.check_ssl("on", "verify-ca")
def test_013_disable_maintenance():
print("Disabling maintenance on node2")
node2.disable_maintenance()
assert node2.wait_until_pg_is_running()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
def test_014_enable_ssl_require_primary():
node1Cert = cert.SSLCert("/tmp/certs/node1", "server",
"/CN=node1.pgautofailover.ca")
node1Cert.create_signed_certificate(cluster.cert)
node1.stop_pg_autoctl()
node1.enable_ssl(sslServerKey = node1Cert.key,
sslServerCert = node1Cert.crt,
sslMode="require")
node1.run()
node2.wait_until_pg_is_running()
node1.check_ssl("on", "require", primary=True)
| 34.451327 | 78 | 0.636142 |
bef868f360c61403e17d12cd7dda002ebac14b20 | 497 | py | Python | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 12 | 2016-04-27T21:25:57.000Z | 2021-10-01T08:33:03.000Z | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 4 | 2017-11-10T13:50:46.000Z | 2021-06-10T19:21:21.000Z | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 4 | 2017-12-19T06:47:41.000Z | 2020-03-24T16:54:48.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-25 17:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('standards_review', '0004_auto_20160816_1205'),
]
operations = [
migrations.AlterField(
model_name='molecule',
name='tags',
field=models.ManyToManyField(blank=True, to='standards_review.MoleculeTag'),
),
]
| 23.666667 | 88 | 0.639839 |
1a34e67bbed7d4659482659d167a66b8102e6dcc | 2,445 | py | Python | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | from core import models
from django.contrib.auth import get_user_model
from django.test import TestCase
from unittest.mock import patch
def sample_user(email='[email protected]', password='testpass'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = '[email protected]'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normallized(self):
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'tst21132')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
""""""
user = get_user_model().objects.create_superuser(
'[email protected]', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_create_tag_str(self):
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
# this set() fx is the __str__ function in the
# Tag.__str__ method in the models.py file.
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 32.171053 | 70 | 0.64499 |
bc69429d3c97ccccf75eb792d3c7fc738cbe1c15 | 642 | py | Python | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 6 | 2015-03-26T11:32:14.000Z | 2017-02-17T02:40:44.000Z | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 1 | 2015-06-07T19:09:33.000Z | 2015-06-23T08:54:50.000Z | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 1 | 2015-04-23T09:13:23.000Z | 2015-04-23T09:13:23.000Z | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(10000)
print len(x)
num_bins = 20
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, density=True, facecolor='green', alpha=0.5)
# add a 'best fit' line
# y = mlab.normpdf(bins, mu, sigma)
# plt.plot(bins, y, 'r--')
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show() | 26.75 | 90 | 0.714953 |
a06596eba9cb41f16483b714bc52536b1507941d | 4,044 | py | Python | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | null | null | null | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | 1 | 2016-01-28T17:05:58.000Z | 2016-01-28T17:05:58.000Z | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | null | null | null | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models,
# but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output
# of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
import requests
class Accounts(models.Model):
id = models.IntegerField(primary_key=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class Meta:
db_table = 'accounts'
class OpenIds(models.Model):
id = models.IntegerField(primary_key=True)
account_id = models.IntegerField(null=True, blank=True)
provider_id = models.IntegerField(null=True, blank=True)
identifier = models.CharField(max_length=255, blank=True)
access_token = models.CharField(max_length=255, blank=True)
id_token = models.CharField(max_length=1024, blank=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class Meta:
db_table = 'open_ids'
def __init__(self, *args, **kwargs):
super(OpenIds, self).__init__(*args, **kwargs)
self._provider = None
@property
def provider(self):
if self._provider is None:
self._provider = Providers.objects.get(id=self.provider_id)
return self._provider
@property
def authorization_header(self):
return {"Authorization": "Bearer %s" % self.access_token}
def get_resource(self, endpoint):
res = requests.get(
endpoint, headers=self.authorization_header)
return res.json()
def post_resource(self, endpoint, **kwargs):
res = requests.post(
endpoint, data=kwargs, headers=self.authorization_header)
return res.json()
def get_user_info(self):
return self.get_resource(self.provider.userinfo_endpoint)
def introspect_test(self):
return self.get_resource(self.provider.introspect_endpoint)
def introspect_id_token(self):
return self.post_resource(
self.provider.introspect_endpoint,
token=self.id_token,
token_type_hint="id_token",
)
def introspect_access_token(self):
return self.post_resource(
self.provider.introspect_endpoint,
token=self.access_token,
token_type_hint="access_token",
)
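    # Both introspection helpers above POST the stored token plus a token_type_hint
    # to the provider's introspect endpoint (derived from the userinfo endpoint in
    # Providers.introspect_endpoint below).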
class Providers(models.Model):
id = models.IntegerField(primary_key=True)
account_id = models.IntegerField(null=True, blank=True)
issuer = models.CharField(max_length=255, blank=True)
jwks_uri = models.CharField(max_length=255, blank=True)
name = models.CharField(max_length=255, blank=True)
identifier = models.CharField(max_length=255, blank=True)
secret = models.CharField(max_length=255, blank=True)
scope = models.CharField(max_length=255, blank=True)
host = models.CharField(max_length=255, blank=True)
scheme = models.CharField(max_length=255, blank=True)
authorization_endpoint = models.CharField(max_length=255, blank=True)
token_endpoint = models.CharField(max_length=255, blank=True)
userinfo_endpoint = models.CharField(max_length=255, blank=True)
# dynamic = models.NullBooleanField(null=True, blank=True)
dynamic = models.CharField(max_length=1, null=True, blank=True)
expires_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
jwkset = models.TextField(blank=True, null=True,)
class Meta:
db_table = 'providers'
@property
def introspect_endpoint(self):
return self.userinfo_endpoint.replace(
'user_info', 'introspect')
class SchemaMigrations(models.Model):
version = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'schema_migrations'
| 33.983193 | 73 | 0.699555 |
942e03334ff1906a7790bfac6507dbdf182930c3 | 5,001 | py | Python | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 13 | 2016-03-30T10:45:10.000Z | 2020-12-29T14:15:50.000Z | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 5 | 2017-04-05T14:56:32.000Z | 2020-03-28T20:34:09.000Z | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 1 | 2021-01-12T16:38:49.000Z | 2021-01-12T16:38:49.000Z | from __future__ import print_function
import os
import logging
import subprocess
import sys
import threading
from collections import namedtuple
from contextlib import contextmanager
import six
from django.conf import settings
from django.core.management import call_command
from . import b64pickle, errors
logger = logging.getLogger(__name__)
SUBPROCESS_TIMEOUT = int(os.environ.get('DJANGO_CONCURRENT_TESTS_TIMEOUT', '30'))
SubprocessRun = namedtuple('SubprocessRun', ['manager', 'result'])
class ProcessManager(object):
def __init__(self, cmd):
"""
Kwargs:
cmd (Union[str, List[str]]): `args` arg to `Popen` call
"""
self.cmd = cmd
self.process = None
self.stdout = None
self.stderr = None
self.terminated = False # whether subprocess was terminated by timeout
def run(self, timeout):
"""
Kwargs:
timeout (Float): how long to wait for the subprocess to complete task
Returns:
str: stdout output from subprocess
"""
def target():
env = os.environ.copy()
env['DJANGO_CONCURRENT_TESTS_PARENT_PID'] = str(os.getpid())
self.process = subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
logger.debug('[{pid}] {cmd}'.format(pid=self.process.pid, cmd=' '.join(self.cmd)))
self.stdout, self.stderr = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
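        # communicate() runs in the worker thread above; if the thread is still
        # alive once join(timeout) returns, the subprocess overran the deadline
        # and is terminated below.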
if thread.is_alive():
# we reached the timeout deadline with process still running
if self.process:
logger.debug('[{pid}] reached timeout: terminating...'.format(pid=self.process.pid))
self.process.terminate()
logger.debug('[{pid}] reached timeout: terminated.'.format(pid=self.process.pid))
else:
logger.debug('reached timeout: process did not start.')
self.terminated = True
thread.join()
if self.stderr:
logger.error(self.stderr)
return self.stdout
def run_in_subprocess(f, **kwargs):
"""
Args:
f (Union[function, str]): the function to call, or
the 'dotted module.path.to:function' as a string (NOTE
colon separates the name to import)
**kwargs - kwargs to pass to `function`
Returns:
SubprocessRun: where `<SubprocessRun>.result` is either
<return value> OR <exception raised>
or None if result was empty
NOTE:
`kwargs` must be pickleable
<return value> of `function` must be pickleable
"""
# wrap everything in a catch-all except to avoid hanging the subprocess
try:
serialized_kwargs = b64pickle.dumps(kwargs)
if isinstance(f, six.string_types):
function_path = f
else:
function_path = '{module}:{name}'.format(
module=f.__module__,
name=f.__name__,
)
if not os.environ.get('CONCURRENT_TESTS_NO_SUBPROCESS'):
cmd = [
getattr(settings, 'MANAGE_PY_PATH', './manage.py'),
'concurrent_call_wrapper',
function_path,
'--kwargs=%s' % serialized_kwargs,
]
manager = ProcessManager(cmd)
result = manager.run(timeout=SUBPROCESS_TIMEOUT)
if manager.terminated:
raise errors.TerminatedProcessError(result)
else:
logger.debug('Calling {f} in current process'.format(f=function_path))
manager = None
# TODO: collect stdout and maybe log it from here
result = call_command(
'concurrent_call_wrapper',
function_path,
kwargs=serialized_kwargs,
)
# deserialize the result from subprocess run
# (any error raised when running the concurrent func will be stored in `result`)
return SubprocessRun(
manager=manager,
result=b64pickle.loads(result) if result else None,
)
except Exception as e:
# handle any errors which occurred during setup of subprocess
return SubprocessRun(
manager=manager,
result=errors.WrappedError(e),
)
@contextmanager
def redirect_stdout(to):
original = sys.stdout
sys.stdout = to
yield
sys.stdout = original
@contextmanager
def override_environment(**kwargs):
"""
NOTE:
The values in `kwargs` must be strings else you will get a cryptic:
TypeError: execve() arg 3 contains a non-string value
"""
old_env = os.environ
new_env = os.environ.copy()
new_env.update(kwargs)
os.environ = new_env
yield
os.environ = old_env
| 30.309091 | 100 | 0.596081 |
c984158c7f9cb31b236eb10f777233a69c43da0b | 255 | py | Python | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 41 | 2016-01-21T05:14:45.000Z | 2021-11-24T20:37:21.000Z | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 5 | 2016-01-21T05:36:37.000Z | 2016-08-22T19:26:51.000Z | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 3 | 2016-01-23T04:03:44.000Z | 2016-08-21T15:58:38.000Z | s = r"""
.a.fy
real : x y z(10)
int dimension(3) a = [1, 2, 3]
x += 1 + 3
x /= sum(
x +
y +
z
)
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0)
# print(open(w.module.url.fortran_path, 'r').read())
| 11.590909 | 52 | 0.517647 |
429ef552bab3a2edc7fc294879023c596ddcef88 | 15,649 | py | Python | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | 1 | 2021-08-10T13:16:36.000Z | 2021-08-10T13:16:36.000Z | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | null | null | null | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import os
import random
import sys
import numpy as np
import torch
import wandb
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10
from fedml_api.data_preprocessing.cifar100.data_loader import load_partition_data_cifar100
from fedml_api.data_preprocessing.cinic10.data_loader import load_partition_data_cinic10
from fedml_api.data_preprocessing.fed_cifar100.data_loader import load_partition_data_federated_cifar100
from fedml_api.data_preprocessing.shakespeare.data_loader import load_partition_data_shakespeare
from fedml_api.data_preprocessing.fed_shakespeare.data_loader import load_partition_data_federated_shakespeare
from fedml_api.data_preprocessing.stackoverflow_lr.data_loader import load_partition_data_federated_stackoverflow_lr
from fedml_api.data_preprocessing.stackoverflow_nwp.data_loader import load_partition_data_federated_stackoverflow_nwp
from fedml_api.data_preprocessing.ImageNet.data_loader import load_partition_data_ImageNet
from fedml_api.data_preprocessing.Landmarks.data_loader import load_partition_data_landmarks
from fedml_api.model.cv.mobilenet import mobilenet
from fedml_api.model.cv.resnet import resnet56
from fedml_api.model.cv.cnn import CNN_DropOut
from fedml_api.data_preprocessing.FederatedEMNIST.data_loader import load_partition_data_federated_emnist
from fedml_api.model.nlp.rnn import RNN_OriginalFedAvg, RNN_StackOverFlow
from fedml_api.data_preprocessing.MNIST.data_loader import load_partition_data_mnist
from fedml_api.model.linear.lr import LogisticRegression
from fedml_api.model.cv.resnet_gn import resnet18
from fedml_api.standalone.fedavg.fedavg_api import FedAvgAPI
from fedml_api.standalone.fedavg.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS
from fedml_api.standalone.fedavg.my_model_trainer_nwp import MyModelTrainer as MyModelTrainerNWP
from fedml_api.standalone.fedavg.my_model_trainer_tag_prediction import MyModelTrainer as MyModelTrainerTAG
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
# Training settings
parser.add_argument('--model', type=str, default='resnet56', metavar='N',
help='neural network used in training')
parser.add_argument('--dataset', type=str, default='cifar10', metavar='N',
help='dataset used for training')
parser.add_argument('--data_dir', type=str, default='./../../../data/cifar10',
help='data directory')
parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
help='how to partition the dataset on local workers')
parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
help='partition alpha (default: 0.5)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--client_optimizer', type=str, default='adam',
help='SGD with momentum; adam')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=5, metavar='EP',
help='how many epochs will be trained locally')
parser.add_argument('--client_num_in_total', type=int, default=10, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--client_num_per_round', type=int, default=10, metavar='NN',
help='number of workers')
parser.add_argument('--comm_round', type=int, default=10,
help='how many round of communications we shoud use')
parser.add_argument('--frequency_of_the_test', type=int, default=5,
help='the frequency of the algorithms')
parser.add_argument('--gpu', type=int, default=0,
help='gpu')
parser.add_argument('--ci', type=int, default=0,
help='CI')
return parser
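# Example invocation using the flags declared above (values are the illustrative defaults):
#   python main_fedavg.py --model resnet56 --dataset cifar10 --data_dir ./../../../data/cifar10 \
#       --client_num_in_total 10 --client_num_per_round 10 --comm_round 10 --epochs 5 --lr 0.001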
def load_data(args, dataset_name):
    # check if centralized training is enabled (if only one client participates in total, training is treated as centralized)
centralized = True if args.client_num_in_total == 1 else False
# check if the full-batch training is enabled
args_batch_size = args.batch_size
    if args.batch_size <= 0:  # why <= 0? a non-positive batch size is used as the flag for full-batch training
full_batch = True
args.batch_size = 128 # temporary batch size
else:
full_batch = False
if dataset_name == "mnist":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_mnist(args.batch_size)
"""
For shallow NN or linear models,
we uniformly sample a fraction of clients each round (as the original FedAvg paper)
"""
args.client_num_in_total = client_num
elif dataset_name == "femnist":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_emnist(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "shakespeare":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_shakespeare(args.batch_size)
args.client_num_in_total = client_num
elif dataset_name == "fed_shakespeare":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_shakespeare(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "fed_cifar100":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_cifar100(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "stackoverflow_lr":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_stackoverflow_lr(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "stackoverflow_nwp":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_stackoverflow_nwp(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "ILSVRC2012":
logging.info("load_data. dataset_name = %s" % dataset_name)
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_ImageNet(dataset=dataset_name, data_dir=args.data_dir,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
elif dataset_name == "gld23k":
logging.info("load_data. dataset_name = %s" % dataset_name)
args.client_num_in_total = 233
fed_train_map_file = os.path.join(args.data_dir, 'mini_gld_train_split.csv')
fed_test_map_file = os.path.join(args.data_dir, 'mini_gld_test.csv')
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir,
fed_train_map_file=fed_train_map_file,
fed_test_map_file=fed_test_map_file,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
elif dataset_name == "gld160k":
logging.info("load_data. dataset_name = %s" % dataset_name)
args.client_num_in_total = 1262
fed_train_map_file = os.path.join(args.data_dir, 'federated_train.csv')
fed_test_map_file = os.path.join(args.data_dir, 'test.csv')
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir,
fed_train_map_file=fed_train_map_file,
fed_test_map_file=fed_test_map_file,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
else:
if dataset_name == "cifar10":
data_loader = load_partition_data_cifar10
elif dataset_name == "cifar100":
data_loader = load_partition_data_cifar100
elif dataset_name == "cinic10":
data_loader = load_partition_data_cinic10
else:
data_loader = load_partition_data_cifar10
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = data_loader(args.dataset, args.data_dir, args.partition_method,
args.partition_alpha, args.client_num_in_total, args.batch_size)
    # for centralized training (only one client participates), remap the per-client local data dicts so that all data belongs to client 0
if centralized:
train_data_local_num_dict = {
0: sum(user_train_data_num for user_train_data_num in train_data_local_num_dict.values())}
train_data_local_dict = {
0: [batch for cid in sorted(train_data_local_dict.keys()) for batch in train_data_local_dict[cid]]}
test_data_local_dict = {
0: [batch for cid in sorted(test_data_local_dict.keys()) for batch in test_data_local_dict[cid]]}
args.client_num_in_total = 1
    # for full-batch training, merge each data loader's mini-batches into a single batch
if full_batch:
train_data_global = combine_batches(train_data_global)
test_data_global = combine_batches(test_data_global)
train_data_local_dict = {cid: combine_batches(train_data_local_dict[cid]) for cid in
train_data_local_dict.keys()}
test_data_local_dict = {cid: combine_batches(test_data_local_dict[cid]) for cid in test_data_local_dict.keys()}
args.batch_size = args_batch_size
dataset = [train_data_num, test_data_num, train_data_global, test_data_global,
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num]
return dataset
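# combine_batches (below) concatenates every mini-batch of a loader into a single
# (features, labels) pair, which is how full-batch training is emulated when
# args.batch_size <= 0.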
def combine_batches(batches):
full_x = torch.from_numpy(np.asarray([])).float()
full_y = torch.from_numpy(np.asarray([])).long()
for (batched_x, batched_y) in batches:
full_x = torch.cat((full_x, batched_x), 0)
full_y = torch.cat((full_y, batched_y), 0)
return [(full_x, full_y)]
def create_model(args, model_name, output_dim):
logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
model = None
if model_name == "lr" and args.dataset == "mnist":
logging.info("LogisticRegression + MNIST")
model = LogisticRegression(28 * 28, output_dim)
elif model_name == "cnn" and args.dataset == "femnist":
logging.info("CNN + FederatedEMNIST")
model = CNN_DropOut(False)
elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
logging.info("ResNet18_GN + Federated_CIFAR100")
model = resnet18()
elif model_name == "rnn" and args.dataset == "shakespeare":
logging.info("RNN + shakespeare")
model = RNN_OriginalFedAvg()
elif model_name == "rnn" and args.dataset == "fed_shakespeare":
logging.info("RNN + fed_shakespeare")
model = RNN_OriginalFedAvg()
elif model_name == "lr" and args.dataset == "stackoverflow_lr":
logging.info("lr + stackoverflow_lr")
model = LogisticRegression(10000, output_dim)
elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
logging.info("RNN + stackoverflow_nwp")
model = RNN_StackOverFlow()
elif model_name == "resnet56":
model = resnet56(class_num=output_dim)
elif model_name == "mobilenet":
model = mobilenet(class_num=output_dim)
return model
def custom_model_trainer(args, model):
if args.dataset == "stackoverflow_lr":
return MyModelTrainerTAG(model)
elif args.dataset in ["fed_shakespeare", "stackoverflow_nwp"]:
return MyModelTrainerNWP(model)
else: # default model trainer is for classification problem
return MyModelTrainerCLS(model)
if __name__ == "__main__":
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
parser = add_args(argparse.ArgumentParser(description='FedAvg-standalone'))
args = parser.parse_args()
logger.info(args)
device = torch.device("cuda:" + str(args.gpu) if torch.cuda.is_available() else "cpu")
logger.info(device)
wandb.init(
project="fedml",
name="FedAVG-r" + str(args.comm_round) + "-e" + str(args.epochs) + "-lr" + str(args.lr),
config=args
)
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
# load data
dataset = load_data(args, args.dataset)
# create model.
# Note if the model is DNN (e.g., ResNet), the training will be very slow.
# In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
model = create_model(args, model_name=args.model, output_dim=dataset[7])
model_trainer = custom_model_trainer(args, model)
logging.info(model)
fedavgAPI = FedAvgAPI(dataset, device, args, model_trainer)
fedavgAPI.train()
| 49.365931 | 119 | 0.698894 |
7ae1ea121dc8bdaffb51292d10b07fe5cb831721 | 7,528 | py | Python | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null |
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.lang import Builder
from anadirVehiculo import AgregarVehiculo
import requests
"""
The Vehicle, Location and Delete buttons are implemented as separate classes so that the instance of each
button can be obtained when it is pressed: from the kv side it is enough to pass (self) to the function when the button
is pressed. There are other ways, but I prefer this one, since it gives me the id of the corresponding vehicle.
"""
#l=[]
k=Builder.load_string("""
<SecondWindow>:
name: "segunda"
BoxLayout:
id:box
orientation:"vertical"
BoxLayout:
size_hint_y:0.3
orientation:"vertical"
Label:
text: "Administrador de vehiculos"
BoxLayout:
Label:
text: "Vehiculo"
Label:
text: "Ubicacion"
Label:
text: "Eliminar"
ScrollView:
id: scroll
GridLayout:
id: contenedorFilas
cols: 1
size_hint_y: None # if this is not set, the scroll does not appear.
row_default_height: root.height*0.1
height: self.minimum_height
BoxLayout:
size_hint_y:0.25
spacing: 50
padding: 20,30,50,10 # margins: left, top, right, bottom
Button:
text: "Agregar Vehiculo"
on_release:
root.oprimidoBtnAgregarVehiculo()
Button:
text: "GPS"
on_release:
root.pantallas(app)
root.manager.current="gps"
<BotonVehiculo>:
on_press: app.root.current="tableroPrincipal"
<BotonUbicacion>:
on_press: root.ubicacionVehiculo()
<BotonEliminar>:
on_press: root.eliminarVehiculo()
""")
class BotonVehiculo(Button):
def tableroVehiculo(self):
pass
class BotonUbicacion(Button):
def ubicacionVehiculo(self):
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
my_ip = ip_request.json()['ip']
geo_request = requests.get('https://get.geojs.io/v1/ip/geo/' +my_ip + '.json')
geo_data = geo_request.json()
# Add the location to the DB
print(self.parent.children[2].text) # to get the vehicle name.
print(geo_data['latitude'], geo_data['longitude'])
self.popup = Popup(title="ESTADO",
content=Label(text="Ubicacion guardada correctamente"),
size_hint=(0.7, 0.2))
self.popup.open()
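# Assumed response shape (illustrative, not verified here): geojs.io returns JSON such as
# {"latitude": "4.6097", "longitude": "-74.0817", ...}, which is why geo_data['latitude']
# and geo_data['longitude'] are read above.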
class BotonEliminar(Button):
"""ATENCION
Para la f eliminarVeh no me permite eliminar botones con el parent cosa que es extraña, porque con solo poner self.parent me muestra el pad
re del btn, supuse que tal vez me interpretaba el btn como un objeto diferente, como sea, lo que hice fue crear una lista l con todos
los objetos boxlayout creados, luego comparo ese con el boxlayout padre de mi btn seleccionado y borro los botones (veh, ubic, elim)
pero desde el obj metido en la lista y funciona. Luego meti el gridLayout que contiene a todos los boxlayout en la ultima pos de la lis
ta para poder accederlo y elimimar el boxlayout que contiene al boton oprimido, lo elimina, pero en la terminal de cmd salen errores
al yo cerrar la ventana de kivy.
LO HE SOLUCIONADO
Utilizando la lista l para meter los objetos BoxLayout y Grid ([objBox,objBox,objBox,..., objGridLayout]) podia eliminar los
objetos BoxLayout si se seleccionaba el boton respectivo, sin embargo al cerrar la aplicacion, se generaban errores, lo que pienso,
que yo eliminaba un box pero como era una copia quedaba el otro, esto puede generar incosistencias, al hacer la prueba unitaria con este
modulo, me di cuenta que mi implementacion funcionaba con normalidad, sin necesidad de una lista, solo con self.parent... ahora, he quitado
el codigo kv del archivo vistas.kv y lo integro en este archivo y funciona.
"""
def eliminarVehiculo(self):
print(self.parent.children[2].text) # gets the name of the vehicle to delete from the DB
self.parent.parent.remove_widget(self.parent)
#print(self.parent.remove_widget(self)) # meant to remove the buttons for the delete action, but it raises an error;
# tested as an individual module it works fine.
"""
for obj in l:
if self.parent==obj:
l[-1].remove_widget(obj)
"""
class SecondWindow(Screen):
#l=[]
def __init__(self, **kwargs):
super(SecondWindow, self).__init__(**kwargs)
Clock.schedule_once(lambda dt:self.scrollVehiculos()) # done this way because using self.ids from the constructor
# raises an error, since the ids have not been created yet; Clock delays the call until they exist,
# and the lambda avoids having to accept dt.
def oprimidoBtnAgregarVehiculo(self):
self.content = AgregarVehiculo() # the text passed in is captured by the StringProperty
self.content.bind(on_guardar=self._on_guardar) # as I understand it, this hands on_guardar over to _on_guardar
self.popup = Popup(title="Agregue el vehiculo que desee",
content=self.content,
size_hint=(0.9, 0.9))
self.popup.open()
def pantallas(self, app):
app.root.screens[3].actualizarMarcadores() # so the map always appears centred on the current location
def _on_guardar(self, instance):
resultadoVentanaAgregarVehiculo=self.content.on_guardar() # position 0 says whether the data from AgregarVehiculo is valid or not.
if resultadoVentanaAgregarVehiculo[0]: # position holding True or False
box=BoxLayout(orientation="horizontal")
box.add_widget(BotonVehiculo(text=resultadoVentanaAgregarVehiculo[1])) # position holding the vehicle name.
box.add_widget(BotonUbicacion(text="ubicacion")) # the ids are identical and correspond to the vehicle name
box.add_widget(BotonEliminar(text="Eliminar"))
self.ids.contenedorFilas.add_widget(box)
self.popup.dismiss()
else:
pass
def scrollVehiculos(self):
# QUERY THE DATABASE TO LIST ALL THE VEHICLES
for i in range(5):
#self.l.append(BoxLayout(orientation="horizontal"))
#self.ids.contenedorFilas.add_widget(self.l[-1]) # add the required BoxLayouts to the GridLayout; each BoxLayout
# can hold the three buttons.
self.ids.contenedorFilas.add_widget(BoxLayout(orientation="horizontal"))
for i, n in enumerate(self.ids.contenedorFilas.children):
n.add_widget(BotonVehiculo(text="vehiculo"+str(i)))
n.add_widget(BotonUbicacion(text="ubicacion"+str(i))) # the ids are identical and correspond to the vehicle name
n.add_widget(BotonEliminar(text="Eliminar"+str(i)))
#l.append(n)
#l.append(self.ids.contenedorFilas)
#print(l) # I do not understand why two lists are printed
"""
# This function is kept just in case; it did not work for removing the buttons, but the attempt may be useful later.
def eliminarVehiculo(self, idBoton): # meant to remove the buttons associated with a BoxLayout, but it behaves oddly; I think it is
# because the BoxLayouts are stored in a list, or because the idBoton parameter is passed in from another class.
#print(idBoton)
#self.l[int(idBoton)].clear_widgets()
#self.ids.contenedorFilas.remove_widget(self.l[int(idBoton)])
#self.l.pop(int(idBoton))
""" | 45.349398 | 147 | 0.703108 |
361dd64cd261a81246151b95e4c72637e2698d06 | 51,549 | py | Python | synapse/handlers/e2e_keys.py | mattcen/synapse | 26e13ad126473fc15ce0b674f821f05f1f1158e2 | [
"Apache-2.0"
] | 1 | 2020-07-21T17:51:02.000Z | 2020-07-21T17:51:02.000Z | synapse/handlers/e2e_keys.py | mjvaldez/synapse | de119063f248981510e961e83f1515a3add19a21 | [
"Apache-2.0"
] | null | null | null | synapse/handlers/e2e_keys.py | mjvaldez/synapse | de119063f248981510e961e83f1515a3add19a21 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import attr
from canonicaljson import encode_canonical_json, json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import (
UserID,
get_domain_from_id,
get_verify_key_from_cross_signing_key,
)
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
class E2eKeysHandler(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
self._edu_updater = SigningKeyEduUpdater(hs, self)
federation_registry = hs.get_federation_registry()
self._is_master = hs.config.worker_app is None
if not self._is_master:
self._user_device_resync_client = ReplicationUserDevicesResyncRestServlet.make_client(
hs
)
else:
# Only register this edu handler on master as it requires writing
# device updates to the db
#
# FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
federation_registry.register_edu_handler(
"org.matrix.signing_key_update",
self._edu_updater.incoming_signing_key_update,
)
# doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the
# "query handler" interface.
federation_registry.register_query_handler(
"client_keys", self.on_federation_query_client_keys
)
@trace
async def query_devices(self, query_body, timeout, from_user_id):
""" Handle a device key query from a client
{
"device_keys": {
"<user_id>": ["<device_id>"]
}
}
->
{
"device_keys": {
"<user_id>": {
"<device_id>": {
...
}
}
}
}
Args:
from_user_id (str): the user making the query. This is used when
adding cross-signing signatures to limit what signatures users
can see.
"""
device_keys_query = query_body.get("device_keys", {})
# separate users by domain.
# make a map from domain to user_id to device_ids
local_query = {}
remote_queries = {}
for user_id, device_ids in device_keys_query.items():
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
local_query[user_id] = device_ids
else:
remote_queries[user_id] = device_ids
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
# First get local devices.
failures = {}
results = {}
if local_query:
local_result = await self.query_local_devices(local_query)
for user_id, keys in local_result.items():
if user_id in local_query:
results[user_id] = keys
# Now attempt to get any remote devices from our local cache.
remote_queries_not_in_cache = {}
if remote_queries:
query_list = []
for user_id, device_ids in remote_queries.items():
if device_ids:
query_list.extend((user_id, device_id) for device_id in device_ids)
else:
query_list.append((user_id, None))
(
user_ids_not_in_cache,
remote_results,
) = await self.store.get_user_devices_from_cache(query_list)
for user_id, devices in remote_results.items():
user_devices = results.setdefault(user_id, {})
for device_id, device in devices.items():
keys = device.get("keys", None)
device_display_name = device.get("device_display_name", None)
if keys:
result = dict(keys)
unsigned = result.setdefault("unsigned", {})
if device_display_name:
unsigned["device_display_name"] = device_display_name
user_devices[device_id] = result
for user_id in user_ids_not_in_cache:
domain = get_domain_from_id(user_id)
r = remote_queries_not_in_cache.setdefault(domain, {})
r[user_id] = remote_queries[user_id]
# Get cached cross-signing keys
cross_signing_keys = await self.get_cross_signing_keys_from_cache(
device_keys_query, from_user_id
)
# Now fetch any devices that we don't have in our cache
@trace
async def do_remote_query(destination):
"""This is called when we are querying the device list of a user on
a remote homeserver and their device list is not in the device list
cache. If we share a room with this user and the query has not asked for
specific devices, we will update the cache with their device list.
"""
destination_query = remote_queries_not_in_cache[destination]
# We first consider whether we wish to update the device list cache with
# the users device list. We want to track a user's devices when the
# authenticated user shares a room with the queried user and the query
# has not specified a particular device.
# If we update the cache for the queried user we remove them from further
# queries. We use the more efficient batched query_client_keys for all
# remaining users
user_ids_updated = []
for (user_id, device_list) in destination_query.items():
if user_id in user_ids_updated:
continue
if device_list:
continue
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
continue
# We've decided we're sharing a room with this user and should
# probably be tracking their device lists. However, we haven't
# done an initial sync on the device list so we do it now.
try:
if self._is_master:
user_devices = await self.device_handler.device_list_updater.user_device_resync(
user_id
)
else:
user_devices = await self._user_device_resync_client(
user_id=user_id
)
user_devices = user_devices["devices"]
user_results = results.setdefault(user_id, {})
for device in user_devices:
user_results[device["device_id"]] = device["keys"]
user_ids_updated.append(user_id)
except Exception as e:
failures[destination] = _exception_to_failure(e)
if len(destination_query) == len(user_ids_updated):
# We've updated all the users in the query and we do not need to
# make any further remote calls.
return
# Remove all the users from the query which we have updated
for user_id in user_ids_updated:
destination_query.pop(user_id)
try:
remote_result = await self.federation.query_client_keys(
destination, {"device_keys": destination_query}, timeout=timeout
)
for user_id, keys in remote_result["device_keys"].items():
if user_id in destination_query:
results[user_id] = keys
if "master_keys" in remote_result:
for user_id, key in remote_result["master_keys"].items():
if user_id in destination_query:
cross_signing_keys["master_keys"][user_id] = key
if "self_signing_keys" in remote_result:
for user_id, key in remote_result["self_signing_keys"].items():
if user_id in destination_query:
cross_signing_keys["self_signing_keys"][user_id] = key
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(do_remote_query, destination)
for destination in remote_queries_not_in_cache
],
consumeErrors=True,
).addErrback(unwrapFirstError)
)
ret = {"device_keys": results, "failures": failures}
ret.update(cross_signing_keys)
return ret
async def get_cross_signing_keys_from_cache(self, query, from_user_id):
"""Get cross-signing keys for users from the database
Args:
query (Iterable[string]) an iterable of user IDs. A dict whose keys
are user IDs satisfies this, so the query format used for
query_devices can be used here.
from_user_id (str): the user making the query. This is used when
adding cross-signing signatures to limit what signatures users
can see.
Returns:
defer.Deferred[dict[str, dict[str, dict]]]: map from
(master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
"""
master_keys = {}
self_signing_keys = {}
user_signing_keys = {}
user_ids = list(query)
keys = await self.store.get_e2e_cross_signing_keys_bulk(user_ids, from_user_id)
for user_id, user_info in keys.items():
if user_info is None:
continue
if "master" in user_info:
master_keys[user_id] = user_info["master"]
if "self_signing" in user_info:
self_signing_keys[user_id] = user_info["self_signing"]
if (
from_user_id in keys
and keys[from_user_id] is not None
and "user_signing" in keys[from_user_id]
):
# users can see other users' master and self-signing keys, but can
# only see their own user-signing keys
user_signing_keys[from_user_id] = keys[from_user_id]["user_signing"]
return {
"master_keys": master_keys,
"self_signing_keys": self_signing_keys,
"user_signing_keys": user_signing_keys,
}
@trace
async def query_local_devices(self, query):
"""Get E2E device keys for local users
Args:
query (dict[string, list[string]|None): map from user_id to a list
of devices to query (None for all devices)
Returns:
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
set_tag("local_query", query)
local_query = []
result_dict = {}
for user_id, device_ids in query.items():
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
logger.warning("Request for keys for non-local user %s", user_id)
log_kv(
{
"message": "Requested a local key for a user which"
" was not local to the homeserver",
"user_id": user_id,
}
)
set_tag("error", True)
raise SynapseError(400, "Not a user here")
if not device_ids:
local_query.append((user_id, None))
else:
for device_id in device_ids:
local_query.append((user_id, device_id))
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
results = await self.store.get_e2e_device_keys(local_query)
# Build the result structure
for user_id, device_keys in results.items():
for device_id, device_info in device_keys.items():
result_dict[user_id][device_id] = device_info
log_kv(results)
return result_dict
async def on_federation_query_client_keys(self, query_body):
""" Handle a device key query from a federated server
"""
device_keys_query = query_body.get("device_keys", {})
res = await self.query_local_devices(device_keys_query)
ret = {"device_keys": res}
# add in the cross-signing keys
cross_signing_keys = await self.get_cross_signing_keys_from_cache(
device_keys_query, None
)
ret.update(cross_signing_keys)
return ret
@trace
async def claim_one_time_keys(self, query, timeout):
local_query = []
remote_queries = {}
for user_id, device_keys in query.get("one_time_keys", {}).items():
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
for device_id, algorithm in device_keys.items():
local_query.append((user_id, device_id, algorithm))
else:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
results = await self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
failures = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_bytes in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json.loads(json_bytes)
}
@trace
async def claim_client_keys(destination):
set_tag("destination", destination)
device_keys = remote_queries[destination]
try:
remote_result = await self.federation.claim_client_keys(
destination, {"one_time_keys": device_keys}, timeout=timeout
)
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(claim_client_keys, destination)
for destination in remote_queries
],
consumeErrors=True,
)
)
logger.info(
"Claimed one-time-keys: %s",
",".join(
(
"%s for %s:%s" % (key_id, user_id, device_id)
for user_id, user_keys in json_result.items()
for device_id, device_keys in user_keys.items()
for key_id, _ in device_keys.items()
)
),
)
log_kv({"one_time_keys": json_result, "failures": failures})
return {"one_time_keys": json_result, "failures": failures}
@tag_args
async def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None)
if device_keys:
logger.info(
"Updating device_keys for device %r for user %s at %d",
device_id,
user_id,
time_now,
)
log_kv(
{
"message": "Updating device_keys for user.",
"user_id": user_id,
"device_id": device_id,
}
)
# TODO: Sign the JSON with the server key
changed = await self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
)
if changed:
# Only notify about device updates *if* the keys actually changed
await self.device_handler.notify_device_update(user_id, [device_id])
else:
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
log_kv(
{
"message": "Updating one_time_keys for device.",
"user_id": user_id,
"device_id": device_id,
}
)
await self._upload_one_time_keys_for_user(
user_id, device_id, time_now, one_time_keys
)
else:
log_kv(
{"message": "Did not update one_time_keys", "reason": "no keys given"}
)
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we
# need to double-check the device is registered to avoid ending up with
# keys without a corresponding device.
await self.device_handler.check_device_registered(user_id, device_id)
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
async def _upload_one_time_keys_for_user(
self, user_id, device_id, time_now, one_time_keys
):
logger.info(
"Adding one_time_keys %r for device %r for user %r at %d",
one_time_keys.keys(),
device_id,
user_id,
time_now,
)
# make a list of (alg, id, key) tuples
key_list = []
for key_id, key_obj in one_time_keys.items():
algorithm, key_id = key_id.split(":")
key_list.append((algorithm, key_id, key_obj))
# First we check if we have already persisted any of the keys.
existing_key_map = await self.store.get_e2e_one_time_keys(
user_id, device_id, [k_id for _, k_id, _ in key_list]
)
new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
for algorithm, key_id, key in key_list:
ex_json = existing_key_map.get((algorithm, key_id), None)
if ex_json:
if not _one_time_keys_match(ex_json, key):
raise SynapseError(
400,
(
"One time key %s:%s already exists. "
"Old key: %s; new key: %r"
)
% (algorithm, key_id, ex_json, key),
)
else:
new_keys.append(
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
async def upload_signing_keys_for_user(self, user_id, keys):
"""Upload signing keys for cross-signing
Args:
user_id (string): the user uploading the keys
keys (dict[string, dict]): the signing keys
"""
# if a master key is uploaded, then check it. Otherwise, load the
# stored master key, to check signatures on other keys
if "master_key" in keys:
master_key = keys["master_key"]
_check_cross_signing_key(master_key, user_id, "master")
else:
master_key = await self.store.get_e2e_cross_signing_key(user_id, "master")
# if there is no master key, then we can't do anything, because all the
# other cross-signing keys need to be signed by the master key
if not master_key:
raise SynapseError(400, "No master key available", Codes.MISSING_PARAM)
try:
master_key_id, master_verify_key = get_verify_key_from_cross_signing_key(
master_key
)
except ValueError:
if "master_key" in keys:
# the invalid key came from the request
raise SynapseError(400, "Invalid master key", Codes.INVALID_PARAM)
else:
# the invalid key came from the database
logger.error("Invalid master key found for user %s", user_id)
raise SynapseError(500, "Invalid master key")
# for the other cross-signing keys, make sure that they have valid
# signatures from the master key
if "self_signing_key" in keys:
self_signing_key = keys["self_signing_key"]
_check_cross_signing_key(
self_signing_key, user_id, "self_signing", master_verify_key
)
if "user_signing_key" in keys:
user_signing_key = keys["user_signing_key"]
_check_cross_signing_key(
user_signing_key, user_id, "user_signing", master_verify_key
)
# if everything checks out, then store the keys and send notifications
deviceids = []
if "master_key" in keys:
await self.store.set_e2e_cross_signing_key(user_id, "master", master_key)
deviceids.append(master_verify_key.version)
if "self_signing_key" in keys:
await self.store.set_e2e_cross_signing_key(
user_id, "self_signing", self_signing_key
)
try:
deviceids.append(
get_verify_key_from_cross_signing_key(self_signing_key)[1].version
)
except ValueError:
raise SynapseError(400, "Invalid self-signing key", Codes.INVALID_PARAM)
if "user_signing_key" in keys:
await self.store.set_e2e_cross_signing_key(
user_id, "user_signing", user_signing_key
)
# the signature stream matches the semantics that we want for
# user-signing key updates: only the user themselves is notified of
# their own user-signing key updates
await self.device_handler.notify_user_signature_update(user_id, [user_id])
# master key and self-signing key updates match the semantics of device
# list updates: all users who share an encrypted room are notified
if len(deviceids):
await self.device_handler.notify_device_update(user_id, deviceids)
return {}
async def upload_signatures_for_device_keys(self, user_id, signatures):
"""Upload device signatures for cross-signing
Args:
user_id (string): the user uploading the signatures
signatures (dict[string, dict[string, dict]]): map of users to
devices to signed keys. This is the submission from the user; an
exception will be raised if it is malformed.
Returns:
dict: response to be sent back to the client. The response will have
a "failures" key, which will be a dict mapping users to devices
to errors for the signatures that failed.
Raises:
SynapseError: if the signatures dict is not valid.
"""
failures = {}
# signatures to be stored. Each item will be a SignatureListItem
signature_list = []
# split between checking signatures for own user and signatures for
# other users, since we verify them with different keys
self_signatures = signatures.get(user_id, {})
other_signatures = {k: v for k, v in signatures.items() if k != user_id}
self_signature_list, self_failures = await self._process_self_signatures(
user_id, self_signatures
)
signature_list.extend(self_signature_list)
failures.update(self_failures)
other_signature_list, other_failures = await self._process_other_signatures(
user_id, other_signatures
)
signature_list.extend(other_signature_list)
failures.update(other_failures)
# store the signature, and send the appropriate notifications for sync
logger.debug("upload signature failures: %r", failures)
await self.store.store_e2e_cross_signing_signatures(user_id, signature_list)
self_device_ids = [item.target_device_id for item in self_signature_list]
if self_device_ids:
await self.device_handler.notify_device_update(user_id, self_device_ids)
signed_users = [item.target_user_id for item in other_signature_list]
if signed_users:
await self.device_handler.notify_user_signature_update(
user_id, signed_users
)
return {"failures": failures}
async def _process_self_signatures(self, user_id, signatures):
"""Process uploaded signatures of the user's own keys.
Signatures of the user's own keys from this API come in two forms:
- signatures of the user's devices by the user's self-signing key,
- signatures of the user's master key by the user's devices.
Args:
user_id (string): the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
(list[SignatureListItem], dict[string, dict[string, dict]]):
a list of signatures to store, and a map of users to devices to failure
reasons
Raises:
SynapseError: if the input is malformed
"""
signature_list = []
failures = {}
if not signatures:
return signature_list, failures
if not isinstance(signatures, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
try:
# get our self-signing key to verify the signatures
(
_,
self_signing_key_id,
self_signing_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "self_signing")
# get our master key, since we may have received a signature of it.
# We need to fetch it here so that we know what its key ID is, so
# that we can check if a signature that was sent is a signature of
# the master key or of a device
(
master_key,
_,
master_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "master")
# fetch our stored devices. This is used to 1. verify
# signatures on the master key, and 2. to compare with what
# was sent if the device was signed
devices = await self.store.get_e2e_device_keys([(user_id, None)])
if user_id not in devices:
raise NotFoundError("No device keys found")
devices = devices[user_id]
except SynapseError as e:
failure = _exception_to_failure(e)
failures[user_id] = {device: failure for device in signatures.keys()}
return signature_list, failures
for device_id, device in signatures.items():
# make sure submitted data is in the right form
if not isinstance(device, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
try:
if "signatures" not in device or user_id not in device["signatures"]:
# no signature was sent
raise SynapseError(
400, "Invalid signature", Codes.INVALID_SIGNATURE
)
if device_id == master_verify_key.version:
# The signature is of the master key. This needs to be
# handled differently from signatures of normal devices.
master_key_signature_list = self._check_master_key_signature(
user_id, device_id, device, master_key, devices
)
signature_list.extend(master_key_signature_list)
continue
# at this point, we have a device that should be signed
# by the self-signing key
if self_signing_key_id not in device["signatures"][user_id]:
# no signature was sent
raise SynapseError(
400, "Invalid signature", Codes.INVALID_SIGNATURE
)
try:
stored_device = devices[device_id]
except KeyError:
raise NotFoundError("Unknown device")
if self_signing_key_id in stored_device.get("signatures", {}).get(
user_id, {}
):
# we already have a signature on this device, so we
# can skip it, since it should be exactly the same
continue
_check_device_signature(
user_id, self_signing_verify_key, device, stored_device
)
signature = device["signatures"][user_id][self_signing_key_id]
signature_list.append(
SignatureListItem(
self_signing_key_id, user_id, device_id, signature
)
)
except SynapseError as e:
failures.setdefault(user_id, {})[device_id] = _exception_to_failure(e)
return signature_list, failures
def _check_master_key_signature(
self, user_id, master_key_id, signed_master_key, stored_master_key, devices
):
"""Check signatures of a user's master key made by their devices.
Args:
user_id (string): the user whose master key is being checked
master_key_id (string): the ID of the user's master key
signed_master_key (dict): the user's signed master key that was uploaded
stored_master_key (dict): our previously-stored copy of the user's master key
devices (iterable(dict)): the user's devices
Returns:
list[SignatureListItem]: a list of signatures to store
Raises:
SynapseError: if a signature is invalid
"""
# for each device that signed the master key, check the signature.
master_key_signature_list = []
sigs = signed_master_key["signatures"]
for signing_key_id, signature in sigs[user_id].items():
_, signing_device_id = signing_key_id.split(":", 1)
if (
signing_device_id not in devices
or signing_key_id not in devices[signing_device_id]["keys"]
):
# signed by an unknown device, or the
# device does not have the key
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
# get the key and check the signature
pubkey = devices[signing_device_id]["keys"][signing_key_id]
verify_key = decode_verify_key_bytes(signing_key_id, decode_base64(pubkey))
_check_device_signature(
user_id, verify_key, signed_master_key, stored_master_key
)
master_key_signature_list.append(
SignatureListItem(signing_key_id, user_id, master_key_id, signature)
)
return master_key_signature_list
async def _process_other_signatures(self, user_id, signatures):
"""Process uploaded signatures of other users' keys. These will be the
target user's master keys, signed by the uploading user's user-signing
key.
Args:
user_id (string): the user uploading the keys
signatures (dict[string, dict]): map of users to devices to signed keys
Returns:
(list[SignatureListItem], dict[string, dict[string, dict]]):
a list of signatures to store, and a map of users to devices to failure
reasons
Raises:
SynapseError: if the input is malformed
"""
signature_list = []
failures = {}
if not signatures:
return signature_list, failures
try:
# get our user-signing key to verify the signatures
(
user_signing_key,
user_signing_key_id,
user_signing_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
except SynapseError as e:
failure = _exception_to_failure(e)
for user, devicemap in signatures.items():
failures[user] = {device_id: failure for device_id in devicemap.keys()}
return signature_list, failures
for target_user, devicemap in signatures.items():
# make sure submitted data is in the right form
if not isinstance(devicemap, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
for device in devicemap.values():
if not isinstance(device, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
device_id = None
try:
# get the target user's master key, to make sure it matches
# what was sent
(
master_key,
master_key_id,
_,
) = await self._get_e2e_cross_signing_verify_key(
target_user, "master", user_id
)
# make sure that the target user's master key is the one that
# was signed (and no others)
device_id = master_key_id.split(":", 1)[1]
if device_id not in devicemap:
logger.debug(
"upload signature: could not find signature for device %s",
device_id,
)
# set device to None so that the failure gets
# marked on all the signatures
device_id = None
raise NotFoundError("Unknown device")
key = devicemap[device_id]
other_devices = [k for k in devicemap.keys() if k != device_id]
if other_devices:
# other devices were signed -- mark those as failures
logger.debug("upload signature: too many devices specified")
failure = _exception_to_failure(NotFoundError("Unknown device"))
failures[target_user] = {
device: failure for device in other_devices
}
if user_signing_key_id in master_key.get("signatures", {}).get(
user_id, {}
):
# we already have the signature, so we can skip it
continue
_check_device_signature(
user_id, user_signing_verify_key, key, master_key
)
signature = key["signatures"][user_id][user_signing_key_id]
signature_list.append(
SignatureListItem(
user_signing_key_id, target_user, device_id, signature
)
)
except SynapseError as e:
failure = _exception_to_failure(e)
if device_id is None:
failures[target_user] = {
device_id: failure for device_id in devicemap.keys()
}
else:
failures.setdefault(target_user, {})[device_id] = failure
return signature_list, failures
async def _get_e2e_cross_signing_verify_key(
self, user_id: str, key_type: str, from_user_id: str = None
):
"""Fetch locally or remotely query for a cross-signing public key.
First, attempt to fetch the cross-signing public key from storage.
If that fails, query the keys from the homeserver they belong to
and update our local copy.
Args:
user_id: the user whose key should be fetched
key_type: the type of key to fetch
from_user_id: the user that we are fetching the keys for.
This affects what signatures are fetched.
Returns:
dict, str, VerifyKey: the raw key data, the key ID, and the
signedjson verify key
Raises:
NotFoundError: if the key is not found
SynapseError: if `user_id` is invalid
"""
user = UserID.from_string(user_id)
key = await self.store.get_e2e_cross_signing_key(
user_id, key_type, from_user_id
)
if key:
# We found a copy of this key in our database. Decode and return it
key_id, verify_key = get_verify_key_from_cross_signing_key(key)
return key, key_id, verify_key
# If we couldn't find the key locally, and we're looking for keys of
# another user then attempt to fetch the missing key from the remote
# user's server.
#
# We may run into this in possible edge cases where a user tries to
# cross-sign a remote user, but does not share any rooms with them yet.
# Thus, we would not have their key list yet. We instead fetch the key,
# store it and notify clients of new, associated device IDs.
if self.is_mine(user) or key_type not in ["master", "self_signing"]:
# Note that master and self_signing keys are the only cross-signing keys we
# can request over federation
raise NotFoundError("No %s key found for %s" % (key_type, user_id))
(
key,
key_id,
verify_key,
) = await self._retrieve_cross_signing_keys_for_remote_user(user, key_type)
if key is None:
raise NotFoundError("No %s key found for %s" % (key_type, user_id))
return key, key_id, verify_key
async def _retrieve_cross_signing_keys_for_remote_user(
self, user: UserID, desired_key_type: str,
):
"""Queries cross-signing keys for a remote user and saves them to the database
Only the key specified by `key_type` will be returned, while all retrieved keys
will be saved regardless
Args:
user: The user to query remote keys for
desired_key_type: The type of key to receive. One of "master", "self_signing"
Returns:
Deferred[Tuple[Optional[Dict], Optional[str], Optional[VerifyKey]]]: A tuple
of the retrieved key content, the key's ID and the matching VerifyKey.
If the key cannot be retrieved, all values in the tuple will instead be None.
"""
try:
remote_result = await self.federation.query_user_devices(
user.domain, user.to_string()
)
except Exception as e:
logger.warning(
"Unable to query %s for cross-signing keys of user %s: %s %s",
user.domain,
user.to_string(),
type(e),
e,
)
return None, None, None
# Process each of the retrieved cross-signing keys
desired_key = None
desired_key_id = None
desired_verify_key = None
retrieved_device_ids = []
for key_type in ["master", "self_signing"]:
key_content = remote_result.get(key_type + "_key")
if not key_content:
continue
# Ensure these keys belong to the correct user
if "user_id" not in key_content:
logger.warning(
"Invalid %s key retrieved, missing user_id field: %s",
key_type,
key_content,
)
continue
if user.to_string() != key_content["user_id"]:
logger.warning(
"Found %s key of user %s when querying for keys of user %s",
key_type,
key_content["user_id"],
user.to_string(),
)
continue
# Validate the key contents
try:
# verify_key is a VerifyKey from signedjson, which uses
# .version to denote the portion of the key ID after the
# algorithm and colon, which is the device ID
key_id, verify_key = get_verify_key_from_cross_signing_key(key_content)
except ValueError as e:
logger.warning(
"Invalid %s key retrieved: %s - %s %s",
key_type,
key_content,
type(e),
e,
)
continue
# Note down the device ID attached to this key
retrieved_device_ids.append(verify_key.version)
# If this is the desired key type, save it and its ID/VerifyKey
if key_type == desired_key_type:
desired_key = key_content
desired_verify_key = verify_key
desired_key_id = key_id
# At the same time, store this key in the db for subsequent queries
await self.store.set_e2e_cross_signing_key(
user.to_string(), key_type, key_content
)
# Notify clients that new devices for this user have been discovered
if retrieved_device_ids:
# XXX is this necessary?
await self.device_handler.notify_device_update(
user.to_string(), retrieved_device_ids
)
return desired_key, desired_key_id, desired_verify_key
def _check_cross_signing_key(key, user_id, key_type, signing_key=None):
"""Check a cross-signing key uploaded by a user. Performs some basic sanity
checking, and ensures that it is signed, if a signature is required.
Args:
key (dict): the key data to verify
user_id (str): the user whose key is being checked
key_type (str): the type of key that the key should be
signing_key (VerifyKey): (optional) the signing key that the key should
be signed with. If omitted, signatures will not be checked.
"""
if (
key.get("user_id") != user_id
or key_type not in key.get("usage", [])
or len(key.get("keys", {})) != 1
):
raise SynapseError(400, ("Invalid %s key" % (key_type,)), Codes.INVALID_PARAM)
if signing_key:
try:
verify_signed_json(key, user_id, signing_key)
except SignatureVerifyException:
raise SynapseError(
400, ("Invalid signature on %s key" % key_type), Codes.INVALID_SIGNATURE
)
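# Illustrative example (assumed values): a minimal key dict that passes these checks for
# user "@alice:example.com" and key_type "master" looks like:
#     {
#         "user_id": "@alice:example.com",
#         "usage": ["master"],
#         "keys": {"ed25519:abcdef": "<base64 public key>"},
#     }
# plus a valid signature from signing_key when one is supplied.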
def _check_device_signature(user_id, verify_key, signed_device, stored_device):
"""Check that a signature on a device or cross-signing key is correct and
matches the copy of the device/key that we have stored. Throws an
exception if an error is detected.
Args:
user_id (str): the user ID whose signature is being checked
verify_key (VerifyKey): the key to verify the device with
signed_device (dict): the uploaded signed device data
stored_device (dict): our previously stored copy of the device
Raises:
SynapseError: if the signature was invalid or the sent device is not the
same as the stored device
"""
# make sure that the device submitted matches what we have stored
stripped_signed_device = {
k: v for k, v in signed_device.items() if k not in ["signatures", "unsigned"]
}
stripped_stored_device = {
k: v for k, v in stored_device.items() if k not in ["signatures", "unsigned"]
}
if stripped_signed_device != stripped_stored_device:
logger.debug(
"upload signatures: key does not match %s vs %s",
signed_device,
stored_device,
)
raise SynapseError(400, "Key does not match")
try:
verify_signed_json(signed_device, user_id, verify_key)
except SignatureVerifyException:
logger.debug("invalid signature on key")
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
def _exception_to_failure(e):
if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)}
if isinstance(e, CodeMessageException):
return {"status": e.code, "message": str(e)}
if isinstance(e, NotRetryingDestination):
return {"status": 503, "message": "Not ready for retry"}
# include ConnectionRefused and other errors
#
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which json then fails to serialize.
return {"status": 503, "message": str(e)}
def _one_time_keys_match(old_key_json, new_key):
old_key = json.loads(old_key_json)
# if either is a string rather than an object, they must match exactly
if not isinstance(old_key, dict) or not isinstance(new_key, dict):
return old_key == new_key
# otherwise, we strip off the 'signatures' if any, because it's legitimate
# for different upload attempts to have different signatures.
old_key.pop("signatures", None)
new_key_copy = dict(new_key)
new_key_copy.pop("signatures", None)
return old_key == new_key_copy
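# Illustrative example: uploads that differ only in their "signatures" field match, e.g.
#     _one_time_keys_match('{"key": "abc"}', {"key": "abc", "signatures": {"@u:hs": {}}})
# returns True, while any other difference returns False.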
@attr.s
class SignatureListItem:
"""An item in the signature list as used by upload_signatures_for_device_keys.
"""
signing_key_id = attr.ib()
target_user_id = attr.ib()
target_device_id = attr.ib()
signature = attr.ib()
class SigningKeyEduUpdater(object):
"""Handles incoming signing key updates from federation and updates the DB"""
def __init__(self, hs, e2e_keys_handler):
self.store = hs.get_datastore()
self.federation = hs.get_federation_client()
self.clock = hs.get_clock()
self.e2e_keys_handler = e2e_keys_handler
self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
# user_id -> list of updates waiting to be handled.
self._pending_updates = {}
# Recently seen stream ids. We don't bother keeping these in the DB,
# but they're useful to have around to reduce the number of spurious
# resyncs.
self._seen_updates = ExpiringCache(
cache_name="signing_key_update_edu",
clock=self.clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
iterable=True,
)
async def incoming_signing_key_update(self, origin, edu_content):
"""Called on incoming signing key update from federation. Responsible for
parsing the EDU and adding to pending updates list.
Args:
origin (string): the server that sent the EDU
edu_content (dict): the contents of the EDU
"""
user_id = edu_content.pop("user_id")
master_key = edu_content.pop("master_key", None)
self_signing_key = edu_content.pop("self_signing_key", None)
if get_domain_from_id(user_id) != origin:
logger.warning("Got signing key update edu for %r from %r", user_id, origin)
return
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
return
self._pending_updates.setdefault(user_id, []).append(
(master_key, self_signing_key)
)
await self._handle_signing_key_updates(user_id)
async def _handle_signing_key_updates(self, user_id):
"""Actually handle pending updates.
Args:
user_id (string): the user whose updates we are processing
"""
device_handler = self.e2e_keys_handler.device_handler
device_list_updater = device_handler.device_list_updater
with (await self._remote_edu_linearizer.queue(user_id)):
pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates:
# This can happen since we batch updates
return
device_ids = []
logger.info("pending updates: %r", pending_updates)
for master_key, self_signing_key in pending_updates:
new_device_ids = await device_list_updater.process_cross_signing_key_update(
user_id, master_key, self_signing_key,
)
device_ids = device_ids + new_device_ids
await device_handler.notify_device_update(user_id, device_ids)
| 39.83694 | 104 | 0.590138 |
f78aea993c17664887e0fc2e716667a7ba1767a4 | 4,712 | py | Python | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null |
import json
from glob import glob
from pathlib import Path
import subprocess
import os
def is_debug():
return os.getenv('PYCOMPSS_CLI_DEBUG', 'false').lower() == 'true'
def get_object_method_by_name(obj, method_name, include_in_name=False):
for class_method_name in dir(obj):
if not '__' in class_method_name and callable(getattr(obj, class_method_name)):
if class_method_name.startswith(method_name) or (include_in_name and method_name in class_method_name):
return class_method_name
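# Illustrative example (hypothetical object): if obj defines a method named
# "deploy_environment", get_object_method_by_name(obj, "deploy") returns the string
# "deploy_environment"; with include_in_name=True a substring match is enough.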
def table_print(col_names, data):
print_table(data, header=col_names)
def get_current_env_conf(return_path=False):
home_path = str(Path.home())
current_env = glob(home_path + '/.COMPSs/envs/*/current')[0].replace('current', 'env.json')
with open(current_env, 'r') as env:
if return_path:
return json.load(env), current_env
return json.load(env)
def get_env_conf_by_name(env_name):
home_path = str(Path.home())
env_path = home_path + '/.COMPSs/envs/' + env_name + '/env.json'
with open(env_path, 'r') as env:
return json.load(env)
def ssh_run_commands(login_info, commands, **kwargs):
cmd = ' ; '.join(filter(len, commands))
res = subprocess.run(f"ssh {login_info} '{cmd}'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
return res.stdout.decode(), res.stderr.decode()
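# Illustrative example (hypothetical host): ssh_run_commands("user@cluster", ["hostname", "date"])
# runs `ssh user@cluster 'hostname ; date'` in one SSH session and returns the decoded (stdout, stderr) pair.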
def check_exit_code(command):
return subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode
def is_inside_docker():
return ':/docker/' in subprocess.check_output(['cat', '/proc/self/cgroup']).decode('utf-8')
def print_table(items, header=None, wrap=True, wrap_style="wrap", row_line=False, fix_col_width=False):
''' Prints a matrix of data as a human readable table. Matrix
should be a list of lists containing any type of values that can
be converted into text strings.
Two different column adjustment methods are supported through
the *wrap_style* argument:
wrap: it will wrap values to fit max_col_width (by extending cell height)
cut: it will strip values to max_col_width
If the *wrap* argument is set to False, column widths are set to fit all
values in each column.
This code is free software. Updates can be found at
https://gist.github.com/jhcepas/5884168
'''
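# Illustrative example (not from the original module):
#     print_table([["a", 1], ["b", 2]], header=["name", "count"])
# prints pipe-separated columns with a row of "=" characters under the header.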
max_col_width = os.get_terminal_size().columns
if fix_col_width:
c2maxw = dict([(i, max_col_width) for i in range(len(items[0]))])
wrap = True
elif not wrap:
c2maxw = dict([(i, max([len(str(e[i])) for e in items])) for i in range(len(items[0]))])
else:
c2maxw = dict([(i, min(max_col_width, max([len(str(e[i])) for e in items])))
for i in range(len(items[0]))])
if header:
current_item = -1
row = header
if wrap and not fix_col_width:
for col, maxw in c2maxw.items():
c2maxw[col] = max(maxw, len(header[col]))
if wrap:
c2maxw[col] = min(c2maxw[col], max_col_width)
else:
current_item = 0
row = items[current_item]
while row:
is_extra = False
values = []
extra_line = [""]*len(row)
for col, val in enumerate(row):
cwidth = c2maxw[col]
wrap_width = cwidth
val = str(val)
try:
newline_i = val.index("\n")
except ValueError:
pass
else:
wrap_width = min(newline_i+1, wrap_width)
val = val.replace("\n", " ", 1)
if wrap and len(val) > wrap_width:
if wrap_style == "cut":
val = val[:wrap_width-1]+"+"
elif wrap_style == "wrap":
extra_line[col] = val[wrap_width:]
val = val[:wrap_width]
val = val.ljust(cwidth)
values.append(val)
print(' | '.join(values))
if not set(extra_line) - set(['']):
if header and current_item == -1:
print(' | '.join(['='*c2maxw[col] for col in range(len(row)) ]))
current_item += 1
try:
row = items[current_item]
except IndexError:
row = None
else:
row = extra_line
is_extra = True
if row_line and not is_extra and not (header and current_item == 0):
if row:
print(' | '.join(['-'*c2maxw[col] for col in range(len(row)) ]))
else:
print(' | '.join(['='*c2maxw[col] for col in range(len(extra_line)) ]))
 | 38.622951 | 123 | 0.593379 |
162917fff93631f32887699568e6de091d83a8f1 | 9,219 | py | Python | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null |
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.tests.window.common import Base
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_ewma(self):
self._check_ew(name="mean")
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(self, adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
# GH 7603
s0 = Series([np.nan, 1.0, 101.0])
s1 = Series([1.0, np.nan, 101.0])
s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan])
s3 = Series([1.0, np.nan, 101.0, 50.0])
com = 2.0
alpha = 1.0 / (1.0 + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill")
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1.0 - alpha), 1.0]),
(s0, True, True, [np.nan, (1.0 - alpha), 1.0]),
(s0, False, False, [np.nan, (1.0 - alpha), alpha]),
(s0, False, True, [np.nan, (1.0 - alpha), alpha]),
(s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]),
(s1, True, True, [(1.0 - alpha), np.nan, 1.0]),
(s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1.0 - alpha), np.nan, alpha]),
(
s2,
True,
False,
[np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]),
(
s2,
False,
False,
[np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan],
),
(s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]),
(s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]),
(
s3,
False,
False,
[
(1.0 - alpha) ** 3,
np.nan,
(1.0 - alpha) * alpha,
alpha * ((1.0 - alpha) ** 2 + alpha),
],
),
(
s3,
False,
True,
[(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha],
),
]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(name="var")
def test_ewmvol(self):
self._check_ew(name="vol")
def test_ewma_span_com_args(self):
A = self.series.ewm(com=9.5).mean()
B = self.series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20)
with pytest.raises(ValueError):
self.series.ewm().mean()
def test_ewma_halflife_arg(self):
A = self.series.ewm(com=13.932726172912965).mean()
B = self.series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm()
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(self):
# GH 10789
s = self.series
with pytest.raises(ValueError):
s.ewm()
with pytest.raises(ValueError):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize("method", ["mean", "vol", "var"])
def test_ew_empty_series(self, method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
def _check_ew(self, name=None, preserve_nan=False):
series_result = getattr(self.series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(self.frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(self.series.ewm(com=10), name)()
if preserve_nan:
assert result[self._nan_locs].isna().all()
@pytest.mark.parametrize("min_periods", [0, 1])
@pytest.mark.parametrize("name", ["mean", "var", "vol"])
def test_ew_min_periods(self, min_periods, name):
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == "mean":
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(
Series(dtype=object).ewm(com=50, min_periods=min_periods), name
)()
tm.assert_series_equal(result, Series(dtype="float64"))
# check series of length 1
result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()
if name == "mean":
tm.assert_series_equal(result, Series([1.0]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
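# Editor's note: an illustrative helper (not part of the original test module) making
# explicit the decay-parameter conversions the tests above rely on; these follow the
# standard pandas relationships between com, span, halflife and alpha.
def _ewm_alpha_sketch(com=None, span=None, halflife=None):
    import numpy as _np
    if com is not None:
        return 1.0 / (1.0 + com)  # com=9.5 gives the same alpha as span=20
    if span is not None:
        return 2.0 / (span + 1.0)
    return 1.0 - _np.exp(_np.log(0.5) / halflife)  # halflife=10 matches com~=13.93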
| 35.187023 | 86 | 0.530752 |
9cbd9ef9f77776a020982b3a7224ffa529e834c6 | 364 | py | Python | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | [
"MIT"
] | null | null | null | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | [
"MIT"
] | null | null | null | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | [
"MIT"
] | null | null | null | # -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
from typing import Callable, Protocol
Callback = Callable[[], None]
class Service(Protocol):
def add(self, cb: Callback) -> None:
...
def run(self) -> None:
...
def __enter__(self) -> Service:
...
def __exit__(self, *_) -> None:
...
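# Editor's note: an illustrative sketch (an assumption, not part of the original
# module) of one way to satisfy this Protocol -- callbacks registered with `add`
# are executed once by `run`, and the context-manager exit delegates to `run`.
class _SketchCleanup:
    def __init__(self) -> None:
        self._callbacks = []
    def add(self, cb: Callback) -> None:
        self._callbacks.append(cb)
    def run(self) -> None:
        while self._callbacks:
            self._callbacks.pop()()
    def __enter__(self) -> Service:
        return self
    def __exit__(self, *_) -> None:
        self.run()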
| 16.545455 | 40 | 0.568681 |
4460437b7bd779818e8933718c8badf20d6eb3e8 | 1,258 | py | Python | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | [
"MIT"
] | null | null | null | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | [
"MIT"
] | null | null | null | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
    name = models.CharField(max_length=255)
email = models.EmailField(max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
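# Editor's note: a hedged usage sketch (assumes this model is wired up as Django's
# AUTH_USER_MODEL; the email/password values are placeholders, not project data):
#   user = User.objects.create_user(email='[email protected]', password='secret', name='Alice')
#   admin = User.objects.create_superuser(email='[email protected]', password='secret')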
| 32.25641 | 76 | 0.643879 |
baba3d3c67808e177b631266c5cd535f5d48c1bf | 6,150 | py | Python | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | [
"Apache-2.0"
] | null | null | null | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | [
"Apache-2.0"
] | 3 | 2022-01-25T00:58:37.000Z | 2022-01-26T05:44:40.000Z | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | [
"Apache-2.0"
] | 2 | 2018-09-14T21:44:34.000Z | 2020-08-15T22:21:05.000Z | # --------------------------------
# Name: ProportionalAllocation.py
# Purpose: This script is intended to provide a way to use sampling geography that will calculate proportional
# averages or sums based on the percentage of an intersection covered by the sampling geography. The output is
# the sampling geography with fields sampled from the base features.
# Current Owner: David Wasserman
# Last Modified: 4/17/2021
# Copyright: David Wasserman
# ArcGIS Version: ArcGIS Pro
# Python Version: 3.6
# --------------------------------
# Copyright 2021 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import arcpy
import os
import pandas as pd
from arcgis.features import GeoAccessor, GeoSeriesAccessor
import SharedArcNumericalLib as san
# Function Definitions
def proportional_allocation(sampling_features, base_features, out_feature_class,
sum_fields=[], mean_fields=[]):
"""This script is intended to provide a way to use sampling geography that will calculate proportional
averages or sums based on the percentage of an intersection covered by the sampling geography. The output is
the sampling geography with fields sampled from the base features.
Parameters
--------------------
    sampling_features - The sampling (polygon) features that will receive proportional averages or sums
    computed from the attributes of the base features. The output will look like this input layer with new fields.
base_features- The base features have the attributes being sampled by the polygon sampling features.
out_feature_class - The output feature class is a copy of the sampling features with new sum & average fields
sum_fields - Fields to proportionally sum (based on the overlapping areas between the sampling and base features)
from the base to the sampling features.
mean_fields - Fields to proportionally average (based on the overlapping areas between the sampling and base features)
from the base to the sampling features.
"""
arcpy.env.overwriteOutput = True
# Start Analysis
temp_intersect = os.path.join("in_memory", "temp_intersect")
san.arc_print("Calculating original areas...")
base_area_col = "Base_Area_SQMI"
inter_area_col = "Inter_Area_SQMI"
sampling_id = "Sampling_ID"
ratio_coverage = "Proportion"
san.add_new_field(base_features, base_area_col, "DOUBLE")
arcpy.CalculateField_management(base_features, base_area_col, "!shape.area@SQUAREMILES!")
san.add_new_field(sampling_features, "Sampling_ID", "LONG")
oid_s = arcpy.Describe(sampling_features).OIDFieldName
arcpy.CalculateField_management(sampling_features, sampling_id, "!{0}!".format(oid_s))
san.arc_print("Conducting an intersection...", True)
arcpy.Intersect_analysis([[sampling_features, 1], [base_features, 1]], temp_intersect)
san.add_new_field(temp_intersect, inter_area_col, "DOUBLE")
arcpy.CalculateField_management(temp_intersect, inter_area_col, "!shape.area@SQUAREMILES!")
san.arc_print("Calculating proportional sums and/or averages...", True)
sum_fields = [i for i in sum_fields if san.field_exist(temp_intersect, i)]
mean_fields = [i for i in mean_fields if san.field_exist(temp_intersect, i)]
agg_fields = list(set(sum_fields + mean_fields))
all_fields = [sampling_id, inter_area_col, base_area_col] + agg_fields
inter_df = san.arcgis_table_to_df(temp_intersect, all_fields)
inter_df[ratio_coverage] = inter_df[inter_area_col].fillna(0) / inter_df[base_area_col].fillna(1)
sum_cols = ["SUM_" + str(i) for i in sum_fields]
for input, sum in zip(sum_fields, sum_cols):
inter_df[sum] = inter_df[input] * inter_df[ratio_coverage] # Weight X Value
inter_groups_sum = inter_df.groupby(sampling_id).sum()
mean_cols = ["MEAN_" + str(i) for i in mean_fields]
for input, mean in zip(mean_fields, mean_cols):
inter_df[mean] = inter_df[input] * inter_df[inter_area_col] # (Weight X Value) / SUM(weights)
inter_groups_avg = inter_df.groupby(sampling_id).sum()
for mean in mean_cols:
inter_groups_avg[mean] = inter_groups_avg[mean]/inter_groups_avg[inter_area_col]
inter_groups = inter_groups_sum.merge(inter_groups_avg[mean_cols], how="left", left_index=True, right_index=True)
san.arc_print("Associating results to sampled SEDF...")
samp_df = pd.DataFrame.spatial.from_featureclass(sampling_features)
samp_df = samp_df.merge(inter_groups, how="left", left_on=sampling_id, right_index=True,
suffixes=("DELETE_X", "DELETE_Y"))
kept_cols = [i for i in samp_df.columns if "DELETE" not in str(i) and str(i) not in agg_fields]
samp_df = samp_df[kept_cols].copy()
san.arc_print("Exporting results...", True)
samp_df.spatial.to_featureclass(out_feature_class)
san.arc_print("Script Completed Successfully.", True)
# End do_analysis function
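# Editor's note: a hedged example of calling proportional_allocation directly from
# Python (the geodatabase paths and field names below are hypothetical placeholders):
# proportional_allocation(
#     sampling_features=r"C:\data\analysis.gdb\hex_grid",
#     base_features=r"C:\data\analysis.gdb\census_blocks",
#     out_feature_class=r"C:\data\analysis.gdb\hex_grid_allocated",
#     sum_fields=["POP", "JOBS"],
#     mean_fields=["MEDIAN_INC"])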
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Define input parameters
target_feature_class = arcpy.GetParameterAsText(0)
join_feature_class = arcpy.GetParameterAsText(1)
output_feature_class = arcpy.GetParameterAsText(2)
sum_fields = arcpy.GetParameterAsText(3).split(";")
mean_fields = arcpy.GetParameterAsText(4).split(";")
proportional_allocation(target_feature_class, join_feature_class, output_feature_class, sum_fields, mean_fields)
| 53.947368 | 123 | 0.737724 |
67625e0b320918976bc854666bd7dc3cf04669b8 | 1,784 | py | Python | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | [
"MIT"
] | 1 | 2019-08-11T18:25:18.000Z | 2019-08-11T18:25:18.000Z | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | [
"MIT"
] | 33 | 2020-05-12T01:21:05.000Z | 2021-12-07T16:13:42.000Z | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | [
"MIT"
] | null | null | null | # noinspection PyPackageRequirements
import datawrangler as dw
import numpy as np
from ..core.model import apply_model
from ..core import get_default_options
from ..align.common import pad
defaults = get_default_options()
def get_n_components(model, **kwargs):
if 'n_components' in kwargs.keys():
return kwargs['n_components']
if type(model) is str:
if model in ['SparseCoder']:
if 'dictionary' in kwargs.keys():
return kwargs['dictionary'].shape[1]
elif model == 'PPCA':
return None
else:
return defaults[model].copy().pop('n_components', None)
elif hasattr(model, '__name__'):
return get_n_components(getattr(model, '__name__'), **kwargs)
elif type(model) is dict and all([k in ['model', 'args', 'kwargs'] for k in model.keys()]):
return get_n_components(model['model'], **model['kwargs'])
else:
return None
@dw.decorate.apply_stacked
def reduce(data, model='IncrementalPCA', **kwargs):
# noinspection PyTypeChecker
n_components = get_n_components(model, **kwargs)
if (n_components is None) or (data.shape[1] > n_components):
return apply_model(data, model, search=['sklearn.decomposition', 'sklearn.manifold', 'sklearn.mixture',
'umap', 'ppca'],
**dw.core.update_dict(get_default_options()['reduce'], kwargs))
elif data.shape[1] == n_components:
transformed_data = data.copy()
else:
transformed_data = pad(data, c=n_components)
return_model = kwargs.pop('return_model', False)
if return_model:
return transformed_data, {'model': model, 'args': [], 'kwargs': kwargs}
else:
return transformed_data
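# Editor's note: a hedged usage sketch (not part of the original module; the data and
# estimator choice are illustrative assumptions). Any estimator name found in the
# searched packages, e.g. 'IncrementalPCA', can be passed together with its kwargs:
#   import numpy as np
#   embedded = reduce(np.random.randn(100, 10), model='IncrementalPCA', n_components=3)
#   # embedded has 3 columns; if n_components >= the input width the data is
#   # returned copied or zero-padded instead of being re-fit.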
| 34.307692 | 111 | 0.633408 |
8c5fd7bc1c9bd293de89770126a8ee09dbc102a4 | 1,042 | py | Python | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | [
"MIT"
] | 1 | 2020-11-21T20:22:11.000Z | 2020-11-21T20:22:11.000Z | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | [
"MIT"
] | 43 | 2020-06-29T03:50:10.000Z | 2022-03-15T23:13:14.000Z | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | [
"MIT"
] | 1 | 2021-03-10T22:20:18.000Z | 2021-03-10T22:20:18.000Z | #!/usr/bin/env python
"""
Very basic test of faultmeshio/tsurf.py.
Reads and writes a Tsurf file.
"""
import sys
sys.path.insert(0, '../../src')
# import pdb
# pdb.set_trace()
from eq_fault_geom import faultmeshio
# Files.
inFile = '../../data/Wellington_Hutt_Valley_1.ts'
outFile1 = 'Wellington_Hutt_Valley_1_test1.ts'
outFile2 = 'Wellington_Hutt_Valley_1_test2.ts'
outFile3 = 'Wellington_Hutt_Valley_1_test3.ts'
# Read and write sample Tsurf file.
tsurf1 = faultmeshio.tsurf(inFile)
tsurf1.write(outFile1)
# Create and write new mesh using Tsurf and properties from original mesh.
x = tsurf1.x
y = tsurf1.y
z = tsurf1.z
triangles = tsurf1.mesh.cells
tsurf2 = faultmeshio.tsurf(x, y, z, triangles, name=tsurf1.name,
solid_color=tsurf1.solid_color, visible=tsurf1.visible, NAME=tsurf1.NAME,
AXIS_NAME=tsurf1.AXIS_NAME, AXIS_UNIT=tsurf1.AXIS_UNIT, ZPOSITIVE=tsurf1.ZPOSITIVE)
# Write the mesh.
tsurf2.write(outFile2)
# Create and write mesh using default properties.
tsurf3 = faultmeshio.tsurf(x, y, z, triangles)
tsurf3.write(outFile3)
| 27.421053 | 83 | 0.769674 |
6368657193f4d05caf7404e89c8aa8310df8978c | 1,525 | py | Python | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | [
"MIT"
] | 1 | 2020-01-27T18:47:21.000Z | 2020-01-27T18:47:21.000Z | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | [
"MIT"
] | 6 | 2017-09-21T22:30:05.000Z | 2020-12-07T05:07:47.000Z | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | [
"MIT"
] | 2 | 2018-10-31T15:23:51.000Z | 2019-05-22T12:08:18.000Z | from minus80 import Accession
def test_bare_accession():
x = Accession("empty")
assert isinstance(x, Accession)
str(x)
repr(x)
def test_add_relative_path():
x = Accession("empty")
x.add_file("./test.txt")
def test_add_files():
x = Accession("empty")
x.add_files(["./test.txt", "test2.txt", "test3.txt"])
def test_accession_name(simpleAccession):
assert simpleAccession.name == "Sample1"
def test_accession_files(simpleAccession):
assert "file1.txt" in simpleAccession.files
assert "file2.txt" in simpleAccession.files
def test_accession_metadata(simpleAccession):
assert simpleAccession.metadata["type"] == "sample"
def test_accession_getitem(simpleAccession):
assert simpleAccession["type"] == "sample"
def test_accession_setitem(simpleAccession):
simpleAccession["added"] = True
assert simpleAccession["added"] == True
def test_accession_file_check(RNAAccession1):
assert len(RNAAccession1.files) == 4
def test_accession_add_file_skip_check(simpleAccession):
simpleAccession.add_file("ssh://[email protected]/path/to/file.txt")
assert "ssh://[email protected]/path/to/file.txt" in simpleAccession.files
def test_accession_files_are_set(simpleAccession):
simpleAccession.add_file("/path/to/file.txt")
len_files = len(simpleAccession.files)
simpleAccession.add_file("/path/to/file.txt")
assert len(simpleAccession.files) == len_files
def test_load_from_yaml():
Accession.from_yaml("data/test_accession.yaml")
| 25 | 78 | 0.739016 |
7208e20eeb8a0b002aa5bd4736ac752a17ea073c | 1,948 | py | Python | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
] | 6 | 2017-01-13T22:32:55.000Z | 2022-03-27T22:19:49.000Z | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
] | 1 | 2016-09-13T17:59:41.000Z | 2016-09-13T18:05:20.000Z | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | [
"MIT"
] | null | null | null | import math
class Vector:
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
def length(self):
return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
def normalize(self):
return self / self.length()
def __add__(self,vec):
return Vector(self.x + vec.x,self.y + vec.y,self.z + vec.z)
def __sub__(self,vec):
return Vector(self.x - vec.x,self.y - vec.y,self.z - vec.z)
def __neg__(self):
return Vector(-self.x,-self.y,-self.z)
def __mul__(self,num):
return Vector(self.x * num,self.y * num,self.z * num)
def __truediv__(self,num):
return Vector(self.x / num,self.y / num,self.z / num)
def dot(a,b):
return a.x*b.x + a.y*b.y + a.z*b.z
def cross(a,b):
return Vector(a.y*b.z - a.z*b.y,
a.z*b.x - a.x*b.z,
a.x*b.y - a.y*b.x)
def reflect(self,vec):
mirror=self * Vector.dot(self,vec)/Vector.dot(self,self)
return (mirror*2-vec).normalize()
class Ray:
def __init__(self,position,direction):
self.position = position
self.direction = direction
class Plane:
def __init__(self,normal,intercept):
self.normal = normal
self.intercept = intercept
def intersection(self,ray):
div=Vector.dot(ray.direction,self.normal)
if div==0: # Plane and ray are parallel!
return None
t = -(Vector.dot(ray.position,self.normal)+self.intercept)/div
if t>0:
return t
else:
return None
class Sphere:
def __init__(self,position,radius):
self.position = position
self.radius = radius
def intersection(self,ray):
tca=Vector.dot(self.position-ray.position,ray.direction)
if tca<0:
return None
d2=Vector.dot(self.position-ray.position,self.position-ray.position)-tca*tca
if d2 > self.radius ** 2:
return None
thc=math.sqrt(self.radius ** 2 - d2)
ret=min(tca-thc,tca+thc)
if ret<0:
return None
else:
return ret
class Triangle:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
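# Editor's note: a small illustrative check added for this edit (not part of the
# original module). A ray from the origin along +z against a unit sphere centred
# at (0, 0, 5) should first be hit at distance 4.
if __name__ == "__main__":
    _ray = Ray(Vector(0, 0, 0), Vector(0, 0, 1))
    _sphere = Sphere(Vector(0, 0, 5), 1)
    print(_sphere.intersection(_ray))  # expected: 4.0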
| 20.505263 | 78 | 0.649384 |
402fad72683c8b97ece1c7c73977a49d01015fcf | 660 | py | Python | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | [
"MIT"
] | 7 | 2021-07-24T05:37:07.000Z | 2022-03-15T05:17:25.000Z | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | [
"MIT"
] | 2 | 2021-08-05T14:09:46.000Z | 2021-08-21T14:12:03.000Z | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | [
"MIT"
] | 8 | 2021-08-20T17:17:02.000Z | 2022-03-15T05:17:27.000Z | """Tests for the dragon_curve.py module"""
import unittest
from dragon_curve import dragon_curve # pylint: disable=E0401
TEST_DRAGON_CURVE = [
    # First iteration
('fx', 'fx+yf+'),
    # Second iteration
('fx+yf+', 'fx+yf++-fx-yf+'),
    # Third iteration
('fx+yf++-fx-yf+', 'fx+yf++-fx-yf++-fx+yf+--fx-yf+')
]
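# Editor's note: a minimal sketch (an editor's assumption, not the project's actual
# implementation) of the single-iteration rewrite the expected strings above imply:
# 'x' -> 'x+yf+' and 'y' -> '-fx-y', with every other symbol left unchanged.
def _dragon_curve_sketch(axiom):
    rules = {'x': 'x+yf+', 'y': '-fx-y'}
    return ''.join(rules.get(symbol, symbol) for symbol in axiom)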
class TestDragonCurve(unittest.TestCase):
"""Тест-кейс функции dragon_curve"""
def test_dragon_curve(self): # pylint: disable=C0116
for data, expected in TEST_DRAGON_CURVE:
with self.subTest():
self.assertEqual(dragon_curve(data), expected)
if __name__ == '__main__':
unittest.main()
| 25.384615 | 62 | 0.633333 |
43cc634a104124142b5282f454f0d0de3a0ff1cb | 7,860 | py | Python | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | [
"Xnet",
"X11"
] | 146 | 2016-05-24T02:55:53.000Z | 2022-03-23T14:54:42.000Z | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | [
"Xnet",
"X11"
] | 1 | 2017-08-17T23:07:39.000Z | 2017-08-18T08:27:19.000Z | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | [
"Xnet",
"X11"
] | 94 | 2016-05-06T12:34:33.000Z | 2022-03-30T03:31:04.000Z |
# Code from Chapter 13 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np
class dtree:
""" Decision Tree with weights"""
def __init__(self):
""" Constructor """
def read_data(self,filename):
fid = open(filename,"r")
data = []
d = []
for line in fid.readlines():
d.append(line.strip())
for d1 in d:
data.append(d1.split(","))
fid.close()
self.featureNames = data[0]
self.featureNames = self.featureNames[:-1]
data = data[1:]
self.classes = []
for d in range(len(data)):
self.classes.append(data[d][-1])
data[d] = data[d][:-1]
return data,self.classes,self.featureNames
def classify(self,tree,datapoint):
if type(tree) == type("string"):
# Have reached a leaf
return tree
else:
a = tree.keys()[0]
for i in range(len(self.featureNames)):
if self.featureNames[i]==a:
break
try:
t = tree[a][datapoint[i]]
return self.classify(t,datapoint)
except:
return None
def classifyAll(self,tree,data):
results = []
for i in range(len(data)):
results.append(self.classify(tree,data[i]))
return results
def make_tree(self,data,weights,classes,featureNames,maxlevel=-1,level=0):
nData = len(data)
nFeatures = len(data[0])
try:
self.featureNames
except:
self.featureNames = featureNames
# List the possible classes
newClasses = []
for aclass in classes:
if newClasses.count(aclass)==0:
newClasses.append(aclass)
# Compute the default class (and total entropy)
frequency = np.zeros(len(newClasses))
totalGini = 0
index = 0
for aclass in newClasses:
frequency[index] = classes.count(aclass)
totalGini += (float(frequency[index])/nData)**2
index += 1
totalGini = 1 - totalGini
default = classes[np.argmax(frequency)]
if nData==0 or nFeatures == 0 or (maxlevel>=0 and level>maxlevel):
# Have reached an empty branch
return default
elif classes.count(classes[0]) == nData:
# Only 1 class remains
return classes[0]
else:
# Choose which feature is best
#print totalGini
gain = np.zeros(nFeatures)
for feature in range(nFeatures):
g = self.calc_info_gain(data,weights,classes,feature)
gain[feature] = totalGini - g
#print "gain", gain
bestFeature = np.argmin(gain)
#print bestFeature
tree = {featureNames[bestFeature]:{}}
# List the values that bestFeature can take
values = []
for datapoint in data:
if values.count(datapoint[bestFeature])==0:
values.append(datapoint[bestFeature])
for value in values:
# Find the datapoints with each feature value
newData = []
newWeights = []
newClasses = []
index = 0
for datapoint in data:
if datapoint[bestFeature]==value:
if bestFeature==0:
newdatapoint = datapoint[1:]
newweight = weights[1:]
newNames = featureNames[1:]
elif bestFeature==nFeatures:
newdatapoint = datapoint[:-1]
newweight = weights[:-1]
newNames = featureNames[:-1]
else:
newdatapoint = datapoint[:bestFeature]
newdatapoint.extend(datapoint[bestFeature+1:])
newweight = weights[:bestFeature]
newweight = np.concatenate((newweight,weights[bestFeature+1:]))
newNames = featureNames[:bestFeature]
newNames.extend(featureNames[bestFeature+1:])
newData.append(newdatapoint)
newWeights = np.concatenate((newWeights,newweight))
newClasses.append(classes[index])
index += 1
# Now recurse to the next level
subtree = self.make_tree(newData,newWeights,newClasses,newNames,maxlevel,level+1)
# And on returning, add the subtree on to the tree
tree[featureNames[bestFeature]][value] = subtree
return tree
def printTree(self,tree,str):
if type(tree) == dict:
print str, tree.keys()[0]
for item in tree.values()[0].keys():
print str, item
self.printTree(tree.values()[0][item], str + "\t")
else:
print str, "\t->\t", tree
def calc_info_gain(self,data,weights,classes,feature,maxlevel=-1,level=0):
gain = 0
nData = len(data)
        # (note: self.featureNames is already set by make_tree; it is not used in this method)
# List the values that feature can take
values = []
valueweight = np.array([],dtype=float)
counter = 0
for datapoint in data:
if values.count(datapoint[feature])==0:
values.append(datapoint[feature])
if np.size(valueweight) == 0:
valueweight = np.array([weights[counter]])
else:
valueweight = np.concatenate((valueweight,np.array([weights[counter]])))
else:
ind = values.index(datapoint[feature])
valueweight[ind] += weights[counter]
counter += 1
valueweight /= sum(valueweight)
#print "v",valueweight
featureCounts = np.zeros(len(values))
gini = np.zeros(len(values))
valueIndex = 0
# Find where those values appear in data[feature] and the corresponding class
for value in values:
dataIndex = 0
newClasses = []
for datapoint in data:
if datapoint[feature]==value:
featureCounts[valueIndex]+=1
newClasses.append(classes[dataIndex])
dataIndex += 1
# Get the values in newClasses
classValues = []
for aclass in newClasses:
if classValues.count(aclass)==0:
classValues.append(aclass)
classCounts = np.zeros(len(classValues))
classIndex = 0
for classValue in classValues:
for aclass in newClasses:
if aclass == classValue:
classCounts[classIndex]+=1
classIndex += 1
for classIndex in range(len(classValues)):
gini[valueIndex] += (float(classCounts[classIndex])/sum(classCounts))**2
gain = gain + float(featureCounts[valueIndex])/nData * gini[valueIndex] * valueweight[valueIndex]
valueIndex += 1
return 1-gain
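# Editor's note: a hedged usage sketch with made-up data (not from the original
# chapter code); with uniform weights the weighted tree reduces to an ordinary
# Gini-based decision tree:
# t = dtree()
# data = [['sunny', 'hot'], ['rain', 'mild'], ['rain', 'cool']]
# classes = ['no', 'yes', 'yes']
# weights = np.ones(len(data)) / len(data)
# tree = t.make_tree(data, weights, classes, ['outlook', 'temperature'])
# t.printTree(tree, '')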
| 35.246637 | 109 | 0.514249 |
c3db95043d732437500b0eaf3a02765fe4c41c46 | 17,419 | py | Python | tests/gpflow/utilities/test_printing.py | WRJacobs/GPflow | 0cc82706fc38be24eab5cf359fc715bbc333f2d1 | [
"Apache-2.0"
] | null | null | null | tests/gpflow/utilities/test_printing.py | WRJacobs/GPflow | 0cc82706fc38be24eab5cf359fc715bbc333f2d1 | [
"Apache-2.0"
] | null | null | null | tests/gpflow/utilities/test_printing.py | WRJacobs/GPflow | 0cc82706fc38be24eab5cf359fc715bbc333f2d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
import gpflow
from gpflow.config import Config, as_context
from gpflow.utilities.utilities import (
leaf_components,
_merge_leaf_components,
tabulate_module_summary,
set_trainable,
)
rng = np.random.RandomState(0)
class Data:
H0 = 5
H1 = 2
M = 10
D = 1
Z = 0.5 * np.ones((M, 1))
ls = 2.0
var = 1.0
# ------------------------------------------
# Helpers
# ------------------------------------------
class A(tf.Module):
def __init__(self, name=None):
super().__init__(name)
self.var_trainable = tf.Variable(tf.zeros((2, 2, 1)), trainable=True)
self.var_fixed = tf.Variable(tf.ones((2, 2, 1)), trainable=False)
class B(tf.Module):
def __init__(self, name=None):
super().__init__(name)
self.submodule_list = [A(), A()]
self.submodule_dict = dict(a=A(), b=A())
self.var_trainable = tf.Variable(tf.zeros((2, 2, 1)), trainable=True)
self.var_fixed = tf.Variable(tf.ones((2, 2, 1)), trainable=False)
class C(tf.keras.Model):
def __init__(self, name=None):
super().__init__(name)
self.variable = tf.Variable(tf.zeros((2, 2, 1)), trainable=True)
self.param = gpflow.Parameter(0.0)
self.dense = tf.keras.layers.Dense(5)
def create_kernel():
kern = gpflow.kernels.SquaredExponential(lengthscales=Data.ls, variance=Data.var)
set_trainable(kern.lengthscales, False)
return kern
def create_compose_kernel():
kernel = gpflow.kernels.Product(
[
gpflow.kernels.Sum([create_kernel(), create_kernel()]),
gpflow.kernels.Sum([create_kernel(), create_kernel()]),
]
)
return kernel
def create_model():
kernel = create_kernel()
model = gpflow.models.SVGP(
kernel=kernel,
likelihood=gpflow.likelihoods.Gaussian(variance_lower_bound=None),
inducing_variable=Data.Z,
q_diag=True,
)
set_trainable(model.q_mu, False)
return model
# ------------------------------------------
# Reference
# ------------------------------------------
example_tf_module_variable_dict = {
"A.var_trainable": {"value": np.zeros((2, 2, 1)), "trainable": True, "shape": (2, 2, 1),},
"A.var_fixed": {"value": np.ones((2, 2, 1)), "trainable": False, "shape": (2, 2, 1),},
}
example_module_list_variable_dict = {
"submodule_list[0].var_trainable": example_tf_module_variable_dict["A.var_trainable"],
"submodule_list[0].var_fixed": example_tf_module_variable_dict["A.var_fixed"],
"submodule_list[1].var_trainable": example_tf_module_variable_dict["A.var_trainable"],
"submodule_list[1].var_fixed": example_tf_module_variable_dict["A.var_fixed"],
"submodule_dict['a'].var_trainable": example_tf_module_variable_dict["A.var_trainable"],
"submodule_dict['a'].var_fixed": example_tf_module_variable_dict["A.var_fixed"],
"submodule_dict['b'].var_trainable": example_tf_module_variable_dict["A.var_trainable"],
"submodule_dict['b'].var_fixed": example_tf_module_variable_dict["A.var_fixed"],
"B.var_trainable": example_tf_module_variable_dict["A.var_trainable"],
"B.var_fixed": example_tf_module_variable_dict["A.var_fixed"],
}
kernel_param_dict = {
"SquaredExponential.lengthscales": {"value": Data.ls, "trainable": False, "shape": (),},
"SquaredExponential.variance": {"value": Data.var, "trainable": True, "shape": ()},
}
compose_kernel_param_dict = {
"kernels[0].kernels[0].variance": kernel_param_dict["SquaredExponential.variance"],
"kernels[0].kernels[0].lengthscales": kernel_param_dict["SquaredExponential.lengthscales"],
"kernels[0].kernels[1].variance": kernel_param_dict["SquaredExponential.variance"],
"kernels[0].kernels[1].lengthscales": kernel_param_dict["SquaredExponential.lengthscales"],
"kernels[1].kernels[0].variance": kernel_param_dict["SquaredExponential.variance"],
"kernels[1].kernels[0].lengthscales": kernel_param_dict["SquaredExponential.lengthscales"],
"kernels[1].kernels[1].variance": kernel_param_dict["SquaredExponential.variance"],
"kernels[1].kernels[1].lengthscales": kernel_param_dict["SquaredExponential.lengthscales"],
}
model_gp_param_dict = {
"kernel.lengthscales": kernel_param_dict["SquaredExponential.lengthscales"],
"kernel.variance": kernel_param_dict["SquaredExponential.variance"],
"likelihood.variance": {"value": 1.0, "trainable": True, "shape": ()},
"inducing_variable.Z": {"value": Data.Z, "trainable": True, "shape": (Data.M, Data.D),},
"SVGP.q_mu": {"value": np.zeros((Data.M, 1)), "trainable": False, "shape": (Data.M, 1),},
"SVGP.q_sqrt": {"value": np.ones((Data.M, 1)), "trainable": True, "shape": (Data.M, 1),},
}
example_dag_module_param_dict = {
"SVGP.kernel.variance\nSVGP.kernel.lengthscales": kernel_param_dict[
"SquaredExponential.lengthscales"
],
"SVGP.likelihood.variance": {"value": 1.0, "trainable": True, "shape": ()},
"SVGP.inducing_variable.Z": {"value": Data.Z, "trainable": True, "shape": (Data.M, Data.D),},
"SVGP.q_mu": {"value": np.zeros((Data.M, 1)), "trainable": False, "shape": (Data.M, 1),},
"SVGP.q_sqrt": {"value": np.ones((Data.M, 1)), "trainable": True, "shape": (Data.M, 1),},
}
compose_kernel_param_print_string = """\
name class transform prior trainable shape dtype value\n\
------------------------------------------ --------- ----------- ------- ----------- ------- ------- -------\n\
Product.kernels[0].kernels[0].variance Parameter Softplus True () float64 1\n\
Product.kernels[0].kernels[0].lengthscales Parameter Softplus False () float64 2\n\
Product.kernels[0].kernels[1].variance Parameter Softplus True () float64 1\n\
Product.kernels[0].kernels[1].lengthscales Parameter Softplus False () float64 2\n\
Product.kernels[1].kernels[0].variance Parameter Softplus True () float64 1\n\
Product.kernels[1].kernels[0].lengthscales Parameter Softplus False () float64 2\n\
Product.kernels[1].kernels[1].variance Parameter Softplus True () float64 1\n\
Product.kernels[1].kernels[1].lengthscales Parameter Softplus False () float64 2"""
kernel_param_print_string = """\
name class transform prior trainable shape dtype value\n\
------------------------------- --------- ----------- ------- ----------- ------- ------- -------\n\
SquaredExponential.variance Parameter Softplus True () float64 1\n\
SquaredExponential.lengthscales Parameter Softplus False () float64 2"""
kernel_param_print_string_with_shift = """\
name class transform prior trainable shape dtype value\n\
------------------------------- --------- ---------------- ------- ----------- ------- ------- -------\n\
SquaredExponential.variance Parameter Softplus + Shift True () float64 1\n\
SquaredExponential.lengthscales Parameter Softplus + Shift False () float64 2"""
model_gp_param_print_string = """\
name class transform prior trainable shape dtype value\n\
------------------------ --------- ----------- ------- ----------- ------- ------- --------\n\
SVGP.kernel.variance Parameter Softplus True () float64 1.0\n\
SVGP.kernel.lengthscales Parameter Softplus False () float64 2.0\n\
SVGP.likelihood.variance Parameter Softplus True () float64 1.0\n\
SVGP.inducing_variable.Z Parameter True (10, 1) float64 [[0.5...\n\
SVGP.q_mu Parameter False (10, 1) float64 [[0....\n\
SVGP.q_sqrt Parameter Softplus True (10, 1) float64 [[1...."""
example_tf_module_variable_print_string = """\
name class transform prior trainable shape dtype value\n\
--------------- ---------------- ----------- ------- ----------- --------- ------- --------\n\
A.var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
A.var_fixed ResourceVariable False (2, 2, 1) float32 [[[1...."""
example_module_list_variable_print_string = """\
name class transform prior trainable shape dtype value\n\
----------------------------------- ---------------- ----------- ------- ----------- --------- ------- --------\n\
B.submodule_list[0].var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
B.submodule_list[0].var_fixed ResourceVariable False (2, 2, 1) float32 [[[1....\n\
B.submodule_list[1].var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
B.submodule_list[1].var_fixed ResourceVariable False (2, 2, 1) float32 [[[1....\n\
B.submodule_dict['a'].var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
B.submodule_dict['a'].var_fixed ResourceVariable False (2, 2, 1) float32 [[[1....\n\
B.submodule_dict['b'].var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
B.submodule_dict['b'].var_fixed ResourceVariable False (2, 2, 1) float32 [[[1....\n\
B.var_trainable ResourceVariable True (2, 2, 1) float32 [[[0....\n\
B.var_fixed ResourceVariable False (2, 2, 1) float32 [[[1...."""
# Note: we use grid format here because we have a double reference to the same variable
# which does not render nicely in the table formatting.
example_tf_keras_model = """\
+-------------------------+------------------+-------------+---------+-------------+-----------+---------+----------+\n\
| name | class | transform | prior | trainable | shape | dtype | value |\n\
+=========================+==================+=============+=========+=============+===========+=========+==========+\n\
| C._trainable_weights[0] | ResourceVariable | | | True | (2, 2, 1) | float32 | [[[0.... |\n\
| C.variable | | | | | | | |\n\
+-------------------------+------------------+-------------+---------+-------------+-----------+---------+----------+\n\
| C.param | Parameter | | | True | () | float64 | 0.0 |\n\
+-------------------------+------------------+-------------+---------+-------------+-----------+---------+----------+"""
# ------------------------------------------
# Fixtures
# ------------------------------------------
@pytest.fixture(params=[A, B, create_kernel, create_model])
def module(request):
return request.param()
@pytest.fixture
def dag_module():
dag = create_model()
dag.kernel.variance = dag.kernel.lengthscales
return dag
# ------------------------------------------
# Tests
# ------------------------------------------
def test_leaf_components_only_returns_parameters_and_variables(module):
for path, variable in leaf_components(module).items():
assert isinstance(variable, tf.Variable) or isinstance(variable, gpflow.Parameter)
@pytest.mark.parametrize(
"module_callable, expected_param_dicts",
[(create_kernel, kernel_param_dict), (create_model, model_gp_param_dict)],
)
def test_leaf_components_registers_variable_properties(module_callable, expected_param_dicts):
module = module_callable()
for path, variable in leaf_components(module).items():
param_name = path.split(".")[-2] + "." + path.split(".")[-1]
assert isinstance(variable, gpflow.Parameter)
np.testing.assert_equal(variable.value().numpy(), expected_param_dicts[param_name]["value"])
assert variable.trainable == expected_param_dicts[param_name]["trainable"]
assert variable.shape == expected_param_dicts[param_name]["shape"]
@pytest.mark.parametrize(
"module_callable, expected_param_dicts", [(create_compose_kernel, compose_kernel_param_dict),],
)
def test_leaf_components_registers_compose_kernel_variable_properties(
module_callable, expected_param_dicts
):
module = module_callable()
leaf_components_dict = leaf_components(module)
assert len(leaf_components_dict) > 0
for path, variable in leaf_components_dict.items():
path_as_list = path.split(".")
param_name = path_as_list[-3] + "." + path_as_list[-2] + "." + path_as_list[-1]
assert isinstance(variable, gpflow.Parameter)
np.testing.assert_equal(variable.value().numpy(), expected_param_dicts[param_name]["value"])
assert variable.trainable == expected_param_dicts[param_name]["trainable"]
assert variable.shape == expected_param_dicts[param_name]["shape"]
@pytest.mark.parametrize(
"module_class, expected_var_dicts",
[(A, example_tf_module_variable_dict), (B, example_module_list_variable_dict),],
)
def test_leaf_components_registers_param_properties(module_class, expected_var_dicts):
module = module_class()
for path, variable in leaf_components(module).items():
var_name = path.split(".")[-2] + "." + path.split(".")[-1]
assert isinstance(variable, tf.Variable)
np.testing.assert_equal(variable.numpy(), expected_var_dicts[var_name]["value"])
assert variable.trainable == expected_var_dicts[var_name]["trainable"]
assert variable.shape == expected_var_dicts[var_name]["shape"]
@pytest.mark.parametrize("expected_var_dicts", [example_dag_module_param_dict])
def test_merge_leaf_components_merges_keys_with_same_values(dag_module, expected_var_dicts):
leaf_components_dict = leaf_components(dag_module)
for path, variable in _merge_leaf_components(leaf_components_dict).items():
assert path in expected_var_dicts
for sub_path in path.split("\n"):
assert sub_path in leaf_components_dict
assert leaf_components_dict[sub_path] is variable
@pytest.mark.parametrize(
"module_callable, expected_param_print_string",
[
(create_compose_kernel, compose_kernel_param_print_string),
(create_kernel, kernel_param_print_string),
(create_model, model_gp_param_print_string),
(A, example_tf_module_variable_print_string),
(B, example_module_list_variable_print_string),
],
)
def test_print_summary_output_string(module_callable, expected_param_print_string):
with as_context(Config(positive_minimum=None)):
assert tabulate_module_summary(module_callable()) == expected_param_print_string
def test_print_summary_output_string_with_positive_minimum():
with as_context(Config(positive_minimum=1e-6)):
print(tabulate_module_summary(create_kernel()))
assert tabulate_module_summary(create_kernel()) == kernel_param_print_string_with_shift
def test_print_summary_for_keras_model():
# Note: best to use `grid` formatting for `tf.keras.Model` printing
# because of the duplicates in the references to the variables.
assert tabulate_module_summary(C(), tablefmt="grid") == example_tf_keras_model
def test_leaf_components_combination_kernel():
"""
Regression test for kernel compositions - output for printing should not be empty (issue #1066).
"""
k = gpflow.kernels.SquaredExponential() + gpflow.kernels.SquaredExponential()
assert leaf_components(k), "Combination kernel should have non-empty leaf components"
def test_module_parameters_return_iterators_not_generators():
"""
Regression test: Ensure that gpflow.Module parameters return iterators like in TF2, not
generators.
Reason:
param = m.params # <generator object>
x = [p for p in param] # List[Parameters]
y = [p for p in param] # [] empty!
"""
m = create_model()
assert isinstance(m, gpflow.base.Module)
assert isinstance(m.parameters, tuple)
assert isinstance(m.trainable_parameters, tuple)
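# Editor's note: a small illustration (independent of gpflow) of the generator pitfall
# described in the docstring above -- a generator can only be consumed once, which is
# why the parameter properties are required to return tuples:
#   gen = (p for p in [1, 2, 3])
#   first = list(gen)   # [1, 2, 3]
#   second = list(gen)  # [] -- already exhausted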
| 49.345609 | 121 | 0.58884 |
93477b630d32f08205dc636baf79485650df17fb | 596 | py | Python | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | botorch/utils/multi_objective/box_decompositions/__init__.py | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.multi_objective.box_decompositions.utils import (
compute_non_dominated_hypercell_bounds_2d,
)
__all__ = [
"compute_non_dominated_hypercell_bounds_2d",
"FastNondominatedPartitioning",
"NondominatedPartitioning",
]
| 27.090909 | 76 | 0.790268 |
249f546a593c92c52aed5abfcbfce9dbdd53427a | 25,468 | py | Python | qa/rpc-tests/fundrawtransaction-hd.py | emnaSE/Xdata-test-master | ac1eed013fcf3a6d50fb7a87b1b9eb50cac5ac46 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction-hd.py | emnaSE/Xdata-test-master | ac1eed013fcf3a6d50fb7a87b1b9eb50cac5ac46 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction-hd.py | emnaSE/Xdata-test-master | ac1eed013fcf3a6d50fb7a87b1b9eb50cac5ac46 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1']] * self.num_nodes, redirect_stderr=True)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
self.log.info("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
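        # (the relayfee returned above is quoted per kilobyte, so dividing by 1000 gives
        # the per-byte rate; the tolerance is the cost of two extra signature bytes at that rate)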
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 XDA to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_node(self.nodes[0], 0)
stop_node(self.nodes[1], 2)
stop_node(self.nodes[2], 3)
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1']] * self.num_nodes, redirect_stderr=True)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #create the same outputs via sendmany so the resulting fee can be compared
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
if __name__ == '__main__':
RawTransactionsTest().main()
| 40.683706 | 214 | 0.558701 |
ff10f100d34be4a48c3f3e8718224871106c9c2a | 1,146 | py | Python | serivce/app/views.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | serivce/app/views.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | serivce/app/views.py | LupusAnay/gateway-prototype | e40426e05ca7b2a83bfd8d31fec252086f586eb8 | [
"MIT"
] | null | null | null | from flask import Blueprint, make_response, jsonify, request
from functools import wraps
from serivce.jwt_decoder import decode_auth_token
service_blueprint = Blueprint('service', __name__, url_prefix='/service')
def require_auth(role=''):
def wrap(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
token = request.headers.get('Authorization').replace('Bearer ', '')
payload = decode_auth_token(token)
print(payload)
if type(payload) is not dict:
return make_response(jsonify({'error': 'token_error',
'token': payload}))
return f(*args, **kwargs)
return wrapped_f
return wrap
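# Editor's sketch (not part of the original file): `role` above is accepted but never
# enforced -- the decorator only checks that the Authorization header carries a
# decodable "Bearer <token>" JWT. A role-aware variant might look like the function
# below, assuming the decoded payload exposes a "role" claim (an assumption, not
# something verified against the project's token format):
def require_role(role):
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            token = request.headers.get('Authorization', '').replace('Bearer ', '')
            payload = decode_auth_token(token)
            # reject anything that is not a dict payload carrying the expected role
            if not isinstance(payload, dict) or payload.get('role') != role:
                return make_response(jsonify({'error': 'forbidden'})), 403
            return f(*args, **kwargs)
        return wrapped_f
    return wrap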
@service_blueprint.route('/')
@require_auth(role='fuck')
def get():
token = request.headers.get('Authorization').replace('Bearer ', '')
data = decode_auth_token(token)
print('im here')
response = {
'result': 'success',
'secret_resource': 'hello from hell',
'token': data
}
print('im after token decoding', response)
return make_response(jsonify(response)), 200
| 30.157895 | 79 | 0.611693 |
f469a957e31b5162b11e07b369f87cf1ba12cc3c | 759 | py | Python | general/edge_flaps.py | ml-for-gp/jaxgptoolbox | 78ca5775caf79fa985e733bdee14909755d7b635 | [
"Apache-2.0"
] | 10 | 2021-08-10T18:50:08.000Z | 2022-03-25T07:36:47.000Z | general/edge_flaps.py | ml-for-gp/jaxgptoolbox | 78ca5775caf79fa985e733bdee14909755d7b635 | [
"Apache-2.0"
] | null | null | null | general/edge_flaps.py | ml-for-gp/jaxgptoolbox | 78ca5775caf79fa985e733bdee14909755d7b635 | [
"Apache-2.0"
] | 1 | 2021-08-08T16:44:27.000Z | 2021-08-08T16:44:27.000Z | import numpy as np
from . edges_with_mapping import edges_with_mapping
def edge_flaps(F):
'''
EDGEFLAPS compute flap edge indices for each edge
Input:
F (|F|,3) numpy array of face indices
Output:
E (|E|,2) numpy array of edge indices
flapEdges (|E|, 4 or 2) numpy array of edge indices
'''
# Notes:
# Each flapEdges[e,:] = [a,b,c,d] edges indices
# / \
# b a
# / \
# - e - -
# \ /
# c d
# \ /
E, F2E = edges_with_mapping(F)
flapEdges = [[] for i in range(E.shape[0])]
for f in range(F.shape[0]):
e0 = F2E[f,0]
e1 = F2E[f,1]
e2 = F2E[f,2]
flapEdges[e0].extend([e1,e2])
flapEdges[e1].extend([e2,e0])
flapEdges[e2].extend([e0,e1])
return E, np.array(flapEdges) | 23.71875 | 55 | 0.56917 |
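# Illustrative usage (editor's addition, values assumed rather than taken from the
# original repository): for two triangles sharing one edge, the shared edge gets a
# 4-entry flap and each boundary edge a 2-entry flap, matching the docstring above.
#   import numpy as np
#   F = np.array([[0, 1, 2], [0, 2, 3]])
#   E, flapEdges = edge_flaps(F)   # 5 edges; flapEdges[e] holds 2 or 4 edge indices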
822046a2545c4fc31cf3c561a28422c2ef1b0e53 | 344 | py | Python | lancando-excecoes.py | fabiobarretopro/Aprendendo-Python | a47acf6b9fdfdad55853e620db451a6a2e61bc6f | [
"MIT"
] | null | null | null | lancando-excecoes.py | fabiobarretopro/Aprendendo-Python | a47acf6b9fdfdad55853e620db451a6a2e61bc6f | [
"MIT"
] | null | null | null | lancando-excecoes.py | fabiobarretopro/Aprendendo-Python | a47acf6b9fdfdad55853e620db451a6a2e61bc6f | [
"MIT"
] | null | null | null | class SomentePares(list):
def append(self, inteiro):
if not isinstance(inteiro, int):
raise TypeError("Somente inteiros")
if inteiro % 2 != 0:
raise ValueError("Somente Pares")
super(SomentePares, self).append(inteiro)
sp = SomentePares()
sp.append(10)
sp.append(30)
sp.append(4)
print(sp)
| 22.933333 | 49 | 0.630814 |
1894d3b091e4c4a0cf462805521f209302f8c49d | 349 | py | Python | build/lib/atmPy/atmos/constants.py | msrconsulting/atm-py | d9d41a509743d473000d8325ce4e319615988589 | [
"MIT"
] | null | null | null | build/lib/atmPy/atmos/constants.py | msrconsulting/atm-py | d9d41a509743d473000d8325ce4e319615988589 | [
"MIT"
] | null | null | null | build/lib/atmPy/atmos/constants.py | msrconsulting/atm-py | d9d41a509743d473000d8325ce4e319615988589 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Common physical constants.
@author: mtat76
"""
from math import pi
# Electron charge in Coulombs
e = 1.60218e-19
# vacuum permittivity (permittivity of free space) in F/m
eps0 = 8.8542e-12
# Boltzmann's constant in J/K
k = 1.3807e-23
# convert an angle in degrees to radians
a2r = lambda x: x/180*pi
# convert an angle in radians to degrees
r2a = lambda x: x/pi*180 | 15.863636 | 30 | 0.647564 |
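# Quick usage sketch (editor's addition): the two helpers are inverses of each other,
# e.g. a2r(180) == pi and r2a(pi) == 180.0, and a2r(r2a(x)) returns x up to
# floating-point rounding.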
82c0c42c7b63b4cf67f81a4cddcc591633e8fd71 | 2,891 | py | Python | voting_clf.py | ajarihant/kaggle-apr-21 | 98bb1e538384afdfd7affe7ed4f5a90131eeebe0 | [
"MIT"
] | null | null | null | voting_clf.py | ajarihant/kaggle-apr-21 | 98bb1e538384afdfd7affe7ed4f5a90131eeebe0 | [
"MIT"
] | null | null | null | voting_clf.py | ajarihant/kaggle-apr-21 | 98bb1e538384afdfd7affe7ed4f5a90131eeebe0 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
import random
import warnings
warnings.simplefilter('ignore')
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
import lightgbm as lgb
TARGET = 'Survived'
N_ESTIMATORS = 1000
N_SPLITS = 10
SEED = 2021
EARLY_STOPPING_ROUNDS = 100
VERBOSE = 100
def set_seed(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_seed(SEED)
root_path = "./"
train = pd.read_csv(os.path.join(root_path, 'train.csv'))
# y = train['Survived']
# train = train.drop(['Survived'],1)
test = pd.read_csv(os.path.join(root_path, 'test.csv'))
dataset = pd.concat([train, test], axis = 0, ignore_index = True)
# train_len = len(train)
# dataset = dataset.drop(['PassengerId'], 1)
# print('*********Whole Dataset*********\n', dataset.head())
dataset['Age'] = dataset['Age'].fillna(dataset['Age'].mean())
dataset['Cabin'] = dataset['Cabin'].fillna('X').map(lambda x: x[0].strip())
dataset['Ticket'] = dataset['Ticket'].fillna('X').map(lambda x:str(x).split()[0] if len(str(x).split()) > 1 else 'X')
fare_map = dataset[['Fare', 'Pclass']].dropna().groupby('Pclass').median().to_dict()
dataset['Fare'] = dataset['Fare'].fillna(dataset['Pclass'].map(fare_map['Fare']))
dataset['Fare'] = np.log1p(dataset['Fare'])
dataset['Embarked'] = dataset['Embarked'].fillna('X')
dataset['Name'] = dataset['Name'].map(lambda x: x.split(',')[0])
# print('*********Whole Dataset*********\n', dataset.head())
label_cols = ['Name', 'Ticket', 'Sex']
onehot_cols = ['Cabin', 'Embarked']
numerical_cols = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare']
def label_encoder(c):
le = LabelEncoder()
return le.fit_transform(c)
scaler = StandardScaler()
onehot_encoded_df = pd.get_dummies(dataset[onehot_cols])
label_encoded_df = dataset[label_cols].apply(label_encoder)
numerical_df = pd.DataFrame(scaler.fit_transform(dataset[numerical_cols]), columns=numerical_cols)
target_df = dataset[TARGET]
dataset = pd.concat([numerical_df, label_encoded_df, onehot_encoded_df, target_df], axis=1)
# print('*********Whole Dataset*********\n', dataset.head())
# Light GBM
params = {
'metric': 'binary_logloss',
'n_estimators': N_ESTIMATORS,
'objective': 'binary',
'random_state': SEED,
'learning_rate': 0.01,
'min_child_samples': 150,
'reg_alpha': 3e-5,
'reg_lambda': 9e-2,
'num_leaves': 20,
'max_depth': 16,
'colsample_bytree': 0.8,
'subsample': 0.8,
'subsample_freq': 2,
'max_bin': 240,
}
lgb_oof = np.zeros(train.shape[0])
lgb_preds = np.zeros(test.shape[0])
feature_importances = pd.DataFrame()
skf = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED)
for fold, (train_idx, valid_idx) in enumerate(skf.split(dataset, dataset[TARGET])):
print(f"===== FOLD {fold} =====")
| 31.769231 | 117 | 0.682809 |
d339da8e62215eb4fac9ad202acd8e7ce42975a9 | 3,840 | py | Python | rapid7vmconsole/models/resources_discovery_asset.py | BeanBagKing/vm-console-client-python | 7e4a2526297ef55b2d179bbfafa4451a68b3d4ed | [
"MIT"
] | 61 | 2018-05-17T05:57:09.000Z | 2022-03-08T13:59:21.000Z | rapid7vmconsole/models/resources_discovery_asset.py | BeanBagKing/vm-console-client-python | 7e4a2526297ef55b2d179bbfafa4451a68b3d4ed | [
"MIT"
] | 33 | 2018-06-26T16:21:14.000Z | 2022-03-03T20:55:47.000Z | rapid7vmconsole/models/resources_discovery_asset.py | BeanBagKing/vm-console-client-python | 7e4a2526297ef55b2d179bbfafa4451a68b3d4ed | [
"MIT"
] | 43 | 2018-02-24T05:45:53.000Z | 2022-03-31T22:15:16.000Z | # coding: utf-8
"""
Python InsightVM API Client
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ResourcesDiscoveryAsset(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[DiscoveryAsset]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""ResourcesDiscoveryAsset - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this ResourcesDiscoveryAsset. # noqa: E501
:return: The links of this ResourcesDiscoveryAsset. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourcesDiscoveryAsset.
:param links: The links of this ResourcesDiscoveryAsset. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this ResourcesDiscoveryAsset. # noqa: E501
:return: The resources of this ResourcesDiscoveryAsset. # noqa: E501
:rtype: list[DiscoveryAsset]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ResourcesDiscoveryAsset.
:param resources: The resources of this ResourcesDiscoveryAsset. # noqa: E501
:type: list[DiscoveryAsset]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcesDiscoveryAsset, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcesDiscoveryAsset):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.428571 | 86 | 0.572396 |
390ec6dee7f0c1fc4e9180b19450e26ebd9af866 | 15,599 | py | Python | qa/rpc-tests/wallet.py | VirtusPay/virtus | 0ec8039945176fc4ac78594d0040bfec0bf30cb9 | [
"MIT"
] | 2 | 2018-04-02T07:16:00.000Z | 2018-04-14T13:36:56.000Z | qa/rpc-tests/wallet.py | VirtusPay/virtus | 0ec8039945176fc4ac78594d0040bfec0bf30cb9 | [
"MIT"
] | null | null | null | qa/rpc-tests/wallet.py | VirtusPay/virtus | 0ec8039945176fc4ac78594d0040bfec0bf30cb9 | [
"MIT"
] | 4 | 2018-02-26T15:54:26.000Z | 2019-02-12T09:37:16.000Z | #!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s VIRTUS too low! (Should be %s VIRTUS)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s VIRTUS too high! (Should be %s VIRTUS)"%(str(fee), str(target_fee)))
return curr_balance
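    # Editor's note: e.g. with fee_per_byte = Decimal('0.000001') and tx_size = 250,
    # any fee between 0.000250 (250 bytes) and 0.000252 (250 + 2 bytes) passes.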
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 VIRTUS from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 VIRTUS in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 VIRTUS normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 VIRTUS with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 VIRTUS
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 VIRTUS with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
        #2. edit the raw hex to change one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
        #tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
        #check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label.encode('utf-8'), s.encode('utf-8')) # TODO remove encode(...) when supporting only Python3
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print "check " + m
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
| 44.568571 | 165 | 0.635938 |
6e24103b2f4766f9469232e140dd3aac1a953306 | 2,674 | py | Python | chat/consumers.py | Murtuza47/Chat-application-by-django | 931ba3106ad21ca8a100d97da520e42e7513afbc | [
"MIT"
] | null | null | null | chat/consumers.py | Murtuza47/Chat-application-by-django | 931ba3106ad21ca8a100d97da520e42e7513afbc | [
"MIT"
] | null | null | null | chat/consumers.py | Murtuza47/Chat-application-by-django | 931ba3106ad21ca8a100d97da520e42e7513afbc | [
"MIT"
] | null | null | null | # chat/consumers.py
import json
from channels.generic.websocket import WebsocketConsumer
from asgiref.sync import async_to_sync
from .models import Message
from django.contrib.auth import get_user_model
from .models import Message
User = get_user_model()
class ChatConsumer(WebsocketConsumer):
def fetch_messages(self,data):
message = Message.last_10_messages()
content = {
'messages': self.messages_to_json(message)
}
self.send_message(content)
def messages_to_json(self, messages):
result = []
for message in messages:
result.append(self.message_to_json(message))
return result
def message_to_json(self, message):
return{
'author': message.author.username,
'content': message.content,
'timestamp': str(message.timestamp)
}
def new_message(self, data):
author = data['from']
author_message = User.objects.filter(username=author)[0]
message = Message.objects.create(author=author_message, content=data['message'])
content= {
'commands' : 'new_message',
'message': self.message_to_json(message)
}
return self.send_chat_message(content)
commands = {
'fetch_message': fetch_messages,
'new_message': new_message
}
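    # Editor's note: `receive` below dispatches on the "commands" field of the JSON
    # payload, so clients are expected to send frames shaped like the following
    # (inferred from the handlers above, not taken from project documentation):
    #   {"commands": "fetch_message"}
    #   {"commands": "new_message", "from": "<username>", "message": "<text>"}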
def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
self.commands[text_data_json['commands']](self, text_data_json)
def send_chat_message(self,message):
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
def send_message(self, message):
self.send(text_data=json.dumps(message))
# Receive message from room group
def chat_message(self, event):
message = event['message']
# Send message to WebSocket
self.send(text_data=json.dumps(message)) | 29.065217 | 88 | 0.620045 |
c3407d169885be970c0b70ef8e871748a6d430f1 | 879 | py | Python | fabfile.py | ibarsi/drizzle | 827e0ede0d685909c08e1111154b10623bdd9f50 | [
"MIT"
] | 2 | 2017-04-03T14:58:14.000Z | 2018-06-29T14:42:12.000Z | fabfile.py | ibarsi/developer-problems | 56c7e23ba8eb3097b514ac06523f1eadd2ed99a0 | [
"MIT"
] | 4 | 2017-04-03T13:22:38.000Z | 2017-04-04T00:36:21.000Z | fabfile.py | ibarsi/developer-problems | 56c7e23ba8eb3097b514ac06523f1eadd2ed99a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import json
from fabric.api import env, task, cd, run, prompt
from settings import REMOTE_HOST, REMOTE_CODE_DIR
# CONFIG
env.hosts = [REMOTE_HOST]
env.use_ssh_config = True
def get_version():
with open('package.json') as package_file:
package = json.load(package_file)
return package['version']
@task
def deploy():
print 'Running on {0} as {1}'.format(env.hosts, env.user)
default_tag = get_version()
tag = prompt('Please enter {0} {1} [default: {2}]: '.format('tag', '(eg. 1.0.0)', default_tag))
tag = default_tag if tag in [None, ''] else tag
with cd(REMOTE_CODE_DIR):
run('git checkout master')
run('git pull')
run('git checkout tags/{0}'.format(tag))
run('npm prune')
run('npm install')
print 'Successfully deployed version {0}'.format(default_tag)
| 22.538462 | 99 | 0.647327 |
3dd20fa79971fbc062497a5ba6271ea43f99ffb0 | 28,177 | py | Python | cirq/ops/common_gates.py | maffoo/Cirq | 358dcccac5d80febe012c57f1ee66391d3d9912a | [
"Apache-2.0"
] | 1 | 2018-08-31T22:43:46.000Z | 2018-08-31T22:43:46.000Z | cirq/ops/common_gates.py | yongwww/Cirq | d0cb9d8e386e9240cdc4dcda605e0006e58316bd | [
"Apache-2.0"
] | null | null | null | cirq/ops/common_gates.py | yongwww/Cirq | d0cb9d8e386e9240cdc4dcda605e0006e58316bd | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantum gates that are commonly used in the literature."""
import math
from typing import Union, Tuple, Optional, List, Callable, cast, Iterable
import numpy as np
from cirq import value
from cirq.ops import gate_features, eigen_gate, raw_types, gate_operation
class Rot11Gate(eigen_gate.EigenGate,
gate_features.PhaseableEffect,
gate_features.TwoQubitGate,
gate_features.TextDiagrammable,
gate_features.InterchangeableQubitsGate,
gate_features.QasmConvertibleGate):
"""Phases the |11> state of two adjacent qubits by a fixed amount.
A ParameterizedCZGate guaranteed to not be using the parameter key field.
"""
def __init__(self, *, # Forces keyword args.
half_turns: Optional[Union[value.Symbol, float]] = None,
rads: Optional[float] = None,
degs: Optional[float] = None) -> None:
"""Initializes the gate.
At most one angle argument may be specified. If more are specified,
the result is considered ambiguous and an error is thrown. If no angle
argument is given, the default value of one half turn is used.
Args:
half_turns: Relative phasing of CZ's eigenstates, in half_turns.
rads: Relative phasing of CZ's eigenstates, in radians.
degs: Relative phasing of CZ's eigenstates, in degrees.
"""
super().__init__(exponent=value.chosen_angle_to_half_turns(
half_turns=half_turns,
rads=rads,
degs=degs))
def _eigen_components(self):
return [
(0, np.diag([1, 1, 1, 0])),
(1, np.diag([0, 0, 0, 1])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'Rot11Gate':
return Rot11Gate(half_turns=exponent)
def phase_by(self, phase_turns, qubit_index):
return self
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(
wire_symbols=('@', '@'),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
if self.half_turns != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('cz {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self.half_turns == 1:
return 'CZ'
return 'CZ**{!r}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 1:
return 'cirq.CZ'
return '(cirq.CZ**{!r})'.format(self.half_turns)
class RotXGate(eigen_gate.EigenGate,
gate_features.TextDiagrammable,
gate_features.SingleQubitGate,
gate_features.QasmConvertibleGate):
"""Fixed rotation around the X axis of the Bloch sphere."""
def __init__(self, *, # Forces keyword args.
half_turns: Optional[Union[value.Symbol, float]] = None,
rads: Optional[float] = None,
degs: Optional[float] = None) -> None:
"""Initializes the gate.
At most one angle argument may be specified. If more are specified,
the result is considered ambiguous and an error is thrown. If no angle
argument is given, the default value of one half turn is used.
Args:
half_turns: The relative phasing of X's eigenstates, in half_turns.
rads: The relative phasing of X's eigenstates, in radians.
degs: The relative phasing of X's eigenstates, in degrees.
"""
super().__init__(exponent=value.chosen_angle_to_half_turns(
half_turns=half_turns,
rads=rads,
degs=degs))
def _eigen_components(self):
return [
(0, np.array([[0.5, 0.5], [0.5, 0.5]])),
(1, np.array([[0.5, -0.5], [-0.5, 0.5]])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'RotXGate':
return RotXGate(half_turns=exponent)
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(
wire_symbols=('X',),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
args.validate_version('2.0')
if self.half_turns == 1:
return args.format('x {0};\n', qubits[0])
else:
return args.format('rx({0:half_turns}) {1};\n',
self.half_turns, qubits[0])
def __str__(self) -> str:
if self.half_turns == 1:
return 'X'
return 'X**{!r}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 1:
return 'cirq.X'
return '(cirq.X**{!r})'.format(self.half_turns)
class RotYGate(eigen_gate.EigenGate,
gate_features.TextDiagrammable,
gate_features.SingleQubitGate,
gate_features.QasmConvertibleGate):
"""Fixed rotation around the Y axis of the Bloch sphere."""
def __init__(self, *, # Forces keyword args.
half_turns: Optional[Union[value.Symbol, float]] = None,
rads: Optional[float] = None,
degs: Optional[float] = None) -> None:
"""Initializes the gate.
At most one angle argument may be specified. If more are specified,
the result is considered ambiguous and an error is thrown. If no angle
argument is given, the default value of one half turn is used.
Args:
half_turns: The relative phasing of Y's eigenstates, in half_turns.
rads: The relative phasing of Y's eigenstates, in radians.
degs: The relative phasing of Y's eigenstates, in degrees.
"""
super().__init__(exponent=value.chosen_angle_to_half_turns(
half_turns=half_turns,
rads=rads,
degs=degs))
def _eigen_components(self):
return [
(0, np.array([[0.5, -0.5j], [0.5j, 0.5]])),
(1, np.array([[0.5, 0.5j], [-0.5j, 0.5]])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'RotYGate':
return RotYGate(half_turns=exponent)
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(
wire_symbols=('Y',),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
args.validate_version('2.0')
if self.half_turns == 1:
return args.format('y {0};\n', qubits[0])
else:
return args.format('ry({0:half_turns}) {1};\n',
self.half_turns, qubits[0])
def __str__(self) -> str:
if self.half_turns == 1:
return 'Y'
return 'Y**{!r}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 1:
return 'cirq.Y'
return '(cirq.Y**{!r})'.format(self.half_turns)
class RotZGate(eigen_gate.EigenGate,
gate_features.TextDiagrammable,
gate_features.SingleQubitGate,
gate_features.PhaseableEffect,
gate_features.QasmConvertibleGate):
"""Fixed rotation around the Z axis of the Bloch sphere."""
def __init__(self, *, # Forces keyword args.
half_turns: Optional[Union[value.Symbol, float]] = None,
rads: Optional[float] = None,
degs: Optional[float] = None) -> None:
"""Initializes the gate.
At most one angle argument may be specified. If more are specified,
the result is considered ambiguous and an error is thrown. If no angle
argument is given, the default value of one half turn is used.
Args:
half_turns: The relative phasing of Z's eigenstates, in half_turns.
rads: The relative phasing of Z's eigenstates, in radians.
degs: The relative phasing of Z's eigenstates, in degrees.
"""
super().__init__(exponent=value.chosen_angle_to_half_turns(
half_turns=half_turns,
rads=rads,
degs=degs))
def _eigen_components(self):
return [
(0, np.diag([1, 0])),
(1, np.diag([0, 1])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'RotZGate':
return RotZGate(half_turns=exponent)
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def phase_by(self,
phase_turns: float,
qubit_index: int):
return self
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
if self.half_turns in [-0.25, 0.25]:
return gate_features.TextDiagramInfo(
wire_symbols=('T',),
exponent=cast(float, self._exponent) * 4)
if self.half_turns in [-0.5, 0.5]:
return gate_features.TextDiagramInfo(
wire_symbols=('S',),
exponent=cast(float, self._exponent) * 2)
return gate_features.TextDiagramInfo(
wire_symbols=('Z',),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
args.validate_version('2.0')
if self.half_turns == 1:
return args.format('z {0};\n', qubits[0])
else:
return args.format('rz({0:half_turns}) {1};\n',
self.half_turns, qubits[0])
def __str__(self) -> str:
if self.half_turns == 0.25:
return 'T'
if self.half_turns == -0.25:
return 'T**-1'
if self.half_turns == 0.5:
return 'S'
if self.half_turns == -0.5:
return 'S**-1'
if self.half_turns == 1:
return 'Z'
return 'Z**{}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 0.25:
return 'cirq.T'
if self.half_turns == -0.25:
return '(cirq.T**-1)'
if self.half_turns == 0.5:
return 'cirq.S'
if self.half_turns == -0.5:
return '(cirq.S**-1)'
if self.half_turns == 1:
return 'cirq.Z'
return '(cirq.Z**{!r})'.format(self.half_turns)
class MeasurementGate(raw_types.Gate,
gate_features.TextDiagrammable,
gate_features.QasmConvertibleGate):
"""Indicates that qubits should be measured plus a key to identify results.
Attributes:
key: The string key of the measurement.
invert_mask: A list of values indicating whether the corresponding
qubits should be flipped. The list's length must not be longer than
the number of qubits, but it is permitted to be shorted. Qubits with
indices past the end of the mask are not flipped.
"""
def __init__(self,
key: str = '',
invert_mask: Tuple[bool, ...] = ()) -> None:
self.key = key
self.invert_mask = invert_mask or ()
@staticmethod
def is_measurement(op: Union[raw_types.Gate, raw_types.Operation]) -> bool:
if isinstance(op, MeasurementGate):
return True
if (isinstance(op, gate_operation.GateOperation) and
isinstance(op.gate, MeasurementGate)):
return True
return False
def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':
"""Toggles whether or not the measurement inverts various outputs."""
old_mask = self.invert_mask or ()
n = max(len(old_mask) - 1, *bit_positions) + 1
new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]
for b in bit_positions:
new_mask[b] = not new_mask[b]
return MeasurementGate(key=self.key, invert_mask=tuple(new_mask))
def validate_args(self, qubits):
if (self.invert_mask is not None and
len(self.invert_mask) > len(qubits)):
raise ValueError('len(invert_mask) > len(qubits)')
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
n = (max(1, len(self.invert_mask))
if args.known_qubit_count is None
else args.known_qubit_count)
symbols = ['M'] * n
# Show which output bits are negated.
if self.invert_mask:
for i, b in enumerate(self.invert_mask):
if b:
symbols[i] = '!M'
# Mention the measurement key.
if (not args.known_qubits or
self.key != _default_measurement_key(args.known_qubits)):
symbols[0] += "('{}')".format(self.key)
return gate_features.TextDiagramInfo(tuple(symbols))
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
args.validate_version('2.0')
invert_mask = self.invert_mask
if len(invert_mask) < len(qubits):
invert_mask = (invert_mask
+ (False,) * (len(qubits) - len(invert_mask)))
lines = []
for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):
if inv:
lines.append(args.format(
'x {0}; // Invert the following measurement\n', qubit))
lines.append(args.format('measure {0} -> {1:meas}[{2}];\n',
qubit, self.key, i))
return ''.join(lines)
def __repr__(self):
return 'cirq.MeasurementGate({}, {})'.format(repr(self.key),
repr(self.invert_mask))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self.key == other.key and self.invert_mask == other.invert_mask
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((MeasurementGate, self.key, self.invert_mask))
def _default_measurement_key(qubits: Iterable[raw_types.QubitId]) -> str:
return ','.join(str(q) for q in qubits)
def measure(*qubits: raw_types.QubitId,
key: Optional[str] = None,
invert_mask: Tuple[bool, ...] = ()
) -> gate_operation.GateOperation:
"""Returns a single MeasurementGate applied to all the given qubits.
The qubits are measured in the computational basis.
Args:
*qubits: The qubits that the measurement gate should measure.
key: The string key of the measurement. If this is None, it defaults
to a comma-separated list of the target qubits' str values.
invert_mask: A list of Truthy or Falsey values indicating whether
the corresponding qubits should be flipped. None indicates no
inverting should be done.
Returns:
An operation targeting the given qubits with a measurement.
"""
if key is None:
key = _default_measurement_key(qubits)
return MeasurementGate(key, invert_mask).on(*qubits)
def measure_each(*qubits: raw_types.QubitId,
key_func: Callable[[raw_types.QubitId], str] = str
) -> List[gate_operation.GateOperation]:
"""Returns a list of operations individually measuring the given qubits.
The qubits are measured in the computational basis.
Args:
*qubits: The qubits to measure.
key_func: Determines the key of the measurements of each qubit. Takes
the qubit and returns the key for that qubit. Defaults to str.
Returns:
A list of operations individually measuring the given qubits.
"""
return [MeasurementGate(key_func(q)).on(q) for q in qubits]
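# Example (editor's sketch; the qubit construction shown is illustrative only):
#   a, b = cirq.LineQubit.range(2)
#   cirq.measure(a, b)        # one MeasurementGate on both qubits, keyed by the
#                             # comma-joined qubit names
#   cirq.measure_each(a, b)   # two MeasurementGates, one per qubit, keyed by str(qubit)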
X = RotXGate() # Pauli X gate.
Y = RotYGate() # Pauli Y gate.
Z = RotZGate() # Pauli Z gate.
CZ = Rot11Gate() # Negates the amplitude of the |11> state.
S = Z**0.5
T = Z**0.25
class HGate(gate_features.CompositeGate,
gate_features.TextDiagrammable,
gate_features.ReversibleEffect,
gate_features.KnownMatrix,
gate_features.SingleQubitGate,
gate_features.QasmConvertibleGate):
"""180 degree rotation around the X+Z axis of the Bloch sphere."""
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(('H',))
def default_decompose(self, qubits):
q = qubits[0]
yield Y(q)**0.5
yield X(q)
def inverse(self):
return self
def matrix(self):
"""See base class."""
s = math.sqrt(0.5)
return np.array([[s, s], [s, -s]])
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
args.validate_version('2.0')
return args.format('h {0};\n', qubits[0])
def __str__(self):
return 'H'
def __repr__(self):
return 'cirq.H'
H = HGate() # Hadamard gate.
class CNotGate(eigen_gate.EigenGate,
gate_features.TextDiagrammable,
gate_features.CompositeGate,
gate_features.TwoQubitGate,
gate_features.QasmConvertibleGate):
"""When applying CNOT (controlled-not) to QuBits, you can either use
positional arguments CNOT(q1, q2), where q2 is toggled when q1 is on,
or named arguments CNOT(control=q1, target=q2).
(Mixing the two is not permitted.)"""
def __init__(self, *, # Forces keyword args.
half_turns: Optional[Union[value.Symbol, float]] = None,
rads: Optional[float] = None,
degs: Optional[float] = None) -> None:
"""Initializes the gate.
At most one angle argument may be specified. If more are specified,
the result is considered ambiguous and an error is thrown. If no angle
argument is given, the default value of one half turn is used.
Args:
half_turns: Relative phasing of CNOT's eigenstates, in half_turns.
rads: Relative phasing of CNOT's eigenstates, in radians.
degs: Relative phasing of CNOT's eigenstates, in degrees.
"""
super().__init__(exponent=value.chosen_angle_to_half_turns(
half_turns=half_turns,
rads=rads,
degs=degs))
def default_decompose(self, qubits):
c, t = qubits
yield Y(t)**-0.5
yield Rot11Gate(half_turns=self.half_turns).on(c, t)
yield Y(t)**0.5
def _eigen_components(self):
return [
(0, np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0.5, 0.5],
[0, 0, 0.5, 0.5]])),
(1, np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0.5, -0.5],
[0, 0, -0.5, 0.5]])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'CNotGate':
return CNotGate(half_turns=exponent)
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(
wire_symbols=('@', 'X'),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
if self.half_turns != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('cx {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self.half_turns == 1:
return 'CNOT'
return 'CNOT**{!r}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 1:
return 'cirq.CNOT'
return '(cirq.CNOT**{!r})'.format(self.half_turns)
def on(self, *args: raw_types.QubitId,
**kwargs: raw_types.QubitId) -> gate_operation.GateOperation:
if not kwargs:
return super().on(*args)
if not args and set(kwargs.keys()) == {'control', 'target'}:
return super().on(kwargs['control'], kwargs['target'])
raise ValueError(
"Expected two positional argument or else 'target' AND 'control' "
"keyword arguments. But got args={!r}, kwargs={!r}.".format(
args, kwargs))
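# Editor's note: the keyword form accepted above means CNOT(control=q0, target=q1)
# is equivalent to CNOT(q0, q1), while mixing positional and keyword qubits raises
# the ValueError (q0, q1 here are any two QubitIds, named for illustration only).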
CNOT = CNotGate() # Controlled Not Gate.
class SwapGate(eigen_gate.EigenGate,
gate_features.TextDiagrammable,
gate_features.TwoQubitGate,
gate_features.CompositeGate,
gate_features.InterchangeableQubitsGate,
gate_features.QasmConvertibleGate):
"""Swaps two qubits."""
def __init__(self, *, # Forces keyword args.
half_turns: Union[value.Symbol, float] = 1.0) -> None:
super().__init__(exponent=half_turns)
def default_decompose(self, qubits):
"""See base class."""
a, b = qubits
yield CNOT(a, b)
yield CNOT(b, a) ** self.half_turns
yield CNOT(a, b)
def _eigen_components(self):
return [
(0, np.array([[1, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 1]])),
(1, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 2
def _with_exponent(self,
exponent: Union[value.Symbol, float]) -> 'SwapGate':
return SwapGate(half_turns=exponent)
@property
def half_turns(self) -> Union[value.Symbol, float]:
return self._exponent
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
if not args.use_unicode_characters:
return gate_features.TextDiagramInfo(
wire_symbols=('swap', 'swap'),
exponent=self._exponent)
return gate_features.TextDiagramInfo(
wire_symbols=('×', '×'),
exponent=self._exponent)
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: gate_features.QasmOutputArgs) -> Optional[str]:
if self.half_turns != 1:
return None # Don't have an equivalent gate in QASM
args.validate_version('2.0')
return args.format('swap {0},{1};\n', qubits[0], qubits[1])
def __str__(self) -> str:
if self.half_turns == 1:
return 'SWAP'
return 'SWAP**{!r}'.format(self.half_turns)
def __repr__(self) -> str:
if self.half_turns == 1:
return 'cirq.SWAP'
return '(cirq.SWAP**{!r})'.format(self.half_turns)
SWAP = SwapGate() # Exchanges two qubits' states.
class ISwapGate(eigen_gate.EigenGate,
gate_features.CompositeGate,
gate_features.InterchangeableQubitsGate,
gate_features.TextDiagrammable,
gate_features.TwoQubitGate):
"""Rotates the |01⟩-vs-|10⟩ subspace of two qubits around its Bloch X-axis.
When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. More
generally, this gate's matrix is defined as follows:
ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)
≡ [1 0 0 0]
[0 cos(π·t/2) i·sin(π·t/2) 0]
[0 i·sin(π·t/2) cos(π·t/2) 0]
[0 0 0 1]
"""
@property
def exponent(self) -> Union[value.Symbol, float]:
return self._exponent
def _eigen_components(self):
return [
(0, np.diag([1, 0, 0, 1])),
(+0.5, np.array([[0, 0, 0, 0],
[0, 0.5, 0.5, 0],
[0, 0.5, 0.5, 0],
[0, 0, 0, 0]])),
(-0.5, np.array([[0, 0, 0, 0],
[0, 0.5, -0.5, 0],
[0, -0.5, 0.5, 0],
[0, 0, 0, 0]])),
]
def _canonical_exponent_period(self) -> Optional[float]:
return 4
def _with_exponent(self, exponent: Union[value.Symbol, float]
) -> 'ISwapGate':
return ISwapGate(exponent=exponent)
def default_decompose(self, qubits):
a, b = qubits
yield CNOT(a, b)
yield H(a)
yield CNOT(b, a)
yield S(a)**self.exponent
yield CNOT(b, a)
yield S(a)**-self.exponent
yield H(a)
yield CNOT(a, b)
def text_diagram_info(self, args: gate_features.TextDiagramInfoArgs
) -> gate_features.TextDiagramInfo:
return gate_features.TextDiagramInfo(
wire_symbols=('iSwap', 'iSwap'),
exponent=self._exponent)
def __str__(self) -> str:
if self.exponent == 1:
return 'ISWAP'
return 'ISWAP**{!r}'.format(self.exponent)
def __repr__(self):
if self.exponent == 1:
return 'cirq.ISWAP'
return '(cirq.ISWAP**{!r})'.format(self.exponent)
ISWAP = ISwapGate() # Swaps two qubits while phasing the swapped subspace by i.
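# Illustrative sketch (not part of the original module): rebuild the ISWAP**t
# matrix from the eigen components listed above, assuming the EigenGate
# convention matrix(t) = sum_k exp(i*pi*t*theta_k) * P_k.
def _iswap_matrix_sketch(t: float) -> np.ndarray:
    p_0 = np.diag([1, 0, 0, 1])
    p_plus = np.array([[0, 0, 0, 0],
                       [0, 0.5, 0.5, 0],
                       [0, 0.5, 0.5, 0],
                       [0, 0, 0, 0]])
    p_minus = np.array([[0, 0, 0, 0],
                        [0, 0.5, -0.5, 0],
                        [0, -0.5, 0.5, 0],
                        [0, 0, 0, 0]])
    return (p_0
            + np.exp(1j * np.pi * t * 0.5) * p_plus
            + np.exp(-1j * np.pi * t * 0.5) * p_minus)
# At t == 1 the middle block becomes [[0, i], [i, 0]]: the two qubits are
# swapped and |01>, |10> are phased by i, matching the ISwapGate docstring.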
| 36.263835 | 80 | 0.569826 |
09969a99e8853801d17ea10b4464bc7476735080 | 18,231 | py | Python | main.py | cparlette/bitfl-kivy | 786ecde2edc31bf2e1ab1a5e3df8bfe1842d958c | [
"MIT"
] | null | null | null | main.py | cparlette/bitfl-kivy | 786ecde2edc31bf2e1ab1a5e3df8bfe1842d958c | [
"MIT"
] | null | null | null | main.py | cparlette/bitfl-kivy | 786ecde2edc31bf2e1ab1a5e3df8bfe1842d958c | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, NumericProperty, StringProperty
from kivy.uix.button import Button
from kivy.logger import Logger
from kivy.core.window import Window
from kivy.animation import Animation
import random
# Version used by buildozer for android builds
__version__ = "0.1.0"
class Location(Button):
button_index = NumericProperty(1)
popup_menu = None
def clicked(self):
#this particular button was clicked, so instruct the player to move here
#check first to see if the player is in the middle of moving
if self.parent.player1.is_moving == 0:
self.parent.player1.move(self.button_index)
class BITFLGame(FloatLayout):
current_player_time_left = 50
current_week = 1
#list of the buttons, must be instantiated later or else it's just empty ObjectProperties
location_list = []
'''
location_list numbering scheme looks like this:
    0   1   2   3   4
    13              5
    12              6
    11  10  9   8   7
'''
def initial_setup(self):
#add the menu buttons here, although this might be a poor place
#Luxury Apartments
self.upper_left.popup_menu = CustomPopup()
self.upper_left.popup_menu.title = self.upper_left.text
self.upper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Relax", on_press=lambda a: self.change_player_stats(
happiness=5)))
self.upper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Read a book", on_press=lambda a: self.change_player_stats(
knowledge=5)))
#Rent Office
self.upper_midleft.popup_menu = CustomPopup()
self.upper_midleft.popup_menu.title = self.upper_midleft.text
self.upper_midleft.popup_menu.ids.right_popup_section.add_widget(
Button(text="Pay Rent", on_press=lambda a: self.change_player_stats(
money=-100)))
self.upper_midleft.popup_menu.ids.right_popup_section.add_widget(
Button(text="Get your mail", on_press=lambda a: self.change_player_stats(
happiness=1)))
#Standard Apartment
self.upper_center.popup_menu = CustomPopup()
self.upper_center.popup_menu.title = self.upper_center.text
self.upper_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Relax", on_press=lambda a: self.change_player_stats(
happiness=5)))
self.upper_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Read a book", on_press=lambda a: self.change_player_stats(
knowledge=5)))
self.upper_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Throw a party", on_press=lambda a: self.change_player_stats(
happiness=15, money=-50)))
self.upper_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Work remotely doing data entry", on_press=lambda a: self.change_player_stats(
money=25, happiness=-1)))
#Pawn Shop
self.upper_midright.popup_menu = CustomPopup()
self.upper_midright.popup_menu.title = self.upper_midright.text
self.upper_midright.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy book on sale", on_press=lambda a: self.change_player_stats(
knowledge=5, money=-10)))
self.upper_midright.popup_menu.ids.right_popup_section.add_widget(
Button(text="Sell your guitar", on_press=lambda a: self.change_player_stats(
money=75, happiness=-3)))
#Z-Mart
self.upper_right.popup_menu = CustomPopup()
self.upper_right.popup_menu.title = self.upper_right.text
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Refrigerator", on_press=lambda a: self.change_player_stats(
money=-250, happiness=5)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Bicycle", on_press=lambda a: self.change_player_stats(
money=-150, happiness=10)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Encyclopedia Set", on_press=lambda a: self.change_player_stats(
money=-50, knowledge=10)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Stereo", on_press=lambda a: self.change_player_stats(
money=-100, happiness=15)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Gaming Computer", on_press=lambda a: self.change_player_stats(
money=-350, happiness=20)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy DVD", on_press=lambda a: self.change_player_stats(
money=-10, happiness=1)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Yanni's Greatest Hits", on_press=lambda a: self.change_player_stats(
money=-30, happiness=2)))
self.upper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Call of Duty 27", on_press=lambda a: self.change_player_stats(
money=-60, happiness=5)))
#Fast Food Restaurant
self.midupper_right.popup_menu = CustomPopup()
self.midupper_right.popup_menu.title = self.midupper_right.text
self.midupper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Sad Meal", on_press=lambda a: self.change_player_stats(
money=-5, happiness=-1)))
self.midupper_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Gigantoburger Combo", on_press=lambda a: self.change_player_stats(
money=-15, happiness=1)))
#Clothing Store
self.midlower_right.popup_menu = CustomPopup()
self.midlower_right.popup_menu.title = self.midlower_right.text
self.midlower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Casual Clothes", on_press=lambda a: self.change_player_stats(
money=-50, happiness=10)))
self.midlower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Business Casual Clothes", on_press=lambda a: self.change_player_stats(
money=-130, happiness=5)))
self.midlower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Business Clothes", on_press=lambda a: self.change_player_stats(
money=-250, happiness=2)))
self.midlower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Formal Clothes", on_press=lambda a: self.change_player_stats(
money=-360, happiness=1)))
#Socket City
self.lower_right.popup_menu = CustomPopup()
self.lower_right.popup_menu.title = self.lower_right.text
self.lower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Washer/Dryer", on_press=lambda a: self.change_player_stats(
money=-300, items=["washer", "dryer"])))
self.lower_right.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Refrigerator", on_press=lambda a: self.change_player_stats(
money=-250, items=["refrigerator"])))
#University
self.lower_midright.popup_menu = CustomPopup()
self.lower_midright.popup_menu.title = self.lower_midright.text
self.lower_midright.popup_menu.ids.right_popup_section.add_widget(
Button(text="Take CompSci Class", on_press=lambda a: self.change_player_stats(
knowledge=50)))
self.lower_midright.popup_menu.ids.right_popup_section.add_widget(
Button(text="Take English Class", on_press=lambda a: self.change_player_stats(
knowledge=50)))
#Blank
self.lower_center.popup_menu = CustomPopup()
self.lower_center.popup_menu.title = self.lower_center.text
self.lower_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Increase Knowledge", on_press=lambda a: self.change_player_stats(
knowledge=1)))
self.lower_center.popup_menu.ids.right_popup_section.add_widget(
Button(text="Increase Money", on_press=lambda a: self.change_player_stats(
money=50)))
#Employment Office
self.lower_midleft.popup_menu = CustomPopup()
self.lower_midleft.popup_menu.title = self.lower_midleft.text
self.lower_midleft.popup_menu.ids.right_popup_section.add_widget(
Button(text="Get job at Factory", on_press=lambda a: self.change_player_stats(
job={"location": self.lower_left, "title": "Manager", "salary": 20})))
self.lower_midleft.popup_menu.ids.right_popup_section.add_widget(
Button(text="Get job at Bank", on_press=lambda a: self.change_player_stats(
job={"location": self.midlower_left, "title": "Teller", "salary": 15})))
#Factory
self.lower_left.popup_menu = CustomPopup()
self.lower_left.popup_menu.title = self.lower_left.text
self.lower_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Increase Knowledge", on_press=lambda a: self.change_player_stats(
knowledge=1)))
self.lower_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Increase Money", on_press=lambda a: self.change_player_stats(
money=50)))
#Bank
self.midlower_left.popup_menu = CustomPopup()
self.midlower_left.popup_menu.title = self.midlower_left.text
self.midlower_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Withdraw Money", on_press=lambda a: self.change_player_stats(
money=200)))
self.midlower_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Rob Bank", on_press=lambda a: self.change_player_stats(
money=550)))
#Black's Market
self.midupper_left.popup_menu = CustomPopup()
self.midupper_left.popup_menu.title = self.midupper_left.text
self.midupper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Bacon", on_press=lambda a: self.change_player_stats(
money=-10, happiness=10)))
self.midupper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Sushi", on_press=lambda a: self.change_player_stats(
money=-20, happiness=20)))
self.midupper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Salad", on_press=lambda a: self.change_player_stats(
money=-10, happiness=2)))
self.midupper_left.popup_menu.ids.right_popup_section.add_widget(
Button(text="Buy Frozen Pizza", on_press=lambda a: self.change_player_stats(
money=-10, happiness=5)))
#set up the location_list after the buttons are actually buttons and not just ObjectProperty
#there might be a better way but this actually works
self.location_list = [self.upper_left, self.upper_midleft,
self.upper_center, self.upper_midright, self.upper_right,
self.midupper_right, self.midlower_right, self.lower_right,
self.lower_midright, self.lower_center, self.lower_midleft,
self.lower_left, self.midlower_left, self.midupper_left
]
def update_player_stats(self):
#print out the current player stats in the middle of the screen
stats = "Player 1 current stats:\n"
stats += "Time Left This Week: "+str(self.current_player_time_left)+"\n"
stats += "Current Week: "+str(self.current_week)+"\n"
stats += "Knowledge: "+str(self.player1.knowledge)+"\n"
stats += "Money: "+str(self.player1.money)+"\n"
stats += "Happiness: "+str(self.player1.happiness)+"\n"
if self.player1.job["title"]:
stats += "Job: "+self.player1.job["title"]+" at "+self.player1.job["location"].text+"\n"
else:
stats += "Job: Unemployed\n"
App.get_running_app().player_stats = stats
def update_player_inventory(self):
inv = "Player 1 current inventory:\n"
for thing in self.player1.inventory:
inv += thing+"\n"
App.get_running_app().player_inventory = inv
    #items and job default to immutable empty values; both are treated as read-only here
    def change_player_stats(self, knowledge=0, money=0, happiness=0, items=(), job=None, time=0):
if self.player1.money + money < 0:
no_money_popup = NoMoneyPopup()
no_money_popup.open()
elif self.current_player_time_left + time < 0:
no_time_popup = NoTimePopup()
no_time_popup.open()
else:
self.player1.knowledge += knowledge
self.player1.money += money
self.player1.happiness += happiness
self.current_player_time_left += time
if job:
self.player1.job = job
self.update_player_stats()
#add items to inventory
for thing in items:
self.player1.inventory.append(thing)
self.update_player_inventory()
def end_of_turn(self):
self.current_player_time_left = 50
self.current_week += 1
animation = Animation(duration=0)
animation += Animation(pos=(self.upper_center.center[0]-(self.player1.size[0]/2),
self.upper_center.center[1]-(self.player1.size[1]/2)), duration=.1)
animation.start(self.player1)
self.player1.location_index = 2
self.update_player_stats()
#generate a new turn message from a list of options
possible_messages = [
"You played bingo all weekend at the local fire hall.",
"You spent all weekend painting the living room a slightly darker shade of brown.",
"You went swimming at La Jolla."]
App.get_running_app().new_turn_message = possible_messages[
random.randint(0, len(possible_messages)-1)]
new_turn_popup = NewTurnPopup()
new_turn_popup.open()
class Player(Widget):
#player stats
knowledge = 0
money = 1000
happiness = 50
inventory = []
job = {"location": None, "title": "", "salary": 1}
    #The work button can't be fully built here because it needs self, which isn't
    #available at class level, so only the attribute is declared; remove_work_button
    #references it later to take the button back out of the popup
work_button = None
#keep track of where the player is currently
location_index = NumericProperty(2)
is_moving = NumericProperty(0)
#The popup has to bind on_dismiss to a function, so I made this, maybe could use a lambda instead?
def remove_work_button(self, popup):
popup.ids.left_popup_section.remove_widget(self.work_button)
return False
#finished_moving is needed since an animation's on_complete needs to call a function
def finished_moving(self, instance, value):
#update the player to not be moving
self.is_moving = 0
#check to see if there is any time left in this week
if self.parent.current_player_time_left < 0:
self.parent.end_of_turn()
else:
#If this location is where the player works, add a work button
current_location = self.parent.location_list[self.location_index]
if self.job["location"] == current_location:
self.work_button = Button(text="Work", on_press=lambda a: self.parent.change_player_stats(
money=(self.job["salary"]*8), time=-8), size_hint=(.5, .25), pos_hint={'x': .5, 'y': 0})
current_location.popup_menu.ids.left_popup_section.add_widget(self.work_button)
current_location.popup_menu.bind(on_dismiss=self.remove_work_button)
#Open the popup from that location
current_location.popup_menu.open()
def move(self, target_button_index):
#tell the other buttons that we're moving, so they don't work
self.is_moving = 1
#find out how far away we are from the target button if we go clockwise
direction = 'clockwise'
distance = 0
total_locations = len(self.parent.location_list)
max_distance = total_locations / 2
if target_button_index > self.location_index:
distance = target_button_index - self.location_index
else:
#handle if we wrap around from 13 to 0
distance += (total_locations - self.location_index)
distance += target_button_index
#if it's too far to go clockwise, then go counter-clockwise
if distance > max_distance:
direction = 'counterclockwise'
distance = (total_locations - distance)
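        #worked example (illustrative): with 14 locations, moving from index 12 to
        #index 2 gives a clockwise distance of (14 - 12) + 2 = 4, which is within
        #max_distance, so the player walks 13 -> 0 -> 1 -> 2 instead of taking the
        #longer counter-clockwise route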
#make a list of buttons in the correct order
#I had to add +/-1 to all the indices because I don't want the current button, and I want to get
#the target button as well. There's probably a cleaner way to do that rather than +1
button_list = []
if direction == 'clockwise':
if target_button_index == 0:
#special case where the target is 0 (upper left)
button_list = self.parent.location_list[self.location_index+1:]
button_list.append(self.parent.location_list[0])
elif self.location_index + distance > total_locations:
#player is wrapping around from 13 to 0
button_list = self.parent.location_list[self.location_index+1:]
button_list += self.parent.location_list[:target_button_index+1]
else:
#player is going clockwise without wrapping around the upper left
button_list = self.parent.location_list[self.location_index+1:target_button_index+1]
elif direction == 'counterclockwise':
if target_button_index == 0:
#special case where the target is 0 (upper left)
button_list = self.parent.location_list[self.location_index-1::-1]
elif self.location_index == 0:
#special case where the player is currently on 0 (upper left)
button_list = self.parent.location_list[total_locations:target_button_index-1:-1]
elif self.location_index - distance < 0:
#player is wrapping around from 0 to 13
button_list = self.parent.location_list[self.location_index-1::-1]
button_list += self.parent.location_list[total_locations:target_button_index-1:-1]
else:
#player is going counterclockwise without wrapping around the upper left
button_list = self.parent.location_list[self.location_index-1:target_button_index-1:-1]
#make the animation, set the initial duration to 0 so it starts immediately
animation = Animation(duration=0)
#have the player move to the next button in the list
for button in button_list:
animation += Animation(
pos=(
button.center[0]-(self.size[0]/2),
button.center[1]-(self.size[1]/2)
),
duration=.3
)
#each square you move takes 1 "hour"
self.parent.current_player_time_left -= 1
#when the animation completes, call finished_moving(), which will set is_moving to 0
animation.bind(on_complete=self.finished_moving)
#run the animations
animation.start(self)
#update the UI with correct time remaining
self.parent.update_player_stats()
#set the players location_index so we know where he is
self.location_index = target_button_index
class CustomPopup(Popup):
pass
class NoMoneyPopup(Popup):
pass
class NoTimePopup(Popup):
pass
class NewTurnPopup(Popup):
pass
class BITFLApp(App):
player_stats = StringProperty("")
player_inventory = StringProperty("")
new_turn_message = StringProperty("")
def build(self):
game = BITFLGame()
#need to setup the button list AFTER instantiation, not sure if there's a better way
game.initial_setup()
game.update_player_stats()
game.update_player_inventory()
return game
if __name__ == '__main__':
BITFLApp().run()
| 42.69555 | 99 | 0.757172 |
e0e7d464bd05001732ac27bd1be0a1343626536d | 3,357 | py | Python | scenic/model_lib/matchers/common.py | tigerneil/scenic | e7815e8aaabc617a51a43aaba57cc02e246d4b3e | [
"Apache-2.0"
] | 1 | 2022-02-17T18:48:43.000Z | 2022-02-17T18:48:43.000Z | scenic/model_lib/matchers/common.py | hrshuv0/scenic | fee73998bbe123898e35d371d8ecd0461505fcd4 | [
"Apache-2.0"
] | null | null | null | scenic/model_lib/matchers/common.py | hrshuv0/scenic | fee73998bbe123898e35d371d8ecd0461505fcd4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common functions for computing matchings."""
import jax
import jax.experimental.host_callback as hcb
import jax.numpy as jnp
import numpy as np
def slicer(cost, n_present_col, matching_fn):
"""Slices cost matrices per-example and exploits padding to compute faster.
Args:
cost: np.ndarray[batch, n_row, n_col]; Cost matrices for which matchings
must be computed. It is assumed that n_row >= n_col.
n_present_col: np.ndarray[batch]; Number of trailing padded columns of the
cost matrices.
matching_fn: Matching function to call to compute matching on the unpadded
cost matrices.
Returns:
Matchings of shape [batch, 2, n_col].
"""
batch, n_row, n_col = cost.shape
if n_row < n_col:
raise ValueError(
f'Slicer requires that n_row ({n_row}) >= n_col ({n_col}).')
  eye = np.eye(n_row, dtype=bool)  # Built-in bool; the np.bool alias is removed in newer NumPy.
matches = []
for i in range(batch):
present_col = max(n_present_col[i], 1) # One col even if all are padded.
cost_m = cost[i : i + 1, :, :present_col] # Slicing should avoid a copy.
indices = matching_fn(cost_m)[0]
row, col = indices[0], indices[1]
# Add padded matches (if padding was done correctly these can be random).
unmatched_row = np.where(~eye[row].max(axis=0))[0] # Faster than setdiff1d.
unmatched_col = np.arange(present_col, n_col)
# Assume n_row >= n_col >= n_present_col.
n_common = n_col - present_col
unmatched_row = unmatched_row[:n_common]
# Reconstruct the matching.
row = np.concatenate([row, unmatched_row], axis=0)
col = np.concatenate([col, unmatched_col], axis=0)
indices = np.stack([row, col], axis=0)
matches.append(indices)
return np.stack(matches)
def cpu_matcher(matching_fn):
"""Wraps matching function to be usable within jitted functions.
Args:
matching_fn: function; A matching function that aligns the predictions of
the model with targets.
Returns:
Matching function with host callback that can be jitted.
"""
# The callback function can only take a single argument.
def maybe_slice_and_match(args):
cost, ncol = args
if ncol is None:
return matching_fn(cost)
else:
return slicer(cost, ncol, matching_fn)
@jax.custom_vjp
def matching_fn_hcb(cost, n_cols=None):
bs, n, m = cost.shape
return hcb.call(
maybe_slice_and_match, (cost, n_cols),
result_shape=jax.ShapeDtypeStruct([bs, 2, min(n, m)], jnp.int32))
# Define forward and backward passes.
def matching_fn_hcb_vjp_fwd(cost, n_cols):
return matching_fn_hcb(cost, n_cols), None
def matching_fn_hcb_vjp_bwd(*_):
return (None,) # Return no gradient.
matching_fn_hcb.defvjp(matching_fn_hcb_vjp_fwd, matching_fn_hcb_vjp_bwd)
return matching_fn_hcb
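# Illustrative usage sketch (not part of the original module): a minimal batched
# matcher built on SciPy's Hungarian solver (assuming SciPy is available), in the
# shape cpu_matcher and slicer expect.
def _hungarian_matcher_sketch(cost):
  """Matches each [n_row, n_col] cost matrix; returns [batch, 2, min(n_row, n_col)]."""
  from scipy.optimize import linear_sum_assignment
  out = []
  for c in cost:
    row, col = linear_sum_assignment(c)
    out.append(np.stack([row, col], axis=0))
  return np.stack(out).astype(np.int32)
# Example wiring: jittable_match = cpu_matcher(_hungarian_matcher_sketch); calling
# jittable_match(cost, n_cols) inside jitted code yields indices of shape [batch, 2, n_col].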
| 32.911765 | 80 | 0.70986 |
b8c85474684781ba756a6dc97dcba33f9c0a6f90 | 2,865 | py | Python | topi/tests/python/test_topi_batch_matmul.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 5 | 2020-06-19T03:22:24.000Z | 2021-03-17T22:16:48.000Z | topi/tests/python/test_topi_batch_matmul.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2 | 2020-07-08T12:34:59.000Z | 2020-07-11T15:54:47.000Z | topi/tests/python/test_topi_batch_matmul.py | 0xreza/tvm | f08d5d78ee000b2c113ac451f8d73817960eafd5 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3 | 2020-12-10T23:21:18.000Z | 2020-12-11T01:04:50.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
import topi
import topi.testing
from topi.util import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
from common import get_all_backend
_batch_matmul_implement = {
"generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
"cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
"gpu": (topi.nn.batch_matmul, topi.cuda.schedule_batch_matmul),
}
def verify_batch_matmul(batch, M, N, K):
x = te.placeholder((batch, M, K), name='x')
y = te.placeholder((batch, N, K), name='y')
dtype = x.dtype
    # use memoize to pickle the test data so it can be reused on later runs
@memoize("topi.tests.test_topi_batch_matmul")
def get_ref_data():
a_np = np.random.uniform(size=(batch, M, K)).astype(dtype)
b_np = np.random.uniform(size=(batch, N, K)).astype(dtype)
c_np = topi.testing.batch_matmul(a_np, b_np)
return (a_np, b_np, c_np)
# get the test data
a_np, b_np, c_np = get_ref_data()
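    # Reference semantics sketch (illustrative): with x of shape (batch, M, K) and
    # y of shape (batch, N, K), the expected output multiplies against the transposed
    # second operand, roughly c_ref = np.einsum('bmk,bnk->bmn', a_np, b_np), giving
    # the (batch, M, N) result that each device build is checked against below.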
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
fcompute, fschedule = topi.testing.dispatch(device, _batch_matmul_implement)
out = fcompute(x, y)
s = fschedule([out])
a = tvm.nd.array(a_np, ctx)
b = tvm.nd.array(b_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=dtype), ctx)
f = tvm.build(s, [x, y, out], device, name="dense")
f(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
for device in get_all_backend():
check_device(device)
def test_batch_matmul():
verify_batch_matmul(1, 16, 16, 32)
verify_batch_matmul(5, 16, 16, 32)
verify_batch_matmul(5, 16, 20, 32)
verify_batch_matmul(30, 16, 20, 32)
if __name__ == "__main__":
test_batch_matmul()
| 36.730769 | 88 | 0.684468 |
4e6e5dc629b261e53127e9cef9bc76e564a7b3f5 | 87 | py | Python | tests/test_dog.py | mloskot/python-package | 4b24c22811052492d3af9d2f7d1ffa8f6ae8b412 | [
"Unlicense"
] | null | null | null | tests/test_dog.py | mloskot/python-package | 4b24c22811052492d3af9d2f7d1ffa8f6ae8b412 | [
"Unlicense"
] | null | null | null | tests/test_dog.py | mloskot/python-package | 4b24c22811052492d3af9d2f7d1ffa8f6ae8b412 | [
"Unlicense"
] | null | null | null | def test_noise():
import pets.dog.noise
assert pets.dog.noise.make() == 'woof!' | 29 | 43 | 0.655172 |
93792f9d0e6415208d5eb4f06dc3761ecffe412a | 4,505 | py | Python | com/sony/hackathon/visual/rekognition.py | aeroith/ing-hackathon-kando | dc614c6c223479e7d89f5ecbe402cebf51fa9971 | [
"MIT"
] | null | null | null | com/sony/hackathon/visual/rekognition.py | aeroith/ing-hackathon-kando | dc614c6c223479e7d89f5ecbe402cebf51fa9971 | [
"MIT"
] | null | null | null | com/sony/hackathon/visual/rekognition.py | aeroith/ing-hackathon-kando | dc614c6c223479e7d89f5ecbe402cebf51fa9971 | [
"MIT"
] | null | null | null | import random
from rekognition_helper import rekognition_helper
import io
import time
import picamera
class visual_cortex(object):
def __init__(self):
self.human_like = ['Person', 'Human', 'People']
self.known_faces = {'e7e5b7da-ad8a-4c77-af43-69454c3e0ce9': 'Berkay',
'1cff55aa-5406-4644-9fdc-aa5132c51495': 'Berkay',
'05ae12a2-8b03-42e2-aef1-875161880c66': 'Berkay',
'73184efe-9756-4bfb-aafa-62c09baec97c': 'Berkay',
'fc8458b5-fffa-4f23-aeb6-980127fdb427': 'Denizcan',
'8d62bd15-4626-4222-9a5d-0f79876b696b': 'Denizcan',
'6f37734d-f66f-4969-b632-dde8caebbbc8': 'Denizcan',
'66a30047-1035-404e-8022-99c618803dbc': 'Denizcan',
'edaa11a6-1937-485b-9065-133ea2a68b8a': 'Sercan',
'2a9d8067-43c4-4409-a77e-9ee930bd477f': 'Sercan',
'c12dc22d-dea2-403c-a595-d221fb3ce874': 'Sercan',
'929c29f9-539c-4b84-aa2e-f7189e1a446e': 'Serhat',
'c309eec7-956f-4a3c-9d79-afbf00ab6715': 'Serhat',
'e1bdb7d9-9368-4e8d-98c2-fa6ae4a29e9b': 'Serhat',
'f460ed5d-ecba-4ca1-967a-1a380862d03f': 'Serhat',
'2e50621b-c4cc-4712-9e80-f43f399bd7a0': 'Olcay',
'79d65fb4-87c2-457a-ba6e-3029b7d25c83': 'Olcay',
'c7a794a4-d564-4592-a242-f47080b51dfe': 'Olcay',
'fcb1fc2b-7617-444d-a81a-6d5d1505de7d': 'Olcay',
'ece6008e-15e4-4298-a6f5-705f062447ca': 'Olcay',
'7261819c-0f5c-49e4-9cf2-549907cb5a0f': 'Olcay',
'23c7f00a-8483-4280-a0a1-aaffbe5e1e8c': 'Aysegul',
'9a6aa2bf-a0f4-48b9-9d54-00c56028bb88': 'Aysegul',
'aa51a0ae-6df5-4642-a7ac-762cce6791e2': 'Aysegul',
'0fb8374d-b6a7-448f-a5ef-1f8e3a3cdb99': 'Pinar',
'9d9c2cec-81a6-4f73-bd8d-306f0a5412ed': 'Pinar',
'acb3f948-4e56-4a03-9ae0-a08dddefe213': 'Pinar',
'26673d23-6668-41e7-b2ae-b0e6691b3b02': 'Pinar'}
self.sentence_prefixes_object = [
'I see {}.',
'I think I see {}.',
'There is {} in front of you.'
]
def __tell_known_names(self, matched_face_ids):
matched_names = []
re_helper = rekognition_helper()
for matched_face_id in matched_face_ids:
matched_names.append(self.known_faces[matched_face_id])
matched_names_unique = list(set(matched_names))
print 'I know these people!'
print matched_names_unique
if len(matched_names_unique) > 1:
matched_names, last = ", ".join(matched_names_unique[:-1]), matched_names_unique[-1]
re_helper.speak(" ve ".join([matched_names, last]), voice='Filiz')
elif len(matched_names_unique) == 1:
re_helper.speak(matched_names_unique[0], voice='Filiz')
def see_and_tell(self, byte_array, image_path):
re_helper = rekognition_helper()
# re_helper.create_one_time_collection()
# re_helper.search_faces_by_image(byte_array)
detected_label = re_helper.detect_labels_offline(image_path)
print 'Detected label: ' + detected_label
re_helper.speak(random.choice(self.sentence_prefixes_object).format(detected_label))
if detected_label in self.human_like:
matched_face_ids = re_helper.search_faces_by_image(byte_array)
self.__tell_known_names(matched_face_ids)
else:
detected_text = re_helper.detect_text(byte_array)
            print 'I detected text! ' + detected_text
re_helper.speak(detected_text)
if __name__ == "__main__":
my_stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.resolution = (1024, 768)
camera.hflip = True
camera.vflip = True
camera.start_preview()
# Camera warm-up time
time.sleep(2)
vc = visual_cortex()
while True:
camera.capture("foo.jpg")
with open("foo.jpg", "rw") as imageFile:
f = imageFile.read()
vc.see_and_tell(f, "foo.jpg")
| 49.505495 | 96 | 0.568479 |
95df63bbcd0b24d949004943b2a7cf074e1603c6 | 850 | py | Python | data/transcoder_evaluation_gfg/python/FIND_SUM_NODES_GIVEN_PERFECT_BINARY_TREE_1.py | Syamgith/CodeGen | 6aebde3e4f043ff88ba785b6902fcc445dddafee | [
"MIT"
] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/FIND_SUM_NODES_GIVEN_PERFECT_BINARY_TREE_1.py | Syamgith/CodeGen | 6aebde3e4f043ff88ba785b6902fcc445dddafee | [
"MIT"
] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/FIND_SUM_NODES_GIVEN_PERFECT_BINARY_TREE_1.py | Syamgith/CodeGen | 6aebde3e4f043ff88ba785b6902fcc445dddafee | [
"MIT"
] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
def f_gold ( l ) :
leafNodeCount = math.pow ( 2 , l - 1 ) ;
sumLastLevel = 0 ;
sumLastLevel = ( ( leafNodeCount * ( leafNodeCount + 1 ) ) / 2 ) ;
sum = sumLastLevel * l ;
return int ( sum ) ;
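# Worked example (illustrative): for l = 3 levels the tree has 2**(3-1) = 4 leaves
# holding 1..4, so sumLastLevel = 4 * 5 / 2 = 10; with every level summing to the
# same value, the total is 10 * 3 = 30.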
#TOFILL
if __name__ == '__main__':
param = [
(5,),
(16,),
(8,),
(61,),
(59,),
(88,),
(67,),
(28,),
(58,),
(42,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if abs(1 - (0.0000001 + abs(f_gold(*parameters_set))) / (abs(f_filled(*parameters_set)) + 0.0000001)) < 0.001:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 23.611111 | 118 | 0.556471 |
da33bacca84474179b778ee21d1b777884ed2b0a | 14,123 | py | Python | airflow/hooks/dbapi.py | javatarz/airflow | 1d53bec0618c042de5cd05801b0c1fe015e6b4f8 | [
"Apache-2.0"
] | null | null | null | airflow/hooks/dbapi.py | javatarz/airflow | 1d53bec0618c042de5cd05801b0c1fe015e6b4f8 | [
"Apache-2.0"
] | 1 | 2022-03-08T16:17:19.000Z | 2022-03-08T17:22:52.000Z | airflow/hooks/dbapi.py | javatarz/airflow | 1d53bec0618c042de5cd05801b0c1fe015e6b4f8 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from contextlib import closing
from datetime import datetime
from typing import Any, Optional
from sqlalchemy import create_engine
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import Protocol
class ConnectorProtocol(Protocol):
"""A protocol where you can connect to a database."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
#########################################################################################
# #
# Note! Be extra careful when changing this file. This hook is used as a base for #
# a number of DBApi-related hooks and providers depend on the methods implemented #
# here. Whatever you add here, has to backwards compatible unless #
# `>=<Airflow version>` is added to providers' requirements using the new feature #
# #
#########################################################################################
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
:param schema: Optional DB schema that overrides the schema specified in the connection. Make sure that
if you change the schema parameter value in the constructor of the derived Hook, such change
should be done before calling the ``DBApiHook.__init__()``.
"""
# Override to provide the connection name.
conn_name_attr = None # type: str
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None # type: Optional[ConnectorProtocol]
# Override with db-specific query to check connection
_test_connection_sql = "select 1"
def __init__(self, *args, schema: Optional[str] = None, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
# We should not make schema available in deriving hooks for backwards compatibility
# If a hook deriving from DBApiHook has a need to access schema, then it should retrieve it
# from kwargs and store it on its own. We do not run "pop" here as we want to give the
# Hook deriving from the DBApiHook to still have access to the field in it's constructor
self.__schema = schema
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
conn.schema = self.__schema or conn.schema
return conn.get_uri()
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None, **kwargs):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
"""
try:
from pandas.io import sql as psql
except ImportError:
raise Exception("pandas library not installed, run: pip install 'apache-airflow[pandas]'.")
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None, handler=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:param parameters: The parameters to render the SQL query with.
:param handler: The result handler which is called with the result of each statement.
:return: query results if handler was provided.
"""
scalar = isinstance(sql, str)
if scalar:
sql = [sql]
if sql:
self.log.debug("Executing %d statements", len(sql))
else:
raise ValueError("List of SQL statements is empty")
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
results = []
for sql_statement in sql:
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = handler(cur)
results.append(result)
# If autocommit was set to False for db that supports autocommit,
# or if db does not supports autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if scalar:
return results[0]
return results
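    # Usage sketch (illustrative): hook.run("SELECT 1", handler=lambda cur: cur.fetchall())
    # returns the fetched rows for the single statement, while
    # hook.run(["UPDATE t SET x = 1", "DELETE FROM t WHERE x = 2"]) executes both
    # statements in order and returns None because no handler is given.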
def _run_command(self, cur, sql_statement, parameters):
"""Runs a statement using an already open cursor."""
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if parameters:
cur.execute(sql_statement, parameters)
else:
cur.execute(sql_statement)
# According to PEP 249, this is -1 when query result is not applicable.
if cur.rowcount >= 0:
self.log.info("Rows affected: %s", cur.rowcount)
def set_autocommit(self, conn, autocommit):
"""Sets the autocommit flag on the connection"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
@staticmethod
def _generate_insert_sql(table, values, target_fields, replace, **kwargs):
"""
Static helper method that generates the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:param values: The row to insert into the table
:param target_fields: The names of the columns to fill in the table
:param replace: Whether to replace instead of insert
:return: The generated INSERT or REPLACE SQL statement
:rtype: str
"""
placeholders = [
"%s",
] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = f"({target_fields})"
else:
target_fields = ''
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += f"{table} {target_fields} VALUES ({','.join(placeholders)})"
return sql
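    # Example of the generated statement (illustrative):
    #   _generate_insert_sql("users", ("Alice", 30), ["name", "age"], False)
    #   -> 'INSERT INTO users (name, age) VALUES (%s,%s)'
    # With replace=True the prefix becomes 'REPLACE INTO' (MySQL-specific syntax).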
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
self.log.debug("Generated sql: %s", sql)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info("Loaded %s rows into %s so far", i, table)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:param tmp_file: The path of the target file
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:param tmp_file: The path of the file to load into the table
"""
raise NotImplementedError()
def test_connection(self):
"""Tests the connection using db-specific query"""
status, message = False, ''
try:
if self.get_first(self._test_connection_sql):
status = True
message = 'Connection successfully tested'
except Exception as e:
status = False
message = str(e)
return status, message
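# Illustrative sketch (not part of Airflow): the smallest concrete hook this base
# class supports, backed by sqlite3 purely for demonstration purposes.
class _SqliteHookSketch(DbApiHook):
    conn_name_attr = 'sqlite_conn_id'
    default_conn_name = 'sqlite_default'
    supports_autocommit = False
    def get_conn(self):
        import sqlite3
        return sqlite3.connect(':memory:')
# Usage sketch:
#   hook = _SqliteHookSketch()
#   hook.get_first("SELECT 1")                                # -> (1,)
#   hook.run("SELECT 1", handler=lambda cur: cur.fetchall())  # -> [(1,)]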
| 39.230556 | 107 | 0.605041 |
1f602dde136985a21b83c24f3fc95414e167b046 | 1,390 | py | Python | main.py | pegehlha/Exercise-01c-Basic-Game-Loop | ef0faaa60fd81a904858d7ef205af89249542512 | [
"MIT"
] | null | null | null | main.py | pegehlha/Exercise-01c-Basic-Game-Loop | ef0faaa60fd81a904858d7ef205af89249542512 | [
"MIT"
] | null | null | null | main.py | pegehlha/Exercise-01c-Basic-Game-Loop | ef0faaa60fd81a904858d7ef205af89249542512 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys,os,json
assert sys.version_info >= (3,8), "This script requires at least Python 3.8"
def load(l):
    # Read and parse the JSON game description, closing the file when done.
    with open(os.path.join(sys.path[0], l)) as f:
        return json.loads(f.read())
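# Illustrative sketch of the expected adventure.json shape, inferred from the fields
# this script reads ("startnode", "passages", "pid", "name", "text", "links"); the
# real file may carry additional data:
# {
#   "startnode": "1",
#   "passages": [
#     {"pid": "1", "name": "Cave", "text": "A dark cave.",
#      "links": [{"name": "North", "pid": "2"}]},
#     {"pid": "2", "name": "Forest", "text": "Tall trees.", "links": []}
#   ]
# }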
def find_passage(game_desc, pid):
for p in game_desc["passages"]:
if p["pid"] == pid:
return p
return {}
# ------------------------------------------------------
def update(current, game_desc, choice):
if choice == "":
return current
for l in current ["links"]:
if choice == l["name"].lower():
current = find_passage(game_desc, l["pid"])
return current
def render(current):
    print("You are at the " + current["name"])
    print(current["text"])
def get_input(current):
choice = input("What would you like to do? (type quit to exit) ")
choice = choice.lower()
if choice in ["quit","q","exit"]:
return "quit"
return choice
# ------------------------------------------------------
def main():
game_desc = load("adventure.json")
current = find_passage(game_desc, game_desc["startnode"])
choice = ""
while choice != "quit" and current != {}:
current = update(current, game_desc, choice)
render(current)
choice = get_input(current)
print("Thanks for playing!")
if __name__ == "__main__":
main() | 23.559322 | 76 | 0.548201 |
4fbce576130203c692f3a42aa26e9b2cb6193fc7 | 3,624 | py | Python | nlgmcts/text_mcts.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | 1 | 2018-10-29T20:43:45.000Z | 2018-10-29T20:43:45.000Z | nlgmcts/text_mcts.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | null | null | null | nlgmcts/text_mcts.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | 1 | 2021-05-11T12:08:29.000Z | 2021-05-11T12:08:29.000Z | import random
from math import *
import math
import numpy as np
class TextMCTS:
def __init__(self, vocabulary, text_length, eval_function, c=sqrt(2)):
self._vocabulary = vocabulary
self._text_length = text_length
self._eval_function = eval_function
self._best_sequence = None
self._c = c
def search(self, state, num_simulations):
root_node = _Node(state, self._vocabulary, self._text_length, self._c)
# Perform simulations
for i in range(num_simulations):
node = root_node
# Select
while not node.has_untried_moves() and node.has_children():
node = node.select_child()
# Expand
if node.has_untried_moves():
move_state = node.select_untried_move()
node = node.add_child(move_state, self._vocabulary, self._text_length, self._c)
# Rollout
rollout_state = list(node.state)
while len(rollout_state) < self._text_length:
rollout_state += [self._select_next_move_randomly()]
# Backpropagate
# backpropagate from the expanded node and work back to the root node
score = self._eval_function(rollout_state)
while node is not None:
node.visits += 1
node.wins += score
node = node.parent
self._store_best(rollout_state, score)
# return the move that was most visited
most_visited_node = sorted(root_node.children, key = lambda c: c.visits)[-1]
return most_visited_node.state
def _select_next_move_randomly(self):
return np.random.choice(self._vocabulary)
def _store_best(self, rollout_state, score):
current_best = self._best_sequence
if current_best is None or score > current_best[1]:
self._best_sequence = (rollout_state, score)
def get_best_sequence(self):
return self._best_sequence
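# Illustrative usage sketch (not part of the original module): grow a sequence one
# token at a time, re-running the search from each new prefix.
#   vocab = ['a', 'b', 'c']
#   mcts = TextMCTS(vocab, text_length=5,
#                   eval_function=lambda seq: seq.count('a') / len(seq))
#   state = []
#   while len(state) < 5:
#       state = mcts.search(state, num_simulations=200)
#   best_sequence, best_score = mcts.get_best_sequence()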
class _Node:
def __init__(self, state, vocabulary, text_length, c, parent=None):
self.state = state
self._c = c
self._vocabulary = vocabulary
self._text_length = text_length
self.wins = 0.0
self.visits = 0.0
self.parent = parent
self.children = []
self.untried_moves = self._get_child_states()
def _get_child_states(self):
child_states = []
for token in self._vocabulary:
child_states.append(self.state + [token])
return child_states
def _average_value(self):
return self.wins / self.visits
def has_untried_moves(self):
return self.untried_moves != []
def select_untried_move(self):
return random.choice(self.untried_moves)
def add_child(self, child_state, vocabulary, text_length, c):
child = _Node(child_state, vocabulary, text_length, c, parent=self)
self.children.append(child)
self.untried_moves.remove(child_state)
return child
def has_children(self):
return self.children != []
def select_child(self):
highest_ucb1 = None
selected_child_node = None
for child_node in self.children:
ucb1 = child_node.ucb1()
if highest_ucb1 is None or highest_ucb1 < ucb1:
highest_ucb1 = ucb1
selected_child_node = child_node
return selected_child_node
def ucb1(self):
if self.visits == 0.0:
return math.inf
return self._average_value() + self._c*sqrt(log(self.parent.visits)/self.visits)
| 32.357143 | 95 | 0.625 |
864a26ba6d78e77cd103433125ebc1877706fd31 | 6,353 | py | Python | lino_book/projects/team/tests/test_notify.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 1 | 2018-01-12T14:09:58.000Z | 2018-01-12T14:09:58.000Z | lino_book/projects/team/tests/test_notify.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | 4 | 2018-02-06T19:53:10.000Z | 2019-08-01T21:47:44.000Z | lino_book/projects/team/tests/test_notify.py | khchine5/book | b6272d33d49d12335d25cf0a2660f7996680b1d1 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2016-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Runs some tests about the notification framework.
You can run only these tests by issuing::
$ go team
$ python manage.py test tests.test_notify
Or::
$ go book
$ python setup.py test -s tests.test_demo.TestCase.test_team
"""
from __future__ import unicode_literals
# import six
import datetime
from mock import patch
from django.conf import settings
from django.utils.timezone import make_aware
from lino.api import dd, rt
from lino.utils.djangotest import TestCase
from lino.core import constants
from lino.modlib.users.choicelists import UserTypes
from lino.utils.instantiator import create
from lino.modlib.notify.models import send_pending_emails_often
from lino.modlib.notify.choicelists import MailModes
from lino.core.diff import ChangeWatcher
import sys
# from cStringIO import StringIO
from io import StringIO
import contextlib
@contextlib.contextmanager
def capture_stdout():
oldout = sys.stdout
try:
out = StringIO()
sys.stdout = out
yield out
finally:
sys.stdout = oldout
# out = out.getvalue()
class TestCase(TestCase):
"""Miscellaneous tests."""
maxDiff = None
def test_01(self):
self.assertEqual(settings.SETTINGS_MODULE, None)
self.assertEqual(settings.LOGGING, {})
self.assertEqual(settings.SERVER_EMAIL, 'root@localhost')
@patch('lino.api.dd.logger')
def test_comment(self, logger):
"""Test what happens when a comment is posted on a ticket with
watchers.
"""
ContentType = rt.models.contenttypes.ContentType
Ticket = rt.models.tickets.Ticket
# Project = rt.models.tickets.Project
Site = rt.models.tickets.Site
Subscription = rt.models.tickets.Subscription
# Vote = rt.models.votes.Vote
# Star = rt.models.stars.Star
Message = rt.models.notify.Message
User = settings.SITE.user_model
# create(Project, name="Project")
robin = create(
User, username='robin',
first_name="Robin",
user_type=UserTypes.admin)
aline = create(
User, username='aline',
first_name="Aline",
email="[email protected]", language='fr',
user_type=UserTypes.admin)
foo = create(Site, name="Foo")
create(Subscription, site=foo, user=aline)
obj = create(
Ticket, summary="Save the world, après moi le déluge",
user=robin, site=foo)
self.assertEqual(Message.objects.count(), 0)
ar = rt.login('robin')
self.client.force_login(ar.user)
url = "/api/comments/CommentsByRFC"
post_data = dict()
post_data[constants.URL_PARAM_ACTION_NAME] = 'submit_insert'
post_data.update(body="I don't agree.")
post_data[constants.URL_PARAM_MASTER_PK] = obj.pk
ct = ContentType.objects.get_for_model(Ticket)
post_data[constants.URL_PARAM_MASTER_TYPE] = ct.id
# post_data[constants.URL_PARAM_REQUESTING_PANEL] = '123'
self.client.force_login(robin)
response = self.client.post(
url, post_data,
REMOTE_USER='robin',
HTTP_ACCEPT_LANGUAGE='en')
result = self.check_json_result(
response, 'rows success message close_window navinfo')
self.assertEqual(result['success'], True)
self.assertEqual(
result['message'],
"""Comment "Comment #1" has been created.""")
self.assertEqual(Message.objects.count(), 1)
msg = Message.objects.all()[0]
# self.assertEqual(msg.message_type)
self.assertEqual(msg.seen, None)
self.assertEqual(msg.user, aline)
expected = """Robin a commenté [ticket 1] (Save the world, """\
"""après moi le déluge):<br>I don't agree."""
self.assertEqual(expected, msg.body)
# manually set created timestamp so we can test on it later.
now = datetime.datetime(2016, 12, 22, 19, 45, 55)
if settings.USE_TZ:
now = make_aware(now)
msg.created = now
msg.save()
settings.SERVER_EMAIL = '[email protected]'
with capture_stdout() as out:
send_pending_emails_often()
out = out.getvalue().strip()
# if six.PY3:
# if isinstance(out, bytes):
# out = out.decode()
# # if isinstance(out, bytes):
# raise Exception(out)
# print(out)
expected = """send email
Sender: [email protected]
To: [email protected]
Subject: [Django] Robin a comment? #1 (? Save the world, apr?s moi le d?luge)
<body>
(22/12/2016 19:45)
Robin a comment? <a href="http://127.0.0.1:8000/api/tickets/Ticket/1" title="Save the world, après moi le déluge">#1</a> (Save the world, apr?s moi le d?luge):<br>I don't agree.
</body>
"""
self.assertEquivalent(expected, out)
self.assertEqual(logger.debug.call_count, 1)
logger.debug.assert_called_with(
'Send out %s summaries for %d users.',
MailModes.often, 1)
# logger.info.assert_called_with(
# 'Notify %s users about %s', 1, 'Change by robin')
Message.objects.all().delete()
self.assertEqual(Message.objects.count(), 0)
cw = ChangeWatcher(obj)
from lino_xl.lib.tickets.choicelists import Priorities
obj.priority = Priorities.low
obj.save_watched_instance(ar, cw)
with capture_stdout() as out:
send_pending_emails_often()
out = out.getvalue().strip()
# print(out)
expected = ""
# self.assertEquivalent(expected, out)
# we do not test the output because the datetime changes. But
# we actually just wanted to see if there is no
# UnicodeException. We capture it in order to hide it from
# test runner output.
self.assertEqual(logger.debug.call_count, 2)
logger.debug.assert_called_with(
'Send out %s summaries for %d users.',
MailModes.often, 1)
| 31.765 | 187 | 0.616087 |
7ba4ff2f2bd5817feaf618a9b177dfc5037e8fb1 | 1,247 | py | Python | functionplots.py | martinaobrien/Problem_Sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | functionplots.py | martinaobrien/Problem_Sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | functionplots.py | martinaobrien/Problem_Sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | # Martina O'Brien 30 March 2019
# Title: Problem Set 10 - plot functions
# Programme that displays a plot of the functions x, x^2 and 2^x in the range [0,4] and shows them in a graph.
# import mglearn
import matplotlib.pyplot as plt
import numpy as np
lower = int(input('enter lower: '))
# The variable 'lower' lets the user set the lower end of the range.
upper = int(input('enter upper: '))
# The variable 'upper' lets the user set the upper end of the range.
x = np.arange(lower, upper + 1)
# x holds the integer values from lower to upper; arange excludes its stop value, so 1 is added to keep the range inclusive.
plt.plot(x, x)
# Line displays x
plt.plot(x, x ** 2)
# Line 2 displays x^2
plt.plot(x, 2 ** x)
# Line 3 displays 2^x
# Graph labelling
My_title = f'Functions x, x^2 and 2^x in the range [{lower},{upper}]'
# Title is set up as a readable string.
plt.title(My_title, fontweight="bold")
# Allocating labels for x and y axis
plt.xlabel("number")
# X axis is labelled
plt.ylabel("number")
# Y axis is labelled
# Reference: https://matplotlib.org/api/text_api.html?highlight=fontweight#matplotlib.text.Text.get_fontweight
plt.show() | 29.690476 | 195 | 0.728949 |
c0e2dfa8ae1f0e4ef7bdbec6bdd08191e9a3f96f | 1,564 | py | Python | 3d/generate.py | danya02/slon-2018-steinhaus | df91ff49911e57cb9d3088ed5179752343071082 | [
"MIT"
] | null | null | null | 3d/generate.py | danya02/slon-2018-steinhaus | df91ff49911e57cb9d3088ed5179752343071082 | [
"MIT"
] | null | null | null | 3d/generate.py | danya02/slon-2018-steinhaus | df91ff49911e57cb9d3088ed5179752343071082 | [
"MIT"
] | null | null | null | import multiprocessing
def test(a,b,c,d,l):
asq=a**2
bsq=b**2
csq=c**2
dsq=d**2
lsq=l**2
return 3*((l**4)+(b**4)+(a**4)+(c**4)+(d**4)) == 2*(bsq*lsq+asq*lsq+asq*bsq+bsq*csq+csq*lsq+asq*csq+asq*dsq+bsq*dsq+csq*dsq+lsq*dsq)
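# Illustrative restatement of the condition above: it holds exactly when
#   3 * (a**4 + b**4 + c**4 + d**4 + l**4)
#     == 2 * (sum of x**2 * y**2 over all 10 unordered pairs {x, y} drawn from {a, b, c, d, l})
# i.e. the left side collects fourth powers and the right side every pairwise product of squares.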
limit=10
procs = 16
def worker(num):
print('I am worker',num)
for a in range(num,limit,procs):
for b in range(1,limit):
print('{num}: a={a}, b={b}'.format(**locals()))
for c in range(1,limit):
for d in range(1,limit):
for l in range(1,limit):
if test(a,b,c,d,l):
with open('steinhaus-3d-tetrahedron.part{num}'.format(**locals()),'a') as o:
print(a,b,c,d,l,file=o)
print(num,': FOUND!!!!!',a,b,c,d,l)
if __name__=='__main__':
proclist = [multiprocessing.Process(target=worker,args=tuple([i+1])) for i in range(procs)]
for i in proclist:
i.start()
try:
for i,j in enumerate(proclist):
print('Waiting for process {i}...'.format(**locals()))
j.join()
finally:
print('Combining data!')
data=[]
for i in range(procs):
try:
data.extend(open('steinhaus-3d-tetrahedron.part{}'.format(i+1)))
except:
pass
try:
data.extend(open('steinhaus-3d-tetrahedron'))
except:
pass
data=set(data)
open('steinhaus-3d-tetrahedron','w').write(''.join(data))
| 33.276596 | 136 | 0.493606 |
f74247aa712d3d42489eb05bdf2cd44e8b078479 | 100,983 | py | Python | ovs/lib/setup.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | 1 | 2019-10-30T20:50:59.000Z | 2019-10-30T20:50:59.000Z | ovs/lib/setup.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | ovs/lib/setup.py | rootfs-analytics/openvstorage | 6184822340faea1d2927643330a7aaa781d92d36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for SetupController
"""
import os
import re
import sys
import imp
import time
import uuid
import urllib2
import base64
import inspect
from ConfigParser import RawConfigParser
from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.interactive import Interactive
from ovs.extensions.generic.system import System
from ovs.log.logHandler import LogHandler
from ovs.extensions.storage.persistentfactory import PersistentFactory
from ovs.extensions.storage.volatilefactory import VolatileFactory
logger = LogHandler('lib', name='setup')
logger.logger.propagate = False
# @TODO: Make the setup_node re-entrant
# @TODO: Make it possible to run as a non-privileged user
# @TODO: Node password identical for all nodes
class SetupController(object):
"""
This class contains all logic for setting up an environment, installed with system-native packages
"""
PARTITION_DEFAULTS = {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'cache1'}
# Arakoon
arakoon_clusters = ['ovsdb', 'voldrv']
arakoon_exclude_ports = {'ovsdb': 8872, 'voldrv': 8870}
# Generic configfiles
generic_configfiles = {'/opt/OpenvStorage/config/memcacheclient.cfg': 11211,
'/opt/OpenvStorage/config/rabbitmqclient.cfg': 5672}
ovs_config_filename = '/opt/OpenvStorage/config/ovs.cfg'
avahi_filename = '/etc/avahi/services/ovs_cluster.service'
# Services
model_services = ['memcached', 'arakoon-ovsdb']
master_services = model_services + ['rabbitmq', 'arakoon-voldrv']
extra_node_services = ['workers', 'volumerouter-consumer']
master_node_services = master_services + ['scheduled-tasks', 'snmp', 'webapp-api', 'nginx',
'volumerouter-consumer'] + extra_node_services
discovered_nodes = {}
host_ips = set()
@staticmethod
def setup_node(ip=None, force_type=None, verbose=False):
"""
Sets up a node.
1. Some magic figuring out here:
- Which cluster (new, joining)
- Cluster role (master, extra)
2. Prepare cluster
3. Depending on (2), setup first/extra node
4. Depending on (2), promote new extra node
"""
print Interactive.boxed_message(['Open vStorage Setup'])
logger.info('Starting Open vStorage Setup')
target_password = None
cluster_name = None
first_node = True
nodes = []
cluster_ip = None
hypervisor_type = None
hypervisor_name = None
hypervisor_password = None
hypervisor_ip = None
hypervisor_username = 'root'
known_passwords = {}
master_ip = None
auto_config = None
disk_layout = None
arakoon_mountpoint = None
join_cluster = False
enable_heartbeats = None
# Support non-interactive setup
preconfig = '/tmp/openvstorage_preconfig.cfg'
if os.path.exists(preconfig):
config = RawConfigParser()
config.read(preconfig)
ip = config.get('setup', 'target_ip')
target_password = config.get('setup', 'target_password')
cluster_ip = config.get('setup', 'cluster_ip')
cluster_name = str(config.get('setup', 'cluster_name'))
master_ip = config.get('setup', 'master_ip')
hypervisor_type = config.get('setup', 'hypervisor_type')
hypervisor_name = config.get('setup', 'hypervisor_name')
hypervisor_ip = config.get('setup', 'hypervisor_ip')
hypervisor_username = config.get('setup', 'hypervisor_username')
hypervisor_password = config.get('setup', 'hypervisor_password')
arakoon_mountpoint = config.get('setup', 'arakoon_mountpoint')
verbose = config.getboolean('setup', 'verbose')
auto_config = config.get('setup', 'auto_config')
disk_layout = eval(config.get('setup', 'disk_layout'))
join_cluster = config.getboolean('setup', 'join_cluster')
enable_heartbeats = True
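            # For reference, a minimal sketch of /tmp/openvstorage_preconfig.cfg covering the keys
            # read above (all values are illustrative assumptions, not shipped defaults):
            #   [setup]
            #   target_ip = 10.100.1.10
            #   target_password = rooter
            #   cluster_ip = 10.100.1.10
            #   cluster_name = demo
            #   master_ip = 10.100.1.11
            #   hypervisor_type = KVM
            #   hypervisor_name = kvm10
            #   hypervisor_ip = 10.100.1.10
            #   hypervisor_username = root
            #   hypervisor_password = rooter
            #   arakoon_mountpoint = /mnt/db
            #   verbose = True
            #   auto_config = True
            #   disk_layout = {'/mnt/db': {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'db'}}
            #   join_cluster = False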
try:
if force_type is not None:
force_type = force_type.lower()
if force_type not in ['master', 'extra']:
raise ValueError("The force_type parameter should be 'master' or 'extra'.")
# Create connection to target node
print '\n+++ Setting up connections +++\n'
logger.info('Setting up connections')
if ip is None:
ip = '127.0.0.1'
if target_password is None:
node_string = 'this node' if ip == '127.0.0.1' else ip
target_node_password = Interactive.ask_password('Enter the root password for {0}'.format(node_string))
else:
target_node_password = target_password
target_client = SSHClient.load(ip, target_node_password)
if verbose:
logger.debug('Verbose mode')
from ovs.plugin.provider.remote import Remote
Remote.cuisine.fabric.output['running'] = True
logger.debug('Target client loaded')
print '\n+++ Collecting cluster information +++\n'
logger.info('Collecting cluster information')
# Check whether running local or remote
unique_id = System.get_my_machine_id(target_client)
local_unique_id = System.get_my_machine_id()
remote_install = unique_id != local_unique_id
logger.debug('{0} installation'.format('Remote' if remote_install else 'Local'))
if not target_client.file_exists(SetupController.ovs_config_filename):
raise RuntimeError("The 'openvstorage' package is not installed on {0}".format(ip))
System.set_remote_config(target_client, 'ovs.core.uniqueid', unique_id)
# Getting cluster information
current_cluster_names = []
clusters = []
discovery_result = SetupController._discover_nodes(target_client)
if discovery_result:
clusters = discovery_result.keys()
current_cluster_names = clusters[:]
logger.debug('Cluster names: {0}'.format(current_cluster_names))
else:
print 'No existing Open vStorage clusters are found.'
logger.debug('No clusters found')
local_cluster_name = None
if remote_install is True:
if os.path.exists(SetupController.avahi_filename):
with open(SetupController.avahi_filename, 'r') as avahi_file:
avahi_contents = avahi_file.read()
match_groups = re.search('>ovs_cluster_(?P<cluster>[^_]+)_.+?<', avahi_contents).groupdict()
if 'cluster' in match_groups:
local_cluster_name = match_groups['cluster']
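            # The avahi service name parsed here follows the format written out later in this
            # module: ovs_cluster_<cluster_name>_<node_name>_<ip_with_underscores>,
            # e.g. (illustrative) ovs_cluster_demo_node1_10_100_1_10.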
node_name = target_client.run('hostname')
logger.debug('Current host: {0}'.format(node_name))
if cluster_name is None:
if len(clusters) > 0:
clusters.sort()
dont_join = "Don't join any of these clusters."
logger.debug('Manual cluster selection')
if force_type in [None, 'master']:
clusters = [dont_join] + clusters
print 'Following Open vStorage clusters are found.'
cluster_name = Interactive.ask_choice(clusters, 'Select a cluster to join', default_value=local_cluster_name, sort_choices=False)
if cluster_name != dont_join:
logger.debug('Cluster {0} selected'.format(cluster_name))
SetupController.discovered_nodes = discovery_result[cluster_name]
nodes = [node_property['ip'] for node_property in discovery_result[cluster_name].values()]
if node_name in discovery_result[cluster_name].keys():
continue_install = Interactive.ask_yesno(
'{0} already exists in cluster {1}. Do you want to continue?'.format(
node_name, cluster_name
), default_value=True
)
if continue_install is False:
raise ValueError('Duplicate node name found.')
master_nodes = [this_node_name for this_node_name, node_properties in discovery_result[cluster_name].iteritems()
if node_properties.get('type', None) == 'master']
if len(master_nodes) == 0:
raise RuntimeError('No master node could be found in cluster {0}'.format(cluster_name))
# @TODO: we should be able to choose the ip here too in a multiple nic setup?
master_ip = discovery_result[cluster_name][master_nodes[0]]['ip']
known_passwords[master_ip] = Interactive.ask_password('Enter the root password for {0}'.format(master_ip))
first_node = False
else:
cluster_name = None
logger.debug('No cluster will be joined')
elif force_type is not None and force_type != 'master':
raise RuntimeError('No clusters were found. Only a Master node can be set up.')
if first_node is True and cluster_name is None:
while True:
cluster_name = Interactive.ask_string('Please enter the cluster name')
if cluster_name in current_cluster_names:
print 'The new cluster name should be unique.'
if not re.match('^[0-9a-zA-Z]+(\-[0-9a-zA-Z]+)*$', cluster_name):
print "The new cluster name can only contain numbers, letters and dashes."
else:
break
else: # Automated install
logger.debug('Automated installation')
if cluster_name in discovery_result:
SetupController.discovered_nodes = discovery_result[cluster_name]
# @TODO: update the ip to the chosen one in autoconfig file?
nodes = [node_property['ip'] for node_property in discovery_result[cluster_name].values()]
first_node = not join_cluster
if not cluster_name:
raise RuntimeError('The name of the cluster should be known by now.')
# Get target cluster ip
ipaddresses = target_client.run("ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1").strip().split('\n')
ipaddresses = [found_ip.strip() for found_ip in ipaddresses if found_ip.strip() != '127.0.0.1']
if not cluster_ip:
cluster_ip = Interactive.ask_choice(ipaddresses, 'Select the public ip address of {0}'.format(node_name))
known_passwords[cluster_ip] = target_node_password
if cluster_ip not in nodes:
nodes.append(cluster_ip)
logger.debug('Cluster ip is selected as {0}'.format(cluster_ip))
if target_password is not None:
for node in nodes:
known_passwords[node] = target_password
# Deciding master/extra
print 'Analyzing cluster layout'
logger.info('Analyzing cluster layout')
promote = False
if first_node is False:
for cluster in SetupController.arakoon_clusters:
config = ArakoonInstaller.get_config_from(cluster, master_ip, known_passwords[master_ip])
cluster_nodes = [node.strip() for node in config.get('global', 'cluster').split(',')]
logger.debug('{0} nodes for cluster {1} found'.format(len(cluster_nodes), cluster))
if (len(cluster_nodes) < 3 or force_type == 'master') and force_type != 'extra':
promote = True
else:
promote = True # Correct, but irrelevant, since a first node is always master
mountpoints, hypervisor_info = SetupController._prepare_node(
cluster_ip, nodes, known_passwords,
{'type': hypervisor_type,
'name': hypervisor_name,
'username': hypervisor_username,
'ip': hypervisor_ip,
'password': hypervisor_password},
auto_config, disk_layout
)
if first_node:
SetupController._setup_first_node(cluster_ip, unique_id, mountpoints,
cluster_name, node_name, hypervisor_info, arakoon_mountpoint,
enable_heartbeats)
else:
SetupController._setup_extra_node(cluster_ip, master_ip, cluster_name, unique_id,
nodes, hypervisor_info)
if promote:
SetupController._promote_node(cluster_ip, master_ip, cluster_name, nodes, unique_id,
mountpoints, arakoon_mountpoint)
print ''
print Interactive.boxed_message(['Setup complete.',
'Point your browser to http://{0} to use Open vStorage'.format(cluster_ip)])
logger.info('Setup complete')
except Exception as exception:
print '' # Spacing
print Interactive.boxed_message(['An unexpected error occurred:', str(exception)])
logger.exception('Unexpected error')
logger.error(str(exception))
sys.exit(1)
except KeyboardInterrupt:
print ''
print ''
print Interactive.boxed_message(['This setup was aborted. Open vStorage may be in an inconsistent state, make sure to validate the installation.'])
logger.error('Keyboard interrupt')
sys.exit(1)
@staticmethod
def _prepare_node(cluster_ip, nodes, known_passwords, hypervisor_info, auto_config, disk_layout):
"""
Prepares a node:
- Exchange SSH keys
- Update hosts files
- Partitioning
- Request hypervisor information
"""
print '\n+++ Preparing node +++\n'
logger.info('Preparing node')
# Exchange ssh keys
print 'Exchanging SSH keys'
logger.info('Exchanging SSH keys')
passwords = {}
first_request = True
prev_node_password = ''
for node in nodes:
if node in known_passwords:
passwords[node] = known_passwords[node]
continue
if first_request is True:
prev_node_password = Interactive.ask_password('Enter root password for {0}'.format(node))
logger.debug('Custom password for {0}'.format(node))
passwords[node] = prev_node_password
first_request = False
else:
this_node_password = Interactive.ask_password('Enter root password for {0}, just press enter if identical as above'.format(node))
if this_node_password == '':
logger.debug('Identical password for {0}'.format(node))
this_node_password = prev_node_password
passwords[node] = this_node_password
prev_node_password = this_node_password
root_ssh_folder = '/root/.ssh'
ovs_ssh_folder = '/opt/OpenvStorage/.ssh'
public_key_filename = '{0}/id_rsa.pub'
authorized_keys_filename = '{0}/authorized_keys'
known_hosts_filename = '{0}/known_hosts'
authorized_keys = ''
mapping = {}
logger.debug('Nodes: {0}'.format(nodes))
logger.debug('Discovered nodes: \n{0}'.format(SetupController.discovered_nodes))
all_ips = set()
all_hostnames = set()
for hostname, node_details in SetupController.discovered_nodes.iteritems():
for ip in node_details['ip_list']:
all_ips.add(ip)
all_hostnames.add(hostname)
all_ips.update(SetupController.host_ips)
for node in nodes:
node_client = SSHClient.load(node, passwords[node])
root_pub_key = node_client.file_read(public_key_filename.format(root_ssh_folder))
ovs_pub_key = node_client.file_read(public_key_filename.format(ovs_ssh_folder))
authorized_keys += '{0}\n{1}\n'.format(root_pub_key, ovs_pub_key)
node_hostname = node_client.run('hostname')
all_hostnames.add(node_hostname)
mapping[node] = node_hostname
for node in nodes:
node_client = SSHClient.load(node, passwords[node])
print 'Updating hosts files'
logger.debug('Updating hosts files')
for ip in mapping.keys():
update_hosts_file = """
from ovs.extensions.generic.system import System
System.update_hosts_file(hostname='{0}', ip='{1}')
""".format(mapping[ip], ip)
SetupController._exec_python(node_client, update_hosts_file)
node_client.file_write(authorized_keys_filename.format(root_ssh_folder), authorized_keys)
node_client.file_write(authorized_keys_filename.format(ovs_ssh_folder), authorized_keys)
cmd = 'cp {1} {1}.tmp;ssh-keyscan -t rsa {0} {2} >> {1}.tmp;cat {1}.tmp | sort -u - > {1}'
node_client.run(cmd.format(' '.join(all_ips), known_hosts_filename.format(root_ssh_folder), ' '.join(all_hostnames)))
cmd = 'su - ovs -c "cp {1} {1}.tmp;ssh-keyscan -t rsa {0} {2} >> {1}.tmp;cat {1}.tmp | sort -u - > {1}"'
node_client.run(cmd.format(' '.join(all_ips), known_hosts_filename.format(ovs_ssh_folder), ' '.join(all_hostnames)))
# Creating filesystems
print 'Creating filesystems'
logger.info('Creating filesystems')
target_client = SSHClient.load(cluster_ip)
disk_layout = SetupController.apply_flexible_disk_layout(target_client, auto_config, disk_layout)
# add directory mountpoints to ovs.cfg
config = SetupController._remote_config_read(target_client, SetupController.ovs_config_filename)
partition_key = 'vpool_partitions'
if config.has_section(partition_key):
config.remove_section(partition_key)
config.add_section(partition_key)
additional_mountpoints = list()
for mountpoint, details in disk_layout.iteritems():
if 'DIR_ONLY' in details['device']:
additional_mountpoints.append(mountpoint)
config.set(partition_key, 'dirs', ','.join(map(str, additional_mountpoints)))
SetupController._remote_config_write(target_client, SetupController.ovs_config_filename, config)
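        # A sketch of the section this writes into ovs.cfg, assuming the default directory
        # mountpoints defined further down in this module (the exact list depends on the layout):
        #   [vpool_partitions]
        #   dirs = /mnt/md,/mnt/db,/mnt/bfs,/var/tmp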
mountpoints = disk_layout.keys()
mountpoints.sort()
print 'Collecting hypervisor information'
logger.info('Collecting hypervisor information')
# Collecting hypervisor data
target_client = SSHClient.load(cluster_ip)
possible_hypervisor = SetupController._discover_hypervisor(target_client)
if not hypervisor_info.get('type'):
hypervisor_info['type'] = Interactive.ask_choice(['VMWARE', 'KVM'],
question='Which type of hypervisor is this Storage Router backing?',
default_value=possible_hypervisor)
logger.debug('Selected hypervisor type {0}'.format(hypervisor_info['type']))
default_name = ('esxi{0}' if hypervisor_info['type'] == 'VMWARE' else 'kvm{0}').format(cluster_ip.split('.')[-1])
if not hypervisor_info.get('name'):
hypervisor_info['name'] = Interactive.ask_string('Enter hypervisor hostname', default_value=default_name)
if hypervisor_info['type'] == 'VMWARE':
first_request = True # If parameters are wrong, we need to re-ask it
while True:
if not hypervisor_info.get('ip') or not first_request:
hypervisor_info['ip'] = Interactive.ask_string('Enter hypervisor ip address',
default_value=hypervisor_info.get('ip'))
if not hypervisor_info.get('username') or not first_request:
hypervisor_info['username'] = Interactive.ask_string('Enter hypervisor username',
default_value=hypervisor_info['username'])
if not hypervisor_info.get('password') or not first_request:
hypervisor_info['password'] = Interactive.ask_password('Enter hypervisor {0} password'.format(hypervisor_info.get('username')))
try:
request = urllib2.Request('https://{0}/mob'.format(hypervisor_info['ip']))
auth = base64.encodestring('{0}:{1}'.format(hypervisor_info['username'], hypervisor_info['password'])).replace('\n', '')
request.add_header("Authorization", "Basic %s" % auth)
urllib2.urlopen(request).read()
break
except Exception as ex:
first_request = False
print 'Could not connect to {0}: {1}'.format(hypervisor_info['ip'], ex)
elif hypervisor_info['type'] == 'KVM':
hypervisor_info['ip'] = cluster_ip
hypervisor_info['password'] = passwords[cluster_ip]
hypervisor_info['username'] = 'root'
logger.debug('Hypervisor at {0} with username {1}'.format(hypervisor_info['ip'], hypervisor_info['username']))
return mountpoints, hypervisor_info
@staticmethod
def _setup_first_node(cluster_ip, unique_id, mountpoints, cluster_name, node_name, hypervisor_info, arakoon_mountpoint, enable_heartbeats):
"""
Sets up the first node services. This node is always a master
"""
print '\n+++ Setting up first node +++\n'
logger.info('Setting up first node')
print 'Setting up Arakoon'
logger.info('Setting up Arakoon')
# Loading arakoon mountpoint
target_client = SSHClient.load(cluster_ip)
if arakoon_mountpoint is None:
arakoon_mountpoint = Interactive.ask_choice(mountpoints, question='Select arakoon database mountpoint',
default_value=Interactive.find_in_list(mountpoints, 'db'))
System.set_remote_config(target_client, 'ovs.core.db.arakoon.location', arakoon_mountpoint)
for cluster in SetupController.arakoon_clusters:
ports = [SetupController.arakoon_exclude_ports[cluster], SetupController.arakoon_exclude_ports[cluster] + 1]
ArakoonInstaller.create_cluster(cluster, cluster_ip, ports)
print 'Setting up logstash'
logger.info('Setting up logstash')
SetupController._replace_param_in_config(target_client, '/etc/logstash/conf.d/indexer.conf',
'<CLUSTER_NAME>', 'ovses_{0}'.format(cluster_name))
SetupController._change_service_state(target_client, 'logstash', 'restart')
print 'Adding services'
logger.info('Adding services')
params = {'<ARAKOON_NODE_ID>': unique_id,
'<MEMCACHE_NODE_IP>': cluster_ip,
'<WORKER_QUEUE>': '{0},ovs_masters'.format(unique_id)}
for service in SetupController.master_node_services + ['watcher-framework', 'watcher-volumedriver']:
logger.debug('Adding service {0}'.format(service))
SetupController._add_service(target_client, service, params)
print 'Setting up RabbitMQ'
logger.debug('Setting up RabbitMQ')
target_client.run("""cat > /etc/rabbitmq/rabbitmq.config << EOF
[
{{rabbit, [{{tcp_listeners, [{0}]}},
{{default_user, <<"{1}">>}},
{{default_pass, <<"{2}">>}}]}}
].
EOF
""".format(System.read_remote_config(target_client, 'ovs.core.broker.port'),
System.read_remote_config(target_client, 'ovs.core.broker.login'),
System.read_remote_config(target_client, 'ovs.core.broker.password')))
rabbitmq_running, rabbitmq_pid = SetupController._is_rabbitmq_running(target_client)
if rabbitmq_running and rabbitmq_pid:
print(' WARNING: an instance of rabbitmq-server is running, this needs to be stopped')
target_client.run('service rabbitmq-server stop')
time.sleep(5)
try:
target_client.run('kill {0}'.format(rabbitmq_pid))
print(' Process killed')
except SystemExit:
print(' Process already stopped')
target_client.run('rabbitmq-server -detached; sleep 5;')
users = target_client.run('rabbitmqctl list_users').split('\r\n')[1:-1]
users = [usr.split('\t')[0] for usr in users]
if 'ovs' not in users:
target_client.run('rabbitmqctl add_user {0} {1}'.format(System.read_remote_config(target_client, 'ovs.core.broker.login'),
System.read_remote_config(target_client, 'ovs.core.broker.password')))
target_client.run('rabbitmqctl set_permissions {0} ".*" ".*" ".*"'.format(System.read_remote_config(target_client, 'ovs.core.broker.login')))
target_client.run('rabbitmqctl stop; sleep 5;')
print 'Build configuration files'
logger.info('Build configuration files')
for config_file, port in SetupController.generic_configfiles.iteritems():
config = RawConfigParser()
config.add_section('main')
config.set('main', 'nodes', unique_id)
config.add_section(unique_id)
config.set(unique_id, 'location', '{0}:{1}'.format(cluster_ip, port))
SetupController._remote_config_write(target_client, config_file, config)
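        # For a first node the loop above produces client configs of this shape (the node id and
        # ip are illustrative); memcacheclient.cfg uses port 11211, rabbitmqclient.cfg port 5672:
        #   [main]
        #   nodes = 1a2b3c4d
        #   [1a2b3c4d]
        #   location = 10.100.1.10:11211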
print 'Starting model services'
logger.debug('Starting model services')
for service in SetupController.model_services:
if SetupController._has_service(target_client, service):
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
print 'Start model migration'
logger.debug('Start model migration')
from ovs.extensions.migration.migration import Migration
Migration.migrate()
print '\n+++ Finalizing setup +++\n'
logger.info('Finalizing setup')
target_client.run('mkdir -p /opt/OpenvStorage/webapps/frontend/logging')
SetupController._change_service_state(target_client, 'logstash', 'restart')
SetupController._replace_param_in_config(target_client,
'/opt/OpenvStorage/webapps/frontend/logging/config.js',
'http://"+window.location.hostname+":9200',
'http://' + cluster_ip + ':9200')
# Imports, not earlier than here, as all required config files should be in place.
from ovs.dal.hybrids.pmachine import PMachine
from ovs.dal.lists.pmachinelist import PMachineList
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.lists.storagerouterlist import StorageRouterList
print 'Configuring/updating model'
logger.info('Configuring/updating model')
pmachine = None
for current_pmachine in PMachineList.get_pmachines():
if current_pmachine.ip == hypervisor_info['ip'] and current_pmachine.hvtype == hypervisor_info['type']:
pmachine = current_pmachine
break
if pmachine is None:
pmachine = PMachine()
pmachine.ip = hypervisor_info['ip']
pmachine.username = hypervisor_info['username']
pmachine.password = hypervisor_info['password']
pmachine.hvtype = hypervisor_info['type']
pmachine.name = hypervisor_info['name']
pmachine.save()
storagerouter = None
for current_storagerouter in StorageRouterList.get_storagerouters():
if current_storagerouter.ip == cluster_ip and current_storagerouter.machine_id == unique_id:
storagerouter = current_storagerouter
break
if storagerouter is None:
storagerouter = StorageRouter()
storagerouter.name = node_name
storagerouter.machine_id = unique_id
storagerouter.ip = cluster_ip
storagerouter.node_type = 'MASTER'
storagerouter.pmachine = pmachine
storagerouter.save()
print 'Updating configuration files'
logger.info('Updating configuration files')
System.set_remote_config(target_client, 'ovs.grid.ip', cluster_ip)
System.set_remote_config(target_client, 'ovs.support.cid', str(uuid.uuid4()))
System.set_remote_config(target_client, 'ovs.support.nid', str(uuid.uuid4()))
print 'Starting services'
logger.info('Starting services for join master')
for service in SetupController.master_services:
if SetupController._has_service(target_client, service):
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
# Enable HA for the rabbitMQ queues
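        # (The policy below asks RabbitMQ to mirror every queue whose name matches
        # ^(volumerouter|ovs_.*)$ across all nodes of the RabbitMQ cluster.)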
output = target_client.run('sleep 5;rabbitmqctl set_policy ha-all "^(volumerouter|ovs_.*)$" \'{"ha-mode":"all"}\'', quiet=True).split('\r\n')
retry = False
for line in output:
if 'Error: unable to connect to node ' in line:
rabbitmq_running, rabbitmq_pid = SetupController._is_rabbitmq_running(target_client)
if rabbitmq_running and rabbitmq_pid:
target_client.run('kill {0}'.format(rabbitmq_pid), quiet=True)
print(' Process killed, restarting')
target_client.run('service ovs-rabbitmq start', quiet=True)
retry = True
break
if retry:
target_client.run('sleep 5;rabbitmqctl set_policy ha-all "^(volumerouter|ovs_.*)$" \'{"ha-mode":"all"}\'')
rabbitmq_running, rabbitmq_pid, ovs_rabbitmq_running, same_process = SetupController._is_rabbitmq_running(target_client, True)
if ovs_rabbitmq_running and same_process:
pass # Correct process is running
elif rabbitmq_running and not ovs_rabbitmq_running:
# Wrong process is running, must be stopped and correct one started
print(' WARNING: an instance of rabbitmq-server is running, this needs to be stopped, ovs-rabbitmq will be started instead')
target_client.run('service rabbitmq-server stop', quiet=True)
time.sleep(5)
try:
target_client.run('kill {0}'.format(rabbitmq_pid), quiet=True)
print(' Process killed')
except SystemExit:
print(' Process already stopped')
target_client.run('service ovs-rabbitmq start', quiet=True)
elif not rabbitmq_running and not ovs_rabbitmq_running:
# Neither running
target_client.run('service ovs-rabbitmq start', quiet=True)
for service in ['watcher-framework', 'watcher-volumedriver']:
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
logger.debug('Restarting workers')
SetupController._enable_service(target_client, 'workers')
SetupController._change_service_state(target_client, 'workers', 'restart')
SetupController._run_firstnode_hooks(cluster_ip)
target_client = SSHClient.load(cluster_ip)
System.set_remote_config(target_client, 'ovs.support.cid', str(uuid.uuid4()))
System.set_remote_config(target_client, 'ovs.support.nid', str(uuid.uuid4()))
if enable_heartbeats is None:
print '\n+++ Heartbeat +++\n'
logger.info('Heartbeat')
print Interactive.boxed_message(['Open vStorage has the option to send regular heartbeats with metadata to a centralized server.',
'The metadata contains anonymous data like Open vStorage\'s version and status of the Open vStorage services. These heartbeats are optional and can be turned on/off at any time via the GUI.'],
character=None)
enable_heartbeats = Interactive.ask_yesno('Do you want to enable Heartbeats?', default_value=True)
if enable_heartbeats is True:
System.set_remote_config(target_client, 'ovs.support.enabled', 1)
service = 'support-agent'
SetupController._add_service(target_client, service, {})
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
print '\n+++ Announcing service +++\n'
logger.info('Announcing service')
target_client.run("""cat > {3} <<EOF
<?xml version="1.0" standalone='no'?>
<!--*-nxml-*-->
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- $Id$ -->
<service-group>
<name replace-wildcards="yes">ovs_cluster_{0}_{1}_{4}</name>
<service>
<type>_ovs_{2}_node._tcp</type>
<port>443</port>
</service>
</service-group>
EOF
""".format(cluster_name, node_name, 'master', SetupController.avahi_filename, cluster_ip.replace('.', '_')))
SetupController._change_service_state(target_client, 'avahi-daemon', 'restart')
target_client.run('chown -R ovs:ovs /opt/OpenvStorage/config', quiet=True)
logger.info('First node complete')
@staticmethod
def _setup_extra_node(cluster_ip, master_ip, cluster_name, unique_id, nodes, hypervisor_info):
"""
Sets up an additional node
"""
print '\n+++ Adding extra node +++\n'
logger.info('Adding extra node')
# Logstash setup
print 'Configuring logstash'
target_client = SSHClient.load(cluster_ip)
SetupController._replace_param_in_config(target_client, '/etc/logstash/conf.d/indexer.conf',
'<CLUSTER_NAME>', 'ovses_{0}'.format(cluster_name))
SetupController._change_service_state(target_client, 'logstash', 'restart')
print 'Adding services'
logger.info('Adding services')
params = {'<ARAKOON_NODE_ID>': unique_id,
'<MEMCACHE_NODE_IP>': cluster_ip,
'<WORKER_QUEUE>': unique_id}
for service in SetupController.extra_node_services + ['watcher-framework', 'watcher-volumedriver']:
logger.debug('Adding service {0}'.format(service))
SetupController._add_service(target_client, service, params)
print 'Configuring services'
logger.info('Copying client configurations')
for cluster in SetupController.arakoon_clusters:
ArakoonInstaller.deploy_config(master_ip, cluster_ip, cluster)
for config in SetupController.generic_configfiles.keys():
master_client = SSHClient.load(master_ip)
client_config = SetupController._remote_config_read(master_client, config)
target_client = SSHClient.load(cluster_ip)
SetupController._remote_config_write(target_client, config, client_config)
client = SSHClient.load(master_ip)
cid = System.read_remote_config(client, 'ovs.support.cid')
enabled = System.read_remote_config(client, 'ovs.support.enabled')
enablesupport = System.read_remote_config(client, 'ovs.support.enablesupport')
client = SSHClient.load(cluster_ip)
System.set_remote_config(client, 'ovs.support.nid', str(uuid.uuid4()))
System.set_remote_config(client, 'ovs.support.cid', cid)
System.set_remote_config(client, 'ovs.support.enabled', enabled)
System.set_remote_config(client, 'ovs.support.enablesupport', enablesupport)
if int(enabled) > 0:
service = 'support-agent'
SetupController._add_service(client, service, {})
SetupController._enable_service(client, service)
SetupController._change_service_state(client, service, 'start')
client = SSHClient.load(cluster_ip)
node_name = client.run('hostname')
client.run('mkdir -p /opt/OpenvStorage/webapps/frontend/logging')
SetupController._change_service_state(client, 'logstash', 'restart')
SetupController._replace_param_in_config(client,
'/opt/OpenvStorage/webapps/frontend/logging/config.js',
'http://"+window.location.hostname+":9200',
'http://' + cluster_ip + ':9200')
# Imports, not earlier than here, as all required config files should be in place.
from ovs.dal.hybrids.pmachine import PMachine
from ovs.dal.lists.pmachinelist import PMachineList
from ovs.dal.hybrids.storagerouter import StorageRouter
from ovs.dal.lists.storagerouterlist import StorageRouterList
print 'Configuring/updating model'
logger.info('Configuring/updating model')
pmachine = None
for current_pmachine in PMachineList.get_pmachines():
if current_pmachine.ip == hypervisor_info['ip'] and current_pmachine.hvtype == hypervisor_info['type']:
pmachine = current_pmachine
break
if pmachine is None:
pmachine = PMachine()
pmachine.ip = hypervisor_info['ip']
pmachine.username = hypervisor_info['username']
pmachine.password = hypervisor_info['password']
pmachine.hvtype = hypervisor_info['type']
pmachine.name = hypervisor_info['name']
pmachine.save()
storagerouter = None
for current_storagerouter in StorageRouterList.get_storagerouters():
if current_storagerouter.ip == cluster_ip and current_storagerouter.machine_id == unique_id:
storagerouter = current_storagerouter
break
if storagerouter is None:
storagerouter = StorageRouter()
storagerouter.name = node_name
storagerouter.machine_id = unique_id
storagerouter.ip = cluster_ip
storagerouter.node_type = 'EXTRA'
storagerouter.pmachine = pmachine
storagerouter.save()
print 'Updating configuration files'
logger.info('Updating configuration files')
System.set_remote_config(client, 'ovs.grid.ip', cluster_ip)
print 'Starting services'
for service in ['watcher-framework', 'watcher-volumedriver']:
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
logger.debug('Restarting workers')
for node in nodes:
node_client = SSHClient.load(node)
SetupController._enable_service(node_client, 'workers')
SetupController._change_service_state(node_client, 'workers', 'restart')
SetupController._run_extranode_hooks(cluster_ip, master_ip)
print '\n+++ Announcing service +++\n'
logger.info('Announcing service')
target_client = SSHClient.load(cluster_ip)
target_client.run("""cat > {3} <<EOF
<?xml version="1.0" standalone='no'?>
<!--*-nxml-*-->
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- $Id$ -->
<service-group>
<name replace-wildcards="yes">ovs_cluster_{0}_{1}_{4}</name>
<service>
<type>_ovs_{2}_node._tcp</type>
<port>443</port>
</service>
</service-group>
EOF
""".format(cluster_name, node_name, 'extra', SetupController.avahi_filename, cluster_ip.replace('.', '_')))
SetupController._change_service_state(target_client, 'avahi-daemon', 'restart')
target_client.run('chown -R ovs:ovs /opt/OpenvStorage/config', quiet=True)
logger.info('Extra node complete')
@staticmethod
def promote_node():
"""
Promotes the local node
"""
print Interactive.boxed_message(['Open vStorage Setup - Promote'])
logger.info('Starting Open vStorage Setup - promote')
try:
print '\n+++ Collecting information +++\n'
logger.info('Collecting information')
if not os.path.exists(SetupController.avahi_filename):
raise RuntimeError('No local OVS setup found.')
with open(SetupController.avahi_filename, 'r') as avahi_file:
avahi_contents = avahi_file.read()
if '_ovs_master_node._tcp' in avahi_contents:
raise RuntimeError('This node is already master.')
match_groups = re.search('>ovs_cluster_(?P<cluster>[^_]+)_.+?<', avahi_contents).groupdict()
if 'cluster' not in match_groups:
raise RuntimeError('No cluster information found.')
cluster_name = match_groups['cluster']
target_password = Interactive.ask_password('Enter the root password for this node')
target_client = SSHClient.load('127.0.0.1', target_password)
discovery_result = SetupController._discover_nodes(target_client)
master_nodes = [this_node_name for this_node_name, node_properties in discovery_result[cluster_name].iteritems()
if node_properties.get('type', None) == 'master']
nodes = [node_property['ip'] for node_property in discovery_result[cluster_name].values()]
if len(master_nodes) == 0:
raise RuntimeError('No master node could be found in cluster {0}'.format(cluster_name))
master_ip = discovery_result[cluster_name][master_nodes[0]]['ip']
ovs_config = SetupController._remote_config_read(target_client, SetupController.ovs_config_filename)
unique_id = ovs_config.get('core', 'uniqueid')
ip = ovs_config.get('grid', 'ip')
nodes.append(ip) # The client node is never included in the discovery results
SetupController._promote_node(ip, master_ip, cluster_name, nodes, unique_id, None, None)
print ''
print Interactive.boxed_message(['Promote complete.'])
logger.info('Setup complete - promote')
except Exception as exception:
print '' # Spacing
print Interactive.boxed_message(['An unexpected error occurred:', str(exception)])
logger.exception('Unexpected error')
logger.error(str(exception))
sys.exit(1)
except KeyboardInterrupt:
print ''
print ''
print Interactive.boxed_message(['This setup was aborted. Open vStorage may be in an inconsistent state, make sure to validate the installation.'])
logger.error('Keyboard interrupt')
sys.exit(1)
@staticmethod
def _promote_node(cluster_ip, master_ip, cluster_name, nodes, unique_id, mountpoints, arakoon_mountpoint):
"""
Promotes a given node
"""
from ovs.dal.lists.storagerouterlist import StorageRouterList
print '\n+++ Promoting node +++\n'
logger.info('Promoting node')
target_client = SSHClient.load(cluster_ip)
node_name = target_client.run('hostname')
storagerouter = StorageRouterList.get_by_machine_id(unique_id)
storagerouter.node_type = 'MASTER'
storagerouter.save()
# Find other (arakoon) master nodes
master_nodes = []
for cluster in SetupController.arakoon_clusters:
config = ArakoonInstaller.get_config_from(cluster, master_ip)
master_nodes = [config.get(node.strip(), 'ip').strip() for node in config.get('global', 'cluster').split(',')]
if cluster_ip in master_nodes:
master_nodes.remove(cluster_ip)
if len(master_nodes) == 0:
raise RuntimeError('There should be at least one other master node')
# Logstash setup
target_client = SSHClient.load(cluster_ip)
SetupController._replace_param_in_config(target_client, '/etc/logstash/conf.d/indexer.conf',
'<CLUSTER_NAME>', 'ovses_{0}'.format(cluster_name))
SetupController._change_service_state(target_client, 'logstash', 'restart')
print 'Adding services'
logger.info('Adding services')
params = {'<ARAKOON_NODE_ID>': unique_id,
'<MEMCACHE_NODE_IP>': cluster_ip,
'<WORKER_QUEUE>': '{0},ovs_masters'.format(unique_id)}
for service in SetupController.master_node_services + ['watcher-framework', 'watcher-volumedriver']:
logger.debug('Adding service {0}'.format(service))
SetupController._add_service(target_client, service, params)
print 'Joining arakoon cluster'
logger.info('Joining arakoon cluster')
# Loading arakoon mountpoint
target_client = SSHClient.load(cluster_ip)
if arakoon_mountpoint is None:
if mountpoints:
manual = 'Enter custom path'
mountpoints.sort()
mountpoints = [manual] + mountpoints
arakoon_mountpoint = Interactive.ask_choice(mountpoints, question='Select arakoon database mountpoint',
default_value=Interactive.find_in_list(mountpoints, 'db'),
sort_choices=False)
if arakoon_mountpoint == manual:
arakoon_mountpoint = None
if arakoon_mountpoint is None:
while True:
arakoon_mountpoint = Interactive.ask_string('Enter arakoon database path').strip().rstrip('/')
if target_client.dir_exists(arakoon_mountpoint):
break
else:
print ' Invalid path, please retry'
System.set_remote_config(target_client, 'ovs.core.db.arakoon.location', arakoon_mountpoint)
for cluster in SetupController.arakoon_clusters:
ports = [SetupController.arakoon_exclude_ports[cluster], SetupController.arakoon_exclude_ports[cluster] + 1]
ArakoonInstaller.extend_cluster(master_ip, cluster_ip, cluster, ports)
print 'Distribute configuration files'
logger.info('Distribute configuration files')
for config_file, port in SetupController.generic_configfiles.iteritems():
master_client = SSHClient.load(master_ip)
config = SetupController._remote_config_read(master_client, config_file)
config_nodes = [n.strip() for n in config.get('main', 'nodes').split(',')]
if unique_id not in config_nodes:
config.set('main', 'nodes', ', '.join(config_nodes + [unique_id]))
config.add_section(unique_id)
config.set(unique_id, 'location', '{0}:{1}'.format(cluster_ip, port))
for node in nodes:
node_client = SSHClient.load(node)
SetupController._remote_config_write(node_client, config_file, config)
print 'Restarting master node services'
logger.info('Restarting master node services')
for cluster in SetupController.arakoon_clusters:
ArakoonInstaller.restart_cluster_add(cluster, master_nodes, cluster_ip)
PersistentFactory.store = None
VolatileFactory.store = None
print 'Setting up RabbitMQ'
logger.debug('Setting up RMQ')
target_client = SSHClient.load(cluster_ip)
target_client.run("""cat > /etc/rabbitmq/rabbitmq.config << EOF
[
{{rabbit, [{{tcp_listeners, [{0}]}},
{{default_user, <<"{1}">>}},
{{default_pass, <<"{2}">>}}]}}
].
EOF
""".format(System.read_remote_config(target_client, 'ovs.core.broker.port'),
System.read_remote_config(target_client, 'ovs.core.broker.login'),
System.read_remote_config(target_client, 'ovs.core.broker.password')))
rabbitmq_running, rabbitmq_pid = SetupController._is_rabbitmq_running(target_client)
if rabbitmq_running and rabbitmq_pid:
print(' WARNING: an instance of rabbitmq-server is running, this needs to be stopped')
target_client.run('service rabbitmq-server stop')
time.sleep(5)
try:
target_client.run('kill {0}'.format(rabbitmq_pid))
print(' Process killed')
except SystemExit:
print(' Process already stopped')
target_client.run('rabbitmq-server -detached; sleep 5;')
users = target_client.run('rabbitmqctl list_users').split('\r\n')[1:-1]
users = [usr.split('\t')[0] for usr in users]
if 'ovs' not in users:
target_client.run('rabbitmqctl add_user {0} {1}'.format(System.read_remote_config(target_client, 'ovs.core.broker.login'),
System.read_remote_config(target_client, 'ovs.core.broker.password')))
target_client.run('rabbitmqctl set_permissions {0} ".*" ".*" ".*"'.format(System.read_remote_config(target_client, 'ovs.core.broker.login')))
target_client.run('rabbitmqctl stop; sleep 5;')
# Copy rabbitmq cookie
logger.debug('Copying RMQ cookie')
rabbitmq_cookie_file = '/var/lib/rabbitmq/.erlang.cookie'
master_client = SSHClient.load(master_ip)
contents = master_client.file_read(rabbitmq_cookie_file)
master_hostname = master_client.run('hostname')
target_client = SSHClient.load(cluster_ip)
target_client.dir_ensure(os.path.dirname(rabbitmq_cookie_file), True)
target_client.file_write(rabbitmq_cookie_file, contents)
target_client.file_attribs(rabbitmq_cookie_file, mode=400)
target_client.run('rabbitmq-server -detached; sleep 5; rabbitmqctl stop_app; sleep 5;')
target_client.run('rabbitmqctl join_cluster rabbit@{}; sleep 5;'.format(master_hostname))
target_client.run('rabbitmqctl stop; sleep 5;')
# Enable HA for the rabbitMQ queues
SetupController._change_service_state(target_client, 'rabbitmq', 'start')
output = target_client.run('sleep 5;rabbitmqctl set_policy ha-all "^(volumerouter|ovs_.*)$" \'{"ha-mode":"all"}\'', quiet=True).split('\r\n')
retry = False
for line in output:
if 'Error: unable to connect to node ' in line:
rabbitmq_running, rabbitmq_pid = SetupController._is_rabbitmq_running(target_client)
if rabbitmq_running and rabbitmq_pid:
target_client.run('kill {0}'.format(rabbitmq_pid), quiet=True)
print(' Process killed, restarting')
target_client.run('service ovs-rabbitmq start', quiet=True)
retry = True
break
if retry:
target_client.run('sleep 5;rabbitmqctl set_policy ha-all "^(volumerouter|ovs_.*)$" \'{"ha-mode":"all"}\'')
rabbitmq_running, rabbitmq_pid, ovs_rabbitmq_running, same_process = SetupController._is_rabbitmq_running(
target_client, True)
if ovs_rabbitmq_running and same_process:
pass # Correct process is running
elif rabbitmq_running and not ovs_rabbitmq_running:
# Wrong process is running, must be stopped and correct one started
print(' WARNING: an instance of rabbitmq-server is running, this needs to be stopped, ovs-rabbitmq will be started instead')
target_client.run('service rabbitmq-server stop', quiet=True)
time.sleep(5)
try:
target_client.run('kill {0}'.format(rabbitmq_pid), quiet=True)
print(' Process killed')
except SystemExit:
print(' Process already stopped')
target_client.run('service ovs-rabbitmq start', quiet=True)
elif not rabbitmq_running and not ovs_rabbitmq_running:
# Neither running
target_client.run('service ovs-rabbitmq start', quiet=True)
print 'Update existing vPools'
logger.info('Update existing vPools')
for node in nodes:
client_node = SSHClient.load(node)
update_voldrv = """
import os
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx
arakoon_cluster_config = ArakoonManagementEx().getCluster('voldrv').getClientConfig()
arakoon_nodes = []
for node_id, node_config in arakoon_cluster_config.iteritems():
arakoon_nodes.append({'node_id': node_id, 'host': node_config[0][0], 'port': node_config[1]})
configuration_dir = '{0}/storagedriver/storagedriver'.format(Configuration.get('ovs.core.cfgdir'))
if not os.path.exists(configuration_dir):
os.makedirs(configuration_dir)
for json_file in os.listdir(configuration_dir):
if json_file.endswith('.json'):
storagedriver_config = StorageDriverConfiguration('storagedriver', json_file.replace('.json', ''))
storagedriver_config.load()
storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id='voldrv',
vregistry_arakoon_cluster_nodes=arakoon_nodes)
storagedriver_config.save()
"""
SetupController._exec_python(client_node, update_voldrv)
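            # Each entry appended to arakoon_nodes in the script above has the shape
            # {'node_id': <arakoon node id>, 'host': <ip>, 'port': <client port>}; these are
            # pushed into every vpool's storagedriver json via configure_volume_registry.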
for node in nodes:
node_client = SSHClient.load(node)
SetupController._configure_amqp_to_volumedriver(node_client)
print 'Starting services'
target_client = SSHClient.load(cluster_ip)
logger.info('Starting services')
for service in SetupController.master_services:
if SetupController._has_service(target_client, service):
SetupController._enable_service(target_client, service)
SetupController._change_service_state(target_client, service, 'start')
print 'Restarting services'
SetupController._change_service_state(target_client, 'watcher-volumedriver', 'restart')
for node in nodes:
node_client = SSHClient.load(node)
SetupController._change_service_state(node_client, 'watcher-framework', 'restart')
if SetupController._run_promote_hooks(cluster_ip, master_ip):
print 'Restarting services'
for node in nodes:
node_client = SSHClient.load(node)
SetupController._change_service_state(node_client, 'watcher-framework', 'restart')
print '\n+++ Announcing service +++\n'
logger.info('Announcing service')
target_client = SSHClient.load(cluster_ip)
target_client.run("""cat > {3} <<EOF
<?xml version="1.0" standalone='no'?>
<!--*-nxml-*-->
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- $Id$ -->
<service-group>
<name replace-wildcards="yes">ovs_cluster_{0}_{1}_{4}</name>
<service>
<type>_ovs_{2}_node._tcp</type>
<port>443</port>
</service>
</service-group>
EOF
""".format(cluster_name, node_name, 'master', SetupController.avahi_filename, cluster_ip.replace('.', '_')))
SetupController._change_service_state(target_client, 'avahi-daemon', 'restart')
target_client.run('chown -R ovs:ovs /opt/OpenvStorage/config', quiet=True)
logger.info('Promote complete')
@staticmethod
def demote_node():
"""
Demotes the local node
"""
print Interactive.boxed_message(['Open vStorage Setup - Demote'])
logger.info('Starting Open vStorage Setup - demote')
try:
print '\n+++ Collecting information +++\n'
logger.info('Collecting information')
if not os.path.exists(SetupController.avahi_filename):
raise RuntimeError('No local OVS setup found.')
with open(SetupController.avahi_filename, 'r') as avahi_file:
avahi_contents = avahi_file.read()
if '_ovs_master_node._tcp' not in avahi_contents:
raise RuntimeError('This node should be a master.')
match_groups = re.search('>ovs_cluster_(?P<cluster>[^_]+)_.+?<', avahi_contents).groupdict()
if 'cluster' not in match_groups:
raise RuntimeError('No cluster information found.')
cluster_name = match_groups['cluster']
target_password = Interactive.ask_password('Enter the root password for this node')
target_client = SSHClient.load('127.0.0.1', target_password)
discovery_result = SetupController._discover_nodes(target_client)
master_nodes = [this_node_name for this_node_name, node_properties in
discovery_result[cluster_name].iteritems()
if node_properties.get('type', None) == 'master']
nodes = [node_property['ip'] for node_property in discovery_result[cluster_name].values()]
if len(master_nodes) == 0:
raise RuntimeError('It is not possible to remove the only master in cluster {0}'.format(cluster_name))
master_ip = discovery_result[cluster_name][master_nodes[0]]['ip']
ovs_config = SetupController._remote_config_read(target_client, SetupController.ovs_config_filename)
unique_id = ovs_config.get('core', 'uniqueid')
ip = ovs_config.get('grid', 'ip')
nodes.append(ip) # The client node is never included in the discovery results
SetupController._demote_node(ip, master_ip, cluster_name, nodes, unique_id)
print ''
print Interactive.boxed_message(['Demote complete.'])
logger.info('Setup complete - demote')
except Exception as exception:
print '' # Spacing
print Interactive.boxed_message(['An unexpected error occurred:', str(exception)])
logger.exception('Unexpected error')
logger.error(str(exception))
sys.exit(1)
except KeyboardInterrupt:
print ''
print ''
print Interactive.boxed_message(['This setup was aborted. Open vStorage may be in an inconsistent state, make sure to validate the installation.'])
logger.error('Keyboard interrupt')
sys.exit(1)
@staticmethod
def _demote_node(cluster_ip, master_ip, cluster_name, nodes, unique_id):
"""
Demotes a given node
"""
from ovs.dal.lists.storagerouterlist import StorageRouterList
print '\n+++ Demoting node +++\n'
logger.info('Demoting node')
target_client = SSHClient.load(cluster_ip)
node_name = target_client.run('hostname')
storagerouter = StorageRouterList.get_by_machine_id(unique_id)
storagerouter.node_type = 'EXTRA'
storagerouter.save()
# Find other (arakoon) master nodes
master_nodes = []
for cluster in SetupController.arakoon_clusters:
config = ArakoonInstaller.get_config_from(cluster, master_ip)
master_nodes = [config.get(node.strip(), 'ip').strip() for node in
config.get('global', 'cluster').split(',')]
if cluster_ip in master_nodes:
master_nodes.remove(cluster_ip)
if len(master_nodes) == 0:
raise RuntimeError('There should be at least one other master node')
print 'Leaving arakoon cluster'
logger.info('Leaving arakoon cluster')
for cluster in SetupController.arakoon_clusters:
ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, cluster)
print 'Update existing vPools'
logger.info('Update existing vPools')
for node in nodes:
client_node = SSHClient.load(node)
update_voldrv = """
import os
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
from ovs.extensions.db.arakoon.ArakoonManagement import ArakoonManagementEx
arakoon_cluster_config = ArakoonManagementEx().getCluster('voldrv').getClientConfig()
arakoon_nodes = []
for node_id, node_config in arakoon_cluster_config.iteritems():
arakoon_nodes.append({'node_id': node_id, 'host': node_config[0][0], 'port': node_config[1]})
configuration_dir = '{0}/storagedriver/storagedriver'.format(Configuration.get('ovs.core.cfgdir'))
if not os.path.exists(configuration_dir):
os.makedirs(configuration_dir)
for json_file in os.listdir(configuration_dir):
if json_file.endswith('.json'):
storagedriver_config = StorageDriverConfiguration('storagedriver', json_file.replace('.json', ''))
storagedriver_config.load()
storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id='voldrv',
vregistry_arakoon_cluster_nodes=arakoon_nodes)
storagedriver_config.save()
"""
SetupController._exec_python(client_node, update_voldrv)
for node in nodes:
node_client = SSHClient.load(node)
SetupController._configure_amqp_to_volumedriver(node_client)
print 'Distribute configuration files'
logger.info('Distribute configuration files')
for config_file, port in SetupController.generic_configfiles.iteritems():
master_client = SSHClient.load(master_ip)
config = SetupController._remote_config_read(master_client, config_file)
config_nodes = [n.strip() for n in config.get('main', 'nodes').split(',')]
if unique_id in config_nodes:
config_nodes.remove(unique_id)
config.set('main', 'nodes', ', '.join(config_nodes))
config.remove_section(unique_id)
for node in nodes:
node_client = SSHClient.load(node)
SetupController._remote_config_write(node_client, config_file, config)
print 'Restarting master node services'
logger.info('Restarting master node services')
remaining_nodes = nodes[:]
if cluster_ip in remaining_nodes:
remaining_nodes.remove(cluster_ip)
for cluster in SetupController.arakoon_clusters:
ArakoonInstaller.restart_cluster_remove(cluster, remaining_nodes)
PersistentFactory.store = None
VolatileFactory.store = None
print 'Removing/unconfiguring RabbitMQ'
logger.debug('Removing/unconfiguring RabbitMQ')
target_client = SSHClient.load(cluster_ip)
if SetupController._has_service(target_client, 'rabbitmq'):
target_client.run('rabbitmq-server -detached; sleep 5; rabbitmqctl stop_app; sleep 5;')
target_client.run('rabbitmqctl reset; sleep 5;')
target_client.run('rabbitmqctl stop; sleep 5;')
SetupController._change_service_state(target_client, 'rabbitmq', 'stop')
SetupController._remove_service(target_client, 'rabbitmq')
target_client.file_unlink("/var/lib/rabbitmq/.erlang.cookie")
print 'Removing services'
logger.info('Removing services')
for service in [s for s in SetupController.master_node_services if s not in SetupController.extra_node_services]:
if SetupController._has_service(target_client, service):
logger.debug('Removing service {0}'.format(service))
SetupController._change_service_state(target_client, service, 'stop')
SetupController._remove_service(target_client, service)
params = {'<ARAKOON_NODE_ID>': unique_id,
'<MEMCACHE_NODE_IP>': cluster_ip,
'<WORKER_QUEUE>': '{0}'.format(unique_id)}
if SetupController._has_service(target_client, 'workers'):
SetupController._add_service(target_client, 'workers', params)
print 'Restarting services'
logger.debug('Restarting services')
for node in master_nodes:
node_client = SSHClient.load(node)
for service in [s for s in SetupController.master_node_services if s not in SetupController.master_services]:
SetupController._change_service_state(node_client, service, 'restart')
target_client = SSHClient.load(cluster_ip)
SetupController._change_service_state(target_client, 'watcher-volumedriver', 'restart')
for node in nodes:
node_client = SSHClient.load(node)
SetupController._change_service_state(node_client, 'watcher-framework', 'restart')
if SetupController._run_demote_hooks(cluster_ip, master_ip):
print 'Restarting services'
for node in nodes:
node_client = SSHClient.load(node)
SetupController._change_service_state(node_client, 'watcher-framework', 'restart')
print '\n+++ Announcing service +++\n'
logger.info('Announcing service')
target_client = SSHClient.load(cluster_ip)
target_client.run("""cat > {3} <<EOF
<?xml version="1.0" standalone='no'?>
<!--*-nxml-*-->
<!DOCTYPE service-group SYSTEM "avahi-service.dtd">
<!-- $Id$ -->
<service-group>
<name replace-wildcards="yes">ovs_cluster_{0}_{1}_{4}</name>
<service>
<type>_ovs_{2}_node._tcp</type>
<port>443</port>
</service>
</service-group>
EOF
""".format(cluster_name, node_name, 'extra', SetupController.avahi_filename, cluster_ip.replace('.', '_')))
SetupController._change_service_state(target_client, 'avahi-daemon', 'restart')
logger.info('Demote complete')
@staticmethod
def _add_service(client, name, params=None):
if params is None:
params = {}
run_service_script = """
from ovs.plugin.provider.service import Service
Service.add_service('', '{0}', '', '', {1})
"""
_ = SetupController._exec_python(client,
run_service_script.format(name, params))
@staticmethod
def _remove_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
Service.remove_service('', '{0}')
"""
_ = SetupController._exec_python(client,
run_service_script.format(name))
@staticmethod
def _disable_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
Service.disable_service('{0}')
"""
_ = SetupController._exec_python(client,
run_service_script.format(name))
@staticmethod
def _enable_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
Service.enable_service('{0}')
"""
_ = SetupController._exec_python(client,
run_service_script.format(name))
@staticmethod
def _has_service(client, name):
has_service_script = """
from ovs.plugin.provider.service import Service
print Service.has_service('{0}')
"""
status = SetupController._exec_python(client,
has_service_script.format(name))
if status == 'True':
return True
return False
@staticmethod
def _get_service_status(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
print Service.get_service_status('{0}')
"""
status = SetupController._exec_python(client,
run_service_script.format(name))
if status == 'True':
return True
if status == 'False':
return False
return None
@staticmethod
def _restart_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
print Service.restart_service('{0}')
"""
status = SetupController._exec_python(client,
run_service_script.format(name))
return status
@staticmethod
def _start_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
print Service.start_service('{0}')
"""
status = SetupController._exec_python(client,
run_service_script.format(name))
return status
@staticmethod
def _stop_service(client, name):
run_service_script = """
from ovs.plugin.provider.service import Service
print Service.stop_service('{0}')
"""
status = SetupController._exec_python(client,
run_service_script.format(name))
return status
@staticmethod
def _get_disk_configuration(client):
"""
Connect to target host and retrieve sata/ssd/raid configuration
"""
remote_script = """
from string import digits
import pyudev
import glob
import re
import os
blk_patterns = ['sd.*', 'fio.*', 'vd.*', 'xvd.*']
blk_devices = dict()
def get_boot_device():
mtab = open('/etc/mtab').read().splitlines()
for line in mtab:
if ' / ' in line:
boot_partition = line.split()[0]
return boot_partition.lstrip('/dev/').translate(None, digits)
boot_device = get_boot_device()
def get_value(device, property):
return str(open('/sys/block/' + device + '/' + property).read())
def get_size_in_bytes(device):
sectors = get_value(device, 'size')
sector_size = get_value(device, 'queue/hw_sector_size')
return float(sectors) * float(sector_size)
def get_device_type(device):
'''
determine ssd or disk = accurate
determine ssd == accelerator = best guess
Returns: disk|ssd|accelerator|unknown
'''
rotational = get_value(device, 'queue/rotational')
if '1' in str(rotational):
return 'disk'
else:
return 'ssd'
def is_part_of_sw_raid(device):
#ID_FS_TYPE linux_raid_member
#ID_FS_USAGE raid
context = pyudev.Context()
devices = context.list_devices(subsystem='block')
is_raid_member = False
for entry in devices:
if device not in entry['DEVNAME']:
continue
if entry['DEVTYPE']=='partition' and 'ID_FS_USAGE' in entry.keys():
if 'raid' in entry['ID_FS_USAGE'].lower():
is_raid_member = True
return is_raid_member
def get_drive_model(device):
context = pyudev.Context()
devices = context.list_devices(subsystem='block')
for entry in devices:
if device not in entry['DEVNAME']:
continue
if entry['DEVTYPE']=='disk' and 'ID_MODEL' in entry.keys():
return str(entry['ID_MODEL'])
if 'fio' in device:
return 'FUSIONIO'
return ''
def get_device_details(device):
return {'size' : get_size_in_bytes(device),
'type' : get_device_type(device),
'software_raid' : is_part_of_sw_raid(device),
'model' : get_drive_model(device),
'boot_device' : device == boot_device
}
for device_path in glob.glob('/sys/block/*'):
device = os.path.basename(device_path)
for pattern in blk_patterns:
if re.compile(pattern).match(device):
blk_devices[device] = get_device_details(device)
print blk_devices
"""
blk_devices = eval(SetupController._exec_python(client, remote_script))
# cross-check ssd devices - flawed detection on vmware
for disk in blk_devices.keys():
output = str(client.run("hdparm -I {0} 2> /dev/null | grep 'Solid State' || true".format('/dev/' + disk)).strip())
if 'Solid State' in output and blk_devices[disk]['type'] == 'disk':
print 'Updating device type for /dev/{0} to ssd'.format(disk)
blk_devices[disk]['type'] = 'ssd'
return blk_devices
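        # Illustrative sketch (not in the original source): the dictionary returned
        # above has one entry per detected block device; the device names, sizes and
        # models below are made-up example values.
        #
        #   {'sda': {'size': 480103981056.0, 'type': 'ssd', 'software_raid': False,
        #            'model': 'INTEL_SSDSC2BB480G4', 'boot_device': True},
        #    'sdb': {'size': 2000398934016.0, 'type': 'disk', 'software_raid': False,
        #            'model': 'ST2000NM0033', 'boot_device': False}}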
@staticmethod
def _generate_default_partition_layout(blk_devices):
"""
Process detected block devices while
- ignoring bootdevice unless it's the only one
- ignoring devices part of a software raid
"""
mountpoints_to_allocate = {'/mnt/md': {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'mdpath'},
'/mnt/db': {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'db'},
'/mnt/cache1': dict(SetupController.PARTITION_DEFAULTS),
'/mnt/bfs': {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'backendfs'},
'/var/tmp': {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'tempfs'}}
selected_devices = dict(blk_devices)
skipped_devices = set()
for device, values in blk_devices.iteritems():
if values['boot_device']:
skipped_devices.add(device)
if values['software_raid']:
skipped_devices.add(device)
for device in skipped_devices:
selected_devices.pop(device)
ssd_devices = list()
disk_devices = list()
for device, values in selected_devices.iteritems():
if values['type'] == 'ssd':
ssd_devices.append('/dev/' + device)
if values['type'] == 'disk':
disk_devices.append('/dev/' + device)
nr_of_ssds = len(ssd_devices)
nr_of_disks = len(disk_devices)
print '{0} ssd devices: {1}'.format(nr_of_ssds, str(ssd_devices))
print '{0} sata drives: {1}'.format(nr_of_disks, str(disk_devices))
print
if nr_of_disks == 1:
mountpoints_to_allocate['/var/tmp']['device'] = disk_devices[0]
mountpoints_to_allocate['/var/tmp']['percentage'] = 20
elif nr_of_disks >= 2:
mountpoints_to_allocate['/var/tmp']['device'] = disk_devices[0]
mountpoints_to_allocate['/var/tmp']['percentage'] = 100
if nr_of_ssds == 1:
mountpoints_to_allocate['/mnt/cache1']['device'] = ssd_devices[0]
mountpoints_to_allocate['/mnt/cache1']['percentage'] = 50
mountpoints_to_allocate['/mnt/md']['device'] = ssd_devices[0]
mountpoints_to_allocate['/mnt/md']['percentage'] = 25
mountpoints_to_allocate['/mnt/db']['device'] = ssd_devices[0]
mountpoints_to_allocate['/mnt/db']['percentage'] = 25
elif nr_of_ssds >= 2:
for count in xrange(nr_of_ssds):
marker = str('/mnt/cache' + str(count + 1))
mountpoints_to_allocate[marker] = dict(SetupController.PARTITION_DEFAULTS)
mountpoints_to_allocate[marker]['device'] = ssd_devices[count]
mountpoints_to_allocate[marker]['label'] = 'cache' + str(count + 1)
if count < 2:
cache_size = 75
else:
cache_size = 100
mountpoints_to_allocate[marker]['percentage'] = cache_size
mountpoints_to_allocate['/mnt/md']['device'] = ssd_devices[0]
mountpoints_to_allocate['/mnt/md']['percentage'] = 25
mountpoints_to_allocate['/mnt/db']['device'] = ssd_devices[1]
mountpoints_to_allocate['/mnt/db']['percentage'] = 25
return mountpoints_to_allocate, skipped_devices
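        # Illustrative sketch (not in the original source): with one non-boot SSD
        # (/dev/sdb) and one non-boot SATA disk (/dev/sda), the branches above would
        # produce roughly the following allocation (the cache entry defaults come from
        # SetupController.PARTITION_DEFAULTS, which is defined elsewhere):
        #
        #   '/mnt/cache1' -> {'device': '/dev/sdb', 'percentage': 50, ...}
        #   '/mnt/md'     -> {'device': '/dev/sdb', 'percentage': 25, 'label': 'mdpath'}
        #   '/mnt/db'     -> {'device': '/dev/sdb', 'percentage': 25, 'label': 'db'}
        #   '/var/tmp'    -> {'device': '/dev/sda', 'percentage': 20, 'label': 'tempfs'}
        #   '/mnt/bfs'    -> {'device': 'DIR_ONLY', 'percentage': 'NA', 'label': 'backendfs'}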
@staticmethod
def _partition_disks(client, partition_layout):
fstab_entry = 'LABEL={0} {1} ext4 defaults,nobootwait,noatime,discard 0 2'
fstab_separator = ('# BEGIN Open vStorage', '# END Open vStorage') # Don't change, for backwards compatibility
mounted = [device.strip() for device in client.run("cat /etc/mtab | cut -d ' ' -f 2").strip().split('\n')]
unique_disks = set()
for mp, values in partition_layout.iteritems():
unique_disks.add(values['device'])
# Umount partitions
if mp in mounted:
print 'Unmounting {0}'.format(mp)
client.run('umount {0}'.format(mp))
mounted_devices = [device.strip() for device in client.run("cat /etc/mtab | cut -d ' ' -f 1").strip().split('\n')]
for mounted_device in mounted_devices:
for chosen_device in unique_disks:
if chosen_device in mounted_device:
print 'Unmounting {0}'.format(mounted_device)
client.run('umount {0}'.format(mounted_device))
# Wipe disks
for disk in unique_disks:
if disk == 'DIR_ONLY':
continue
client.run('parted {0} -s mklabel gpt'.format(disk))
# Pre process partition info (disk as key)
mountpoints = partition_layout.keys()
mountpoints.sort()
partitions_by_disk = dict()
for mp in mountpoints:
partition = partition_layout[mp]
disk = partition['device']
percentage = partition['percentage']
label = partition['label']
if disk in partitions_by_disk:
partitions_by_disk[disk].append((mp, percentage, label))
else:
partitions_by_disk[disk] = [(mp, percentage, label)]
# Partition and format disks
fstab_entries = ['{0} - Do not edit anything in this block'.format(fstab_separator[0])]
for disk, partitions in partitions_by_disk.iteritems():
if disk == 'DIR_ONLY':
for directory, _, _ in partitions:
client.run('mkdir -p {0}'.format(directory))
continue
start = '2MB'
count = 1
for mp, percentage, label in partitions:
if start == '2MB':
size_in_percentage = int(percentage)
client.run('parted {0} -s mkpart {1} {2} {3}%'.format(disk, label, start, size_in_percentage))
else:
size_in_percentage = int(start) + int(percentage)
client.run('parted {0} -s mkpart {1} {2}% {3}%'.format(disk, label, start, size_in_percentage))
client.run('mkfs.ext4 -q {0} -L {1}'.format(disk + str(count), label))
fstab_entries.append(fstab_entry.format(label, mp))
count += 1
start = size_in_percentage
fstab_entries.append(fstab_separator[1])
# Update fstab
original_content = [line.strip() for line in client.file_read('/etc/fstab').strip().split('\n')]
new_content = []
skip = False
for line in original_content:
if skip is False:
if line.startswith(fstab_separator[0]):
skip = True
else:
new_content.append(line)
elif line.startswith(fstab_separator[1]):
skip = False
new_content += fstab_entries
client.file_write('/etc/fstab', '{0}\n'.format('\n'.join(new_content)))
try:
client.run('timeout -k 9 5s mountall -q || true')
except:
pass # The above might fail sometimes. We don't mind and will try again
client.run('swapoff --all')
client.run('mountall -q')
client.run('chmod 1777 /var/tmp')
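        # Illustrative sketch (not in the original source): for labels 'cache1' on
        # /mnt/cache1 and 'tempfs' on /var/tmp, the block written between the fstab
        # separators above would look like:
        #
        #   # BEGIN Open vStorage - Do not edit anything in this block
        #   LABEL=cache1 /mnt/cache1 ext4 defaults,nobootwait,noatime,discard 0 2
        #   LABEL=tempfs /var/tmp ext4 defaults,nobootwait,noatime,discard 0 2
        #   # END Open vStorage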
@staticmethod
    def apply_flexible_disk_layout(client, auto_config=False, default=None):
import choice
blk_devices = SetupController._get_disk_configuration(client)
skipped = set()
if not default:
default, skipped = SetupController._generate_default_partition_layout(blk_devices)
print 'Excluded: {0}'.format(skipped)
print '-> bootdisk or part of software RAID configuration'
print
device_size_map = dict()
for key, values in blk_devices.iteritems():
device_size_map['/dev/' + key] = values['size']
def show_layout(proposed):
print 'Proposed partition layout:'
keys = proposed.keys()
keys.sort()
key_map = list()
for mp in keys:
sub_keys = proposed[mp].keys()
sub_keys.sort()
mp_values = ''
if not proposed[mp]['device'] or proposed[mp]['device'] in ['DIR_ONLY']:
mp_values = ' {0} : {1:20}'.format('device', 'DIR_ONLY')
print "{0:20} : {1}".format(mp, mp_values)
key_map.append(mp)
continue
for sub_key in sub_keys:
value = str(proposed[mp][sub_key])
if sub_key == 'device' and value and value != 'DIR_ONLY':
size = device_size_map[value]
size_in_gb = int(size / 1000.0 / 1000.0 / 1000.0)
value = value + ' ({0} GB)'.format(size_in_gb)
if sub_key in ['device']:
mp_values = mp_values + ' {0} : {1:20}'.format(sub_key, value)
elif sub_key in ['label']:
mp_values = mp_values + ' {0} : {1:10}'.format(sub_key, value)
else:
mp_values = mp_values + ' {0} : {1:5}'.format(sub_key, value)
print "{0:20} : {1}".format(mp, mp_values)
key_map.append(mp)
print
return key_map
def show_submenu_layout(subitem, mountpoint):
sub_keys = subitem.keys()
sub_keys.sort()
for sub_key in sub_keys:
print "{0:15} : {1}".format(sub_key, subitem[sub_key])
print "{0:15} : {1}".format('mountpoint', mountpoint)
print
def is_device_path(value):
if re.match('/dev/[a-z][a-z][a-z]+', value):
return True
else:
return False
def is_mountpoint(value):
if re.match('/mnt/[a-z]+[0-9]*', value):
return True
else:
return False
def is_percentage(value):
try:
if 0 <= int(value) <= 100:
return True
else:
return False
except ValueError:
return False
def is_label(value):
if re.match('[a-z]+[0-9]*', value):
return True
else:
return False
def validate_subitem(subitem, answer):
if subitem in ['percentage']:
return is_percentage(answer)
elif subitem in ['device']:
return is_device_path(answer)
elif subitem in ['mountpoint']:
return is_mountpoint(answer)
elif subitem in ['label']:
return is_label(answer)
return False
def _summarize_partition_percentages(layout):
total = dict()
for details in layout.itervalues():
device = details['device']
if device == 'DIR_ONLY':
continue
if details['percentage'] == 'NA':
print '>>> Invalid value {0}% for device: {1}'.format(details['percentage'], device)
time.sleep(1)
return False
percentage = int(details['percentage'])
if device in total:
total[device] += percentage
else:
total[device] = percentage
print total
for device, percentage in total.iteritems():
if is_percentage(percentage):
continue
else:
print '>>> Invalid total {0}% for device: {1}'.format(percentage, device)
time.sleep(1)
return False
return True
def process_submenu_actions(mp_to_edit):
subitem = default[mp_to_edit]
submenu_items = subitem.keys()
submenu_items.sort()
submenu_items.append('mountpoint')
submenu_items.append('finish')
print 'Mountpoint: {0}'.format(mp_to_edit)
while True:
show_submenu_layout(subitem, mp_to_edit)
subitem_chosen = choice.Menu(submenu_items).ask()
if subitem_chosen == 'finish':
break
elif subitem_chosen == 'mountpoint':
new_mountpoint = choice.Input('Enter new mountpoint: ', str).ask()
if new_mountpoint in default:
print 'New mountpoint already exists!'
else:
mp_values = default[mp_to_edit]
default.pop(mp_to_edit)
default[new_mountpoint] = mp_values
mp_to_edit = new_mountpoint
else:
answer = choice.Input('Enter new value for {}'.format(subitem_chosen)).ask()
if validate_subitem(subitem_chosen, answer):
subitem[subitem_chosen] = answer
else:
print '\n>>> Invalid entry {0} for {1}\n'.format(answer, subitem_chosen)
time.sleep(1)
if auto_config:
SetupController._partition_disks(client, default)
return default
else:
choices = show_layout(default)
while True:
menu_actions = ['Add', 'Remove', 'Update', 'Print', 'Apply', 'Quit']
menu_devices = list(choices)
menu_devices.sort()
chosen = choice.Menu(menu_actions).ask()
if chosen == 'Add':
to_add = choice.Input('Enter mountpoint to add:', str).ask()
if to_add in default:
print 'Mountpoint {0} already exists'.format(to_add)
else:
default[to_add] = dict(SetupController.PARTITION_DEFAULTS)
choices = show_layout(default)
elif chosen == 'Remove':
to_remove = choice.Input('Enter mountpoint to remove:', str).ask()
if to_remove in default:
default.pop(to_remove)
else:
print 'Mountpoint {0} not found, no action taken'.format(to_remove)
choices = show_layout(default)
elif chosen == 'Update':
print 'Choose mountpoint to update:'
to_update = choice.Menu(menu_devices).ask()
process_submenu_actions(to_update)
choices = show_layout(default)
elif chosen == 'Print':
show_layout(default)
elif chosen == 'Apply':
if not _summarize_partition_percentages(default):
                        print 'Partition totals are not within percentage range'
choices = show_layout(default)
continue
show_layout(default)
confirmation = choice.Input('Please confirm partition layout (yes/no), ALL DATA WILL BE ERASED ON THE DISKS ABOVE!', str).ask()
if confirmation.lower() == 'yes':
print 'Applying partition layout ...'
SetupController._partition_disks(client, default)
return default
else:
print 'Please confirm by typing yes'
elif chosen == 'Quit':
return 'QUIT'
@staticmethod
def _discover_nodes(client):
nodes = {}
ipaddresses = client.run("ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1").strip().split('\n')
ipaddresses = [found_ip.strip() for found_ip in ipaddresses if found_ip.strip() != '127.0.0.1']
SetupController.host_ips = set(ipaddresses)
SetupController._change_service_state(client, 'dbus', 'start')
SetupController._change_service_state(client, 'avahi-daemon', 'start')
discover_result = client.run('avahi-browse -artp 2> /dev/null | grep ovs_cluster || true')
logger.debug('Avahi discovery result:\n{0}'.format(discover_result))
for entry in discover_result.split('\n'):
entry_parts = entry.split(';')
if entry_parts[0] == '=' and entry_parts[2] == 'IPv4' and entry_parts[7] not in ipaddresses:
# =;eth0;IPv4;ovs_cluster_kenneth_ovs100;_ovs_master_node._tcp;local;ovs100.local;172.22.1.10;443;
# split(';') -> [3] = ovs_cluster_kenneth_ovs100
# [4] = _ovs_master_node._tcp -> contains _ovs_<type>_node
# [7] = 172.22.1.10 (ip)
# split('_') -> [-1] = ovs100 (node name)
# [-2] = kenneth (cluster name)
cluster_info = entry_parts[3].split('_')
cluster_name = cluster_info[2]
node_name = cluster_info[3]
if cluster_name not in nodes:
nodes[cluster_name] = {}
if node_name not in nodes[cluster_name]:
nodes[cluster_name][node_name] = { 'ip': '', 'type': '', 'ip_list': []}
try:
ip = '{}.{}.{}.{}'.format(cluster_info[4], cluster_info[5], cluster_info[6], cluster_info[7])
except IndexError:
ip = entry_parts[7]
nodes[cluster_name][node_name]['ip'] = ip
nodes[cluster_name][node_name]['type'] = entry_parts[4].split('_')[2]
nodes[cluster_name][node_name]['ip_list'].append(ip)
return nodes
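        # Illustrative sketch (not in the original source): the sample avahi record in
        # the comment above would be parsed into:
        #
        #   {'kenneth': {'ovs100': {'ip': '172.22.1.10',
        #                           'type': 'master',
        #                           'ip_list': ['172.22.1.10']}}}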
@staticmethod
def _validate_ip(ip):
regex = '^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?))$'
match = re.search(regex, ip)
return match is not None
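        # Illustrative sketch (not in the original source):
        #   SetupController._validate_ip('172.22.1.10')  # -> True
        #   SetupController._validate_ip('999.1.1.1')    # -> False (octet out of range)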
@staticmethod
def _discover_hypervisor(client):
hypervisor = None
module = client.run('lsmod | grep kvm || true').strip()
if module != '':
hypervisor = 'KVM'
else:
disktypes = client.run('dmesg | grep VMware || true').strip()
if disktypes != '':
hypervisor = 'VMWARE'
return hypervisor
@staticmethod
def _remote_config_read(client, filename):
contents = client.file_read(filename)
with open('/tmp/temp_read.cfg', 'w') as configfile:
configfile.write(contents)
config = RawConfigParser()
config.read('/tmp/temp_read.cfg')
return config
@staticmethod
def _remote_config_write(client, filename, config):
with open('/tmp/temp_write.cfg', 'w') as configfile:
config.write(configfile)
with open('/tmp/temp_write.cfg', 'r') as configfile:
contents = configfile.read()
client.file_write(filename, contents)
@staticmethod
def _replace_param_in_config(client, config_file, old_value, new_value, add=False):
if client.file_exists(config_file):
contents = client.file_read(config_file)
if new_value in contents and new_value.find(old_value) > 0:
pass
elif old_value in contents:
contents = contents.replace(old_value, new_value)
elif add:
contents += new_value + '\n'
client.file_write(config_file, contents)
@staticmethod
def _exec_python(client, script):
"""
Executes a python script on the client
"""
return client.run('python -c """{0}"""'.format(script))
@staticmethod
def _change_service_state(client, name, state):
"""
Starts/stops/restarts a service
"""
action = None
status = SetupController._get_service_status(client, name)
if status is False and state in ['start', 'restart']:
SetupController._start_service(client, name)
action = 'started'
elif status is True and state == 'stop':
SetupController._stop_service(client, name)
action = 'stopped'
elif status is True and state == 'restart':
SetupController._restart_service(client, name)
action = 'restarted'
if action is None:
print ' [{0}] {1} already {2}'.format(client.ip, name, 'running' if status is True else 'halted')
else:
timeout = 300
safetycounter = 0
while safetycounter < timeout:
status = SetupController._get_service_status(client, name)
if (status is False and state == 'stop') or (status is True and state in ['start', 'restart']):
break
safetycounter += 1
time.sleep(1)
if safetycounter == timeout:
raise RuntimeError('Service {0} could not be {1} on node {2}'.format(name, action, client.ip))
print ' [{0}] {1} {2}'.format(client.ip, name, action)
@staticmethod
def _configure_amqp_to_volumedriver(client, vpname=None):
"""
Reads out the RabbitMQ client config, using that to (re)configure the volumedriver configuration(s)
"""
remote_script = """
import os
from ConfigParser import RawConfigParser
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
protocol = Configuration.get('ovs.core.broker.protocol')
login = Configuration.get('ovs.core.broker.login')
password = Configuration.get('ovs.core.broker.password')
vpool_name = {0}
uris = []
cfg = RawConfigParser()
cfg.read('/opt/OpenvStorage/config/rabbitmqclient.cfg')
nodes = [n.strip() for n in cfg.get('main', 'nodes').split(',')]
for node in nodes:
uris.append({{'amqp_uri': '{{0}}://{{1}}:{{2}}@{{3}}'.format(protocol, login, password, cfg.get(node, 'location'))}})
configuration_dir = '{0}/storagedriver/storagedriver'.format(Configuration.get('ovs.core.cfgdir'))
if not os.path.exists(configuration_dir):
os.makedirs(configuration_dir)
for json_file in os.listdir(configuration_dir):
this_vpool_name = json_file.replace('.json', '')
if json_file.endswith('.json') and (vpool_name is None or vpool_name == this_vpool_name):
storagedriver_configuration = StorageDriverConfiguration('storagedriver', this_vpool_name)
storagedriver_configuration.load()
storagedriver_configuration.configure_event_publisher(events_amqp_routing_key=Configuration.get('ovs.core.broker.volumerouter.queue'),
events_amqp_uris=uris)
storagedriver_configuration.save()"""
SetupController._exec_python(client, remote_script.format(vpname if vpname is None else "'{0}'".format(vpname)))
@staticmethod
def _is_rabbitmq_running(client, check_ovs=False):
rabbitmq_running, rabbitmq_pid = False, 0
ovs_rabbitmq_running, pid = False, -1
output = client.run('service rabbitmq-server status', quiet=True)
if 'unrecognized service' in output:
output = None
if output:
output = output.split('\r\n')
for line in output:
if 'pid' in line:
rabbitmq_running = True
rabbitmq_pid = line.split(',')[1].replace('}', '')
else:
output = client.run('ps aux | grep rabbit@ | grep -v grep', quiet=True)
if output: # in case of error it is ''
output = output.split(' ')
if output[0] == 'rabbitmq':
rabbitmq_pid = output[1]
for item in output[2:]:
if 'erlang' in item or 'rabbitmq' in item or 'beam' in item:
rabbitmq_running = True
output = client.run('service ovs-rabbitmq status', quiet=True)
if 'stop/waiting' in output:
pass
if 'start/running' in output:
pid = output.split('process ')[1].strip()
ovs_rabbitmq_running = True
same_process = rabbitmq_pid == pid
if check_ovs:
return rabbitmq_running, rabbitmq_pid, ovs_rabbitmq_running, same_process
return rabbitmq_running, rabbitmq_pid
@staticmethod
def _run_promote_hooks(cluster_ip, master_ip):
"""
Execute promote hooks
"""
functions = SetupController._fetch_hooks('promote')
if len(functions) > 0:
print '\n+++ Running plugin hooks +++\n'
for function in functions:
function(cluster_ip=cluster_ip, master_ip=master_ip)
return len(functions) > 0
@staticmethod
def _run_demote_hooks(cluster_ip, master_ip):
"""
Execute demote hooks
"""
functions = SetupController._fetch_hooks('demote')
if len(functions) > 0:
print '\n+++ Running plugin hooks +++\n'
for function in functions:
function(cluster_ip=cluster_ip, master_ip=master_ip)
return len(functions) > 0
@staticmethod
def _run_firstnode_hooks(cluster_ip):
"""
Execute firstnode hooks
"""
functions = SetupController._fetch_hooks('firstnode')
if len(functions) > 0:
print '\n+++ Running plugin hooks +++\n'
for function in functions:
function(cluster_ip=cluster_ip)
return len(functions) > 0
@staticmethod
def _run_extranode_hooks(cluster_ip, master_ip):
"""
Execute extranode hooks
"""
functions = SetupController._fetch_hooks('extranode')
if len(functions) > 0:
print '\n+++ Running plugin hooks +++\n'
for function in functions:
function(cluster_ip=cluster_ip, master_ip=master_ip)
return len(functions) > 0
@staticmethod
def _fetch_hooks(hook_type):
"""
Load hooks
"""
functions = []
path = os.path.dirname(__file__)
for filename in os.listdir(path):
if os.path.isfile(os.path.join(path, filename)) and filename.endswith('.py') and filename != '__init__.py':
name = filename.replace('.py', '')
module = imp.load_source(name, os.path.join(path, filename))
for member in inspect.getmembers(module):
if inspect.isclass(member[1]) \
and member[1].__module__ == name \
and 'object' in [base.__name__ for base in member[1].__bases__]:
for submember in inspect.getmembers(member[1]):
if hasattr(submember[1], 'hooks') \
and isinstance(submember[1].hooks, list) \
and hook_type in submember[1].hooks:
functions.append(submember[1])
return functions
| 46.664972 | 237 | 0.607449 |
b643303859d7fb213d66ed084be5e85080c91b29 | 265 | py | Python | mainUI.py | ZubinGou/8086-emulator | 5087be61609fa571d16f34280211830746beaef1 | ["MIT"] | 39 | 2020-09-09T00:04:18.000Z | 2022-03-26T13:12:47.000Z | mainUI.py | ZubinGou/8086-emulator | 5087be61609fa571d16f34280211830746beaef1 | ["MIT"] | null | null | null | mainUI.py | ZubinGou/8086-emulator | 5087be61609fa571d16f34280211830746beaef1 | ["MIT"] | 8 | 2020-05-06T07:35:40.000Z | 2021-08-13T14:00:49.000Z |
import sys
import PyQt5.sip
from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication
from ui.mainwindow import MainWindow
if __name__ == "__main__":
app = QApplication(sys.argv)
win = MainWindow(app)
win.show()
sys.exit(app.exec_())
| 17.666667 | 40 | 0.713208 |
92207285146d0087ff415e6f8a38cd9b9bc6970b | 4,318 | py | Python | bigml/anomalyhandler.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | ["Apache-2.0"] | 1 | 2021-06-20T11:51:22.000Z | 2021-06-20T11:51:22.000Z | bigml/anomalyhandler.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | ["Apache-2.0"] | null | null | null | bigml/anomalyhandler.py | alanponce/python | 9423b4c4968b81ee14cef1ab6cd62d23dfa8bd26 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014-2016 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for anomaly detectors' REST calls
https://bigml.com/developers/anomalies
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.resourcehandler import ResourceHandler
from bigml.resourcehandler import (check_resource_type, resource_is_ready,
get_anomaly_id)
from bigml.constants import ANOMALY_PATH
class AnomalyHandler(ResourceHandler):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the AnomalyHandler. This class is intended to be
used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.anomaly_url = self.url + ANOMALY_PATH
def create_anomaly(self, datasets, args=None, wait_time=3, retries=10):
"""Creates an anomaly detector from a `dataset` or a list o `datasets`.
"""
create_args = self._set_create_from_datasets_args(
datasets, args=args, wait_time=wait_time, retries=retries)
body = json.dumps(create_args)
return self._create(self.anomaly_url, body)
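        # Illustrative usage sketch (not in the original source), assuming an
        # authenticated BigML API object that mixes in this handler and an existing
        # dataset id (the id below is a made-up example):
        #
        #   anomaly = api.create_anomaly('dataset/5af06df94e17277501000000',
        #                                args={'name': 'my anomaly detector'})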
def get_anomaly(self, anomaly, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves an anomaly detector.
The anomaly parameter should be a string containing the
anomaly id or the dict returned by create_anomaly.
As the anomaly detector is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the model values and state info
available at the time it is called.
If this is a shared anomaly detector, the username and sharing api
key must also be provided.
"""
check_resource_type(anomaly, ANOMALY_PATH,
message="A anomaly id is needed.")
anomaly_id = get_anomaly_id(anomaly)
if anomaly_id:
return self._get("%s%s" % (self.url, anomaly_id),
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def anomaly_is_ready(self, anomaly, **kwargs):
"""Checks whether an anomaly detector's status is FINISHED.
"""
check_resource_type(anomaly, ANOMALY_PATH,
message="An anomaly id is needed.")
resource = self.get_anomaly(anomaly, **kwargs)
return resource_is_ready(resource)
def list_anomalies(self, query_string=''):
"""Lists all your anomaly detectors.
"""
return self._list(self.anomaly_url, query_string)
def update_anomaly(self, anomaly, changes):
"""Updates an anomaly detector.
"""
check_resource_type(anomaly, ANOMALY_PATH,
message="An anomaly detector id is needed.")
anomaly_id = get_anomaly_id(anomaly)
if anomaly_id:
body = json.dumps(changes)
return self._update("%s%s" % (self.url, anomaly_id), body)
def delete_anomaly(self, anomaly):
"""Deletes an anomaly detector.
"""
check_resource_type(anomaly, ANOMALY_PATH,
message="An anomaly detector id is needed.")
anomaly_id = get_anomaly_id(anomaly)
if anomaly_id:
return self._delete("%s%s" % (self.url, anomaly_id))
| 36.285714 | 79 | 0.646596 |
bb1f11c5666a635dfbbc1a9992f026f6029fde45 | 3,885 | py | Python | tests/test_ffmpeg_info.py | hugovk/imageio | 98b1c18df0138b17e9182230438a90e151dc61ff | ["BSD-2-Clause"] | 1 | 2020-08-19T04:54:44.000Z | 2020-08-19T04:54:44.000Z | tests/test_ffmpeg_info.py | hugovk/imageio | 98b1c18df0138b17e9182230438a90e151dc61ff | ["BSD-2-Clause"] | null | null | null | tests/test_ffmpeg_info.py | hugovk/imageio | 98b1c18df0138b17e9182230438a90e151dc61ff | ["BSD-2-Clause"] | null | null | null |
# styletest: ignore E501
""" Tests specific to parsing ffmpeg info.
"""
import os
from pytest import skip
from imageio.testing import run_tests_if_main, need_internet
import imageio
if os.getenv("TRAVIS_OS_NAME") == "windows":
skip(
"Skip this on the Travis Windows run for now, see #408", allow_module_level=True
)
def dedent(text, dedent=8):
lines = [line[dedent:] for line in text.splitlines()]
text = "\n".join(lines)
return text.strip() + "\n"
def test_webcam_parse_device_names():
# Ensure that the device list parser returns all video devices (issue #283)
sample = dedent(
r"""
ffmpeg version 3.2.4 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 6.3.0 (GCC)
configuration: --enable-gpl --enable-version3 --enable-d3d11va --enable-dxva2 --enable-libmfx --enable-nvenc --enable-avisynthlibswresample 2. 3.100 / 2. 3.100
libpostproc 54. 1.100 / 54. 1.100
[dshow @ 039a7e20] DirectShow video devices (some may be both video and audio devices)
[dshow @ 039a7e20] "AVerMedia USB Polaris Analog Capture"
[dshow @ 039a7e20] Alternative name "@device_pnp_\\?\usb#vid_07ca&pid_c039&mi_01#8&55f1102&0&0001#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\{9b365890-165f-11d0-a195-0020afd156e4}"
[dshow @ 039a7e20] "Lenovo EasyCamera"
[dshow @ 039a7e20] Alternative name "@device_pnp_\\?\usb#vid_04f2&pid_b50f&mi_00#6&bbc4ae1&1&0000#{65e8773d-8f56-11d0-a3b9-00a0c9223196}\global"
[dshow @ 039a7e20] DirectShow audio devices
[dshow @ 039a7e20] "Microphone (2- USB Multimedia Audio Device)"
[dshow @ 039a7e20] Alternative name "@device_cm_{33D9A762-90C8-11D0-BD43-00A0C911CE86}\wave_{73C17834-AA57-4CA1-847A-6BBEB1E0F2E6}"
[dshow @ 039a7e20] "SPDIF Interface (Multimedia Audio Device)"
[dshow @ 039a7e20] Alternative name "@device_cm_{33D9A762-90C8-11D0-BD43-00A0C911CE86}\wave_{617B63FB-CFC0-4D10-AE30-42A66CAF6A4E}"
dummy: Immediate exit requested
"""
)
# Parse the sample
device_names = imageio.plugins.ffmpeg.parse_device_names(sample)
# Assert that the device_names list has the correct length
assert len(device_names) == 2
def test_overload_fps():
need_internet()
# Native
r = imageio.get_reader("imageio:cockatoo.mp4")
assert r.count_frames() == 280 # native
assert int(r._meta["fps"] * r._meta["duration"] + 0.5) == 280
ims = [im for im in r]
assert len(ims) in (280, 281)
# imageio.mimwrite('~/parot280.gif', ims[:30])
# Less
r = imageio.get_reader("imageio:cockatoo.mp4", fps=8)
# assert r.count_frames() == 112 # cant :(
assert int(r._meta["fps"] * r._meta["duration"] + 0.5) == 112 # note the mismatch
ims = [im for im in r]
assert len(ims) == 114
# imageio.mimwrite('~/parot112.gif', ims[:30])
# More
r = imageio.get_reader("imageio:cockatoo.mp4", fps=24)
# assert r.count_frames() == 336 # cant :(
ims = [im for im in r]
assert int(r._meta["fps"] * r._meta["duration"] + 0.5) == 336
assert len(ims) in (336, 337)
# imageio.mimwrite('~/parot336.gif', ims[:30])
# Do we calculate nframes correctly? To be fair, the reader wont try to
# read beyond what it thinks how many frames it has. But this at least
# makes sure that this works.
for fps in (8.0, 8.02, 8.04, 8.06, 8.08):
r = imageio.get_reader("imageio:cockatoo.mp4", fps=fps)
n = int(r._meta["fps"] * r._meta["duration"] + 0.5)
i = 0
try:
while True:
r.get_next_data()
i += 1
except (StopIteration, IndexError):
pass
# print(r._meta['duration'], r._meta['fps'], r._meta['duration'] * fps, r._meta['nframes'], n)
assert n - 2 <= i <= n + 2
run_tests_if_main()
| 39.642857 | 188 | 0.640927 |
8eb52af3d38746344c4ead6f7a226ece849368a3 | 4,612 | py | Python | Data_preprocess/stitching.py | anatcohen2/AI-FFPE | ff2c79fa9fbb9658027ec1015ac9195548276411 | ["BSD-3-Clause"] | 12 | 2021-08-04T23:15:56.000Z | 2022-02-21T09:53:27.000Z | Data_preprocess/stitching.py | anatcohen2/AI-FFPE | ff2c79fa9fbb9658027ec1015ac9195548276411 | ["BSD-3-Clause"] | null | null | null | Data_preprocess/stitching.py | anatcohen2/AI-FFPE | ff2c79fa9fbb9658027ec1015ac9195548276411 | ["BSD-3-Clause"] | 5 | 2021-07-30T16:58:13.000Z | 2022-03-02T14:57:41.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 22:02:14 2020
@author: bou02
"""
import math
import os
import time
import xml.etree.ElementTree as ET
from xml.dom import minidom
import sys
import fnmatch
from glob import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np
import openslide
from PIL import Image
import PIL
PIL.Image.MAX_IMAGE_PIXELS = 9000000000
import pdb
import h5py
import math
from wsi_core.wsi_utils import savePatchIter_bag_hdf5, initialize_hdf5_bag
from numpy import ones
import re
import argparse
parser = argparse.ArgumentParser(description='FrozGAN Stitching',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--down-ratio', default=1, type=int, help='stitching downscale ratio')
parser.add_argument('--h5-inpath', type=str, help='.h5 path')
parser.add_argument('--preds-path', type=str, help='FrozGAN preds path')
parser.add_argument('--output-dir', type=str, help='output path')
args = parser.parse_args()
def load_image( infilename ) :
img = Image.open( infilename )
img.load()
data = np.asarray( img, dtype="uint8" )
return data
def DrawGrid(img, coord, shape, thickness=2, color=(0,0,0,255)):
cv2.rectangle(img, tuple(np.maximum([0, 0], coord-thickness//2)), tuple(coord - thickness//2 + np.array(shape)), (0, 0, 0, 255), thickness=thickness)
return img
def DrawMap(canvas, patch_dset, coords, patch_size, indices=None, verbose=1, draw_grid=True):
if indices is None:
indices = np.arange(len(coords))
total = len(indices)
if verbose > 0:
ten_percent_chunk = math.ceil(total * 0.1)
# print('start stitching {}'.format(patch_dset.attrs['wsi_name']))
for idx in range(total):
if verbose > 0:
if idx % ten_percent_chunk == 0:
print('progress: {}/{} stitched'.format(idx, total))
patch_id = indices[idx]
print(patch_id)
patch = patch_dset[patch_id]
patch = cv2.resize(patch, patch_size)
coord = coords[patch_id]
print(coord)
canvas_crop_shape = canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3].shape[:2]
canvas[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0], :3] = patch[:canvas_crop_shape[0], :canvas_crop_shape[1], :]
if draw_grid:
DrawGrid(canvas, coord, patch_size)
return Image.fromarray(canvas)
def StitchPatches(hdf5_file_path, pred_path,downscale=4, draw_grid=False, bg_color=(0,0,0), alpha=-1):
file = h5py.File(hdf5_file_path, 'r')
files = []
dset = file['imgs']
print(len(dset))
start_dir = pred_path
pattern = "*.png"
for dir,_,_ in os.walk(start_dir):
files.extend(glob(os.path.join(dir,pattern)))
print(len(files))
files.sort(key=lambda f: int(re.sub('\D', '', f)))
images = ones((len(files), 512, 512, 3))
for i,load in enumerate(files):
print(load)
images[i]=(load_image( load ))
print(images[0].dtype)
#dset=files
coords = file['coords'][:]
if 'downsampled_level_dim' in dset.attrs.keys():
w, h = dset.attrs['downsampled_level_dim']
else:
w, h = dset.attrs['level_dim']
print('original size: {} x {}'.format(w, h))
w = w // downscale
h = h //downscale
coords = (coords / downscale).astype(np.int32)
print('downscaled size for stiching: {} x {}'.format(w, h))
print('number of patches: {}'.format(len(dset)))
img_shape = dset[0].shape
print('patch shape: {}'.format(img_shape))
downscaled_shape = (img_shape[1] // downscale, img_shape[0] // downscale)
if w*h > Image.MAX_IMAGE_PIXELS:
raise Image.DecompressionBombError("Visualization Downscale %d is too large" % downscale)
if alpha < 0 or alpha == -1:
heatmap = Image.new(size=(w,h), mode="RGB", color=bg_color)
else:
heatmap = Image.new(size=(w,h), mode="RGBA", color=bg_color + (int(255 * alpha),))
heatmap = np.array(heatmap)
heatmap = DrawMap(heatmap, images, coords, downscaled_shape, indices=None, draw_grid=draw_grid)
file.close()
return heatmap
down_ratio = args.down_ratio
for i in glob(str(args.h5_inpath)+"/*.h5"):
h5_path = i
preds_path = args.preds_path
heatmap=StitchPatches(h5_path,preds_path, down_ratio)
out_path = args.output_dir
stitch_path = os.path.join(out_path, "fake_stitch"+'.png')
heatmap.save(stitch_path)
| 34.676692 | 154 | 0.645273 |
841ba84585a30c23d67f85f469729f03ba6bce5e | 1,887 | py | Python | patent_scrabbler/middlewares.py | NickZ/patent_scrabbler | ef872e11b750d6d8a0754b5cda984e0d4af0af56 | ["MIT"] | null | null | null | patent_scrabbler/middlewares.py | NickZ/patent_scrabbler | ef872e11b750d6d8a0754b5cda984e0d4af0af56 | ["MIT"] | null | null | null | patent_scrabbler/middlewares.py | NickZ/patent_scrabbler | ef872e11b750d6d8a0754b5cda984e0d4af0af56 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class PatentScrabblerSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
    def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
    def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
    def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
    def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 33.105263 | 78 | 0.674616 |
0294d38d53a6ecffac8ce3ed1f2239e5f7f9b106 | 1,039 | py | Python | manage.py | rajat-np/yt-search | 3bf66403283744a57fa5efa029c4c45bb5e9292d | ["MIT"] | null | null | null | manage.py | rajat-np/yt-search | 3bf66403283744a57fa5efa029c4c45bb5e9292d | ["MIT"] | null | null | null | manage.py | rajat-np/yt-search | 3bf66403283744a57fa5efa029c4c45bb5e9292d | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# yt_search directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "yt_search"))
execute_from_command_line(sys.argv)
| 32.46875 | 77 | 0.658325 |
85111469a14775b517e89054c05bdbe4eb9dc0f0 | 160 | py | Python | setup.py | levi97/gym-navigate2D | 2ea958c403ff5973a178c9ee88df237e15f1c650 | ["MIT"] | 1 | 2019-08-12T21:19:19.000Z | 2019-08-12T21:19:19.000Z | setup.py | levi97/gym-navigate2D | 2ea958c403ff5973a178c9ee88df237e15f1c650 | ["MIT"] | null | null | null | setup.py | levi97/gym-navigate2D | 2ea958c403ff5973a178c9ee88df237e15f1c650 | ["MIT"] | null | null | null |
from setuptools import setup
setup(name='gym_navigate2D',
version='0.0.1',
install_requires=['gym', 'numpy', 'matplotlib', 'ipython', 'pandas']
)
| 20 | 74 | 0.65625 |
00e09138554866030a412b4a2952d6a70456f478 | 1,279 | py | Python | src/model/losses/loss.py | Karan-Choudhary/SuperResolution | fea69e9ec2f1f59fca8211d2d29e274dc12af97b | ["MIT"] | 3 | 2022-02-15T08:51:58.000Z | 2022-02-19T17:22:17.000Z | src/model/losses/loss.py | Karan-Choudhary/SuperResolution | fea69e9ec2f1f59fca8211d2d29e274dc12af97b | ["MIT"] | 1 | 2022-02-22T14:13:09.000Z | 2022-02-22T14:13:09.000Z | src/model/losses/loss.py | Karan-Choudhary/SuperResolution | fea69e9ec2f1f59fca8211d2d29e274dc12af97b | ["MIT"] | null | null | null |
import tensorflow as tf
from tensorflow.keras.applications.vgg19 import VGG19
import yaml
vgg19_model = VGG19(weights='imagenet', include_top=False)
# Truncated VGG19 used as a fixed perceptual-feature extractor (the original slice was
# misplaced and selected all layers instead of the first 18)
feature_extractor = tf.keras.models.Sequential(vgg19_model.layers[:18])
def read_params(config_path):
with open(config_path,'r') as stream:
try:
params = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
return params
def generator_loss(disc_generated_output, gen_output, target):
config = read_params('params.yaml')
LAMBDA = config['loss']['lambda']
valid = tf.ones_like(disc_generated_output)
gan_loss = tf.keras.losses.MSE(valid,disc_generated_output)
gen_features = feature_extractor(gen_output)
real_features = feature_extractor(target)
l1_loss = tf.reduce_mean(tf.abs(gen_features - real_features))
total_gen_loss = gan_loss + (l1_loss * LAMBDA)
return total_gen_loss, gan_loss, l1_loss
def discriminator_loss(disc_real_output, disc_generated_output):
real_loss = tf.keras.losses.MSE(tf.ones_like(disc_real_output), disc_real_output)
generated_loss = tf.keras.losses.MSE(tf.zeros_like(disc_generated_output), disc_generated_output)
total_disc_loss = real_loss + generated_loss
return total_disc_loss
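# Minimal usage sketch (added for illustration; not part of the original module). It
# assumes a params.yaml with a loss.lambda entry in the working directory and that the
# VGG19 ImageNet weights can be loaded; the tensors are small random stand-ins.
if __name__ == "__main__":
    fake_logits = tf.random.uniform((1, 8, 8, 1))   # discriminator output on generated image
    real_logits = tf.random.uniform((1, 8, 8, 1))   # discriminator output on real image
    generated = tf.random.uniform((1, 96, 96, 3))   # generator output (HR estimate)
    target_img = tf.random.uniform((1, 96, 96, 3))  # ground-truth HR image
    total_gen, gan, l1 = generator_loss(fake_logits, generated, target_img)
    disc = discriminator_loss(real_logits, fake_logits)
    print("generator loss:", float(tf.reduce_mean(total_gen)))
    print("discriminator loss:", float(tf.reduce_mean(disc)))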
| 36.542857 | 101 | 0.75215 |
80272ffd9b8458863e1a9047aaeab7f7125be983 | 2,287 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/pages/tests/test_views.py | erickgnavar/django_template | bdaa361151296da5670f7698f2ee146edf9dc867 | ["MIT"] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/pages/tests/test_views.py | erickgnavar/django_template | bdaa361151296da5670f7698f2ee146edf9dc867 | ["MIT"] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/pages/tests/test_views.py | erickgnavar/django_template | bdaa361151296da5670f7698f2ee146edf9dc867 | ["MIT"] | null | null | null |
from django.test import RequestFactory, TestCase
from django.urls import resolve
from .. import views
class HomeViewTestCase(TestCase):
def setUp(self):
self.view = views.HomeView.as_view()
self.factory = RequestFactory()
def test_match_expected_view(self):
url = resolve("/")
self.assertEqual(url.func.__name__, self.view.__name__)
def test_load_sucessful(self):
request = self.factory.get("/")
response = self.view(request)
self.assertEqual(response.status_code, 200)
class ContactViewTestCase(TestCase):
def setUp(self):
self.view = views.contact
self.factory = RequestFactory()
def test_match_expected_view(self):
url = resolve("/contact")
self.assertEqual(url.func.__name__, self.view.__name__)
def test_load_sucessful(self):
request = self.factory.get("/")
response = self.view(request)
self.assertEqual(response.status_code, 200)
class PrivacyViewTestCase(TestCase):
def setUp(self):
self.view = views.privacy
self.factory = RequestFactory()
def test_match_expected_view(self):
url = resolve("/privacy")
self.assertEqual(url.func.__name__, self.view.__name__)
def test_load_sucessful(self):
request = self.factory.get("/")
response = self.view(request)
self.assertEqual(response.status_code, 200)
class CookiesViewTestCase(TestCase):
def setUp(self):
self.view = views.cookies
self.factory = RequestFactory()
def test_match_expected_view(self):
url = resolve("/cookies")
self.assertEqual(url.func.__name__, self.view.__name__)
def test_load_sucessful(self):
request = self.factory.get("/")
response = self.view(request)
self.assertEqual(response.status_code, 200)
class FaqViewTestCase(TestCase):
def setUp(self):
self.view = views.faq
self.factory = RequestFactory()
def test_match_expected_view(self):
url = resolve("/faq")
self.assertEqual(url.func.__name__, self.view.__name__)
def test_load_sucessful(self):
request = self.factory.get("/")
response = self.view(request)
self.assertEqual(response.status_code, 200)
| 28.5875 | 63 | 0.664626 |
87d3b14e58923579d0fecdb0c33feaddfae4bf2f | 1,160 | py | Python | interprocedural_analyses/taint/test/integration/port.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | ["MIT"] | 1 | 2020-08-08T16:01:55.000Z | 2020-08-08T16:01:55.000Z | interprocedural_analyses/taint/test/integration/port.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | ["MIT"] | 4 | 2022-02-15T02:42:33.000Z | 2022-02-28T01:30:07.000Z | interprocedural_analyses/taint/test/integration/port.py | terrorizer1980/pyre-check | 16659c7f6f19f3c364ba3a56e6c582371a8ff348 | ["MIT"] | 1 | 2020-11-22T12:08:51.000Z | 2020-11-22T12:08:51.000Z |
from builtins import __test_sink, __test_source
def source_field():
result = {}
result.a = __test_source()
return result
def sink_field(arg):
__test_sink(arg.a)
def match_flows():
x = source_field()
sink_field(x)
def star_arg(x, *data, **kwargs):
sink_field(data[1])
def star_arg_wrapper(x, *data, **kwargs):
star_arg(x, *data, **kwargs)
def match_star_arg_with_star():
data = [0, source_field(), 2]
star_arg_wrapper("a", *data)
def match_star_arg_directly():
star_arg_wrapper("a", "b", source_field(), "d")
def star_star_arg(x, **kwargs):
sink_field(kwargs["arg"])
def star_star_arg_wrapper(x, **kwargs):
star_star_arg(x, **kwargs)
def match_star_star_arg_with_star():
data = {"a": 0, "arg": source_field()}
star_star_arg_wrapper("a", **data)
def match_star_star_arg_directly():
star_star_arg_wrapper("a", "b", arg=source_field())
class Foo:
@property
def some_source():
return __test_source()
def refer_to_method_as_field(foo: Foo):
# This comes up in Instagram due to @cached_property decorators
taint = foo.some_source
__test_sink(taint)
| 18.412698 | 67 | 0.675 |
6e7cb29094e50f2d92f4720db5d6ca6cb7676e36 | 4,967 | py | Python | ironic/tests/test_fsm.py | naototty/vagrant-lxc-ironic | 4681c974703e20a874d4d4fbbb1fd02437022ac6 | ["Apache-2.0"] | null | null | null | ironic/tests/test_fsm.py | naototty/vagrant-lxc-ironic | 4681c974703e20a874d4d4fbbb1fd02437022ac6 | ["Apache-2.0"] | null | null | null | ironic/tests/test_fsm.py | naototty/vagrant-lxc-ironic | 4681c974703e20a874d4d4fbbb1fd02437022ac6 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.common import exception as excp
from ironic.common import fsm
from ironic.tests import base
class FSMTest(base.TestCase):
def setUp(self):
super(FSMTest, self).setUp()
self.jumper = fsm.FSM("down")
self.jumper.add_state('up')
self.jumper.add_state('down')
self.jumper.add_transition('down', 'up', 'jump')
self.jumper.add_transition('up', 'down', 'fall')
def test_contains(self):
m = fsm.FSM('unknown')
self.assertNotIn('unknown', m)
m.add_state('unknown')
self.assertIn('unknown', m)
def test_duplicate_state(self):
m = fsm.FSM('unknown')
m.add_state('unknown')
self.assertRaises(excp.Duplicate, m.add_state, 'unknown')
def test_bad_transition(self):
m = fsm.FSM('unknown')
m.add_state('unknown')
m.add_state('fire')
self.assertRaises(excp.NotFound, m.add_transition,
'unknown', 'something', 'boom')
self.assertRaises(excp.NotFound, m.add_transition,
'something', 'unknown', 'boom')
def test_on_enter_on_exit(self):
def on_exit(state, event):
exit_transitions.append((state, event))
def on_enter(state, event):
enter_transitions.append((state, event))
enter_transitions = []
exit_transitions = []
m = fsm.FSM('start')
m.add_state('start', on_exit=on_exit)
m.add_state('down', on_enter=on_enter, on_exit=on_exit)
m.add_state('up', on_enter=on_enter, on_exit=on_exit)
m.add_transition('start', 'down', 'beat')
m.add_transition('down', 'up', 'jump')
m.add_transition('up', 'down', 'fall')
m.initialize()
m.process_event('beat')
m.process_event('jump')
m.process_event('fall')
self.assertEqual([('down', 'beat'),
('up', 'jump'), ('down', 'fall')], enter_transitions)
self.assertEqual([('down', 'jump'), ('up', 'fall')], exit_transitions)
def test_not_initialized(self):
self.assertRaises(excp.InvalidState,
self.jumper.process_event, 'jump')
def test_copy_states(self):
c = fsm.FSM()
self.assertEqual(0, len(c.states))
c.add_state('up')
self.assertEqual(1, len(c.states))
deep = c.copy()
shallow = c.copy(shallow=True)
c.add_state('down')
c.add_transition('up', 'down', 'fall')
self.assertEqual(2, len(c.states))
# deep copy created new members, so change is not visible
self.assertEqual(1, len(deep.states))
self.assertNotEqual(c._transitions, deep._transitions)
# but a shallow copy references the same state object
self.assertEqual(2, len(shallow.states))
self.assertEqual(c._transitions, shallow._transitions)
def test_copy_clears_current(self):
c = fsm.FSM()
c.add_state('up')
c.initialize('up')
d = c.copy()
self.assertEqual('up', c.current_state)
self.assertEqual(None, d.current_state)
def test_invalid_callbacks(self):
m = fsm.FSM('working')
m.add_state('working')
m.add_state('broken')
self.assertRaises(ValueError, m.add_state, 'b', on_enter=2)
self.assertRaises(ValueError, m.add_state, 'b', on_exit=2)
def test_invalid_target_state(self):
# Test to verify that adding a state which has a 'target' state that
# does not exist will raise an exception
self.assertRaises(excp.InvalidState,
self.jumper.add_state, 'jump', target='unknown')
def test_target_state_not_stable(self):
# Test to verify that adding a state that has a 'target' state which is
# not a 'stable' state will raise an exception
self.assertRaises(excp.InvalidState,
self.jumper.add_state, 'jump', target='down')
def test_target_state_stable(self):
# Test to verify that adding a new state with a 'target' state pointing
# to a 'stable' state does not raise an exception
m = fsm.FSM('working')
m.add_state('working', stable=True)
m.add_state('foo', target='working')
m.initialize()
| 35.992754 | 79 | 0.617274 |
b395ca8f768eb3e142c22b5d3676992e55cb0eb3 | 1,128 | py | Python | example_adapter.py | oldpaws/AI3601_RL_Final_Project_MAGAIL | 6e53d5c4ad21c9e3907e6d14b20c04b3287800cf | ["MIT"] | null | null | null | example_adapter.py | oldpaws/AI3601_RL_Final_Project_MAGAIL | 6e53d5c4ad21c9e3907e6d14b20c04b3287800cf | ["MIT"] | null | null | null | example_adapter.py | oldpaws/AI3601_RL_Final_Project_MAGAIL | 6e53d5c4ad21c9e3907e6d14b20c04b3287800cf | ["MIT"] | null | null | null |
import gym
import utils
def get_observation_adapter(obs_stack_size):
stack_size = obs_stack_size
# look_ahead = 10
closest_neighbor_num = 12
img_resolution = 40
observe_lane_num = 3
subscribed_features = dict(
# distance_to_center=(stack_size, 1),
ego_pos=(stack_size, 2),
# heading=(stack_size, 1),
# speed=(stack_size, 1),
neighbor=(stack_size, 76), # dist, speed, ttc
# heading_errors=(stack_size, look_ahead),
# steering=(stack_size, 1),
# ego_lane_dist_and_speed=(stack_size, observe_lane_num + 1),
# img_gray=(stack_size, img_resolution, img_resolution) if use_rgb else False,
# distance=(stack_size, 1)
)
observation_space = gym.spaces.Dict(
utils.subscribe_features(**subscribed_features)
)
observation_adapter = utils.get_observation_adapter(
observation_space,
# look_ahead=look_ahead,
observe_lane_num=observe_lane_num,
resize=(img_resolution, img_resolution),
closest_neighbor_num=closest_neighbor_num,
)
return observation_adapter
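# Minimal usage sketch (added for illustration; not part of the original module); the
# raw SMARTS-style environment observation passed to the adapter is an assumption here.
#
#   observation_adapter = get_observation_adapter(obs_stack_size=3)
#   stacked_obs = observation_adapter(raw_env_observation)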
| 28.923077 | 86 | 0.673759 |
33a669d19ba88fd28c160177f870904eeb7ff6e5 | 1,697 | py | Python | src/api/translate/gcloud_translate.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | ["BSD-3-Clause"] | 5 | 2020-08-02T20:51:03.000Z | 2022-01-17T02:53:12.000Z | src/api/translate/gcloud_translate.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | ["BSD-3-Clause"] | 12 | 2020-07-10T17:59:53.000Z | 2020-07-10T18:04:12.000Z | src/api/translate/gcloud_translate.py | Shandilya21/oratio | 53a77404df35a6b2b73c6a74a0e40d3f8747c408 | ["BSD-3-Clause"] | 3 | 2020-08-08T09:38:25.000Z | 2020-08-24T20:57:44.000Z |
import sentence
import six
import os
from google.cloud import translate_v2 as translate
from constants.constants import REPO_PATH
def get_translation(translate_client, text_to_translate, target_language):
"""Translates text into the target language.
Target must be an ISO 639-1 language code.
See https://g.co/cloud/translate/v2/translate-reference#supported_languages
"""
if isinstance(text_to_translate, six.binary_type):
text_to_translate = text_to_translate.decode("utf-8")
# Text can also be a sequence of strings, in which case this method
# will return a sequence of results for each text.
result = translate_client.translate(
text_to_translate, target_language=target_language
)
return result["translatedText"]
# returns a list of all the lang-codes available
def get_supported_languages(translate_client, verbose=False):
"""Lists all available languages for gCloud translate API."""
results = translate_client.get_languages()
if verbose:
for language in results:
print(u"{name} ({language})".format(**language))
return [r["language"] for r in results]
# Will first check for keyfile credentials, then use the gcloud utility.
# returns None on failure
def get_client():
print("Using GCloud for translation client")
error = []
try:
client = translate.Client.from_service_account_json(
os.path.join(REPO_PATH, ".keyfile.json")
)
return client
except Exception as e:
error.append(str(e))
try:
return translate.Client()
except Exception as e:
error.append(str(e))
print("\n".join(error))
return None
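# Minimal usage sketch (added for illustration; not part of the original module). It
# assumes valid Google Cloud credentials are reachable via .keyfile.json or the gcloud
# environment; the text and target language are arbitrary examples.
if __name__ == "__main__":
    client = get_client()
    if client is not None:
        print(get_supported_languages(client)[:5])
        print(get_translation(client, "Hello, world", "es"))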
| 31.425926 | 79 | 0.703595 |
51ee81370d1605315b16b3fb14ffbf1f88c5b988 | 7,819 | py | Python | msssim.py | hamedhaghighi/Usupervised_Image_Restoration | a3fefbf54891b9e984987fe15bd6b434b59fec3c | ["MIT"] | null | null | null | msssim.py | hamedhaghighi/Usupervised_Image_Restoration | a3fefbf54891b9e984987fe15bd6b434b59fec3c | ["MIT"] | null | null | null | msssim.py | hamedhaghighi/Usupervised_Image_Restoration | a3fefbf54891b9e984987fe15bd6b434b59fec3c | ["MIT"] | null | null | null |
"""Python implementation of MS-SSIM.
Usage:
python msssim.py --original_image=original.png --compared_image=distorted.png
"""
import numpy as np
from scipy import signal
from scipy.ndimage.filters import convolve
import tensorflow as tf
import os
import string
ori_path= "./vaeganCeleba2/sample/test00_0000_r.png"
com_path= "./vaeganCeleba2/sample/test00_0000.png"
tf.flags.DEFINE_string('original_image',ori_path , 'Path to PNG image.')
tf.flags.DEFINE_string('compared_image',com_path , 'Path to PNG image.')
FLAGS = tf.flags.FLAGS
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x**2 + y**2)/(2.0 * sigma**2)))
return g / g.sum()
def _SSIMForMultiScale(img1, img2, max_val=255, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`.
This function attempts to match the functionality of ssim_index_new.m by
Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
maximum the and minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
Pair containing the mean SSIM and contrast sensitivity between `img1` and
`img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).',
img1.shape, img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d',
img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)))
cs = np.mean(v1 / v2)
return ssim, cs
def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5,
k1=0.01, k2=0.03, weights=None):
"""Return the MS-SSIM score between `img1` and `img2`.
This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
similarity for image quality assessment" (2003).
Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf
Author's MATLAB implementation:
http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
weights: List of weights for each level; if none, use five levels and the
weights from the original paper.
Returns:
MS-SSIM score between `img1` and `img2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).',
img1.shape, img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d',
img1.ndim)
# Note: default weights don't sum to 1.0 but do match the paper / matlab code.
weights = np.array(weights if weights else
[0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
levels = weights.size
downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
mssim = np.array([])
mcs = np.array([])
for _ in range(levels):
ssim, cs = _SSIMForMultiScale(
im1, im2, max_val=max_val, filter_size=filter_size,
filter_sigma=filter_sigma, k1=k1, k2=k2)
mssim = np.append(mssim, ssim)
mcs = np.append(mcs, cs)
filtered = [convolve(im, downsample_filter, mode='reflect')
for im in [im1, im2]]
im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
(mssim[levels-1] ** weights[levels-1]))
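# --- Usage sketch (editor's addition, not part of the original script) ---
# Minimal example of calling MultiScaleSSIM() directly on two image batches.
# The random arrays below are placeholder data; this helper is not invoked by
# main() and exists purely for illustration.
def _msssim_demo():
    img_a = np.random.randint(0, 256, size=(1, 128, 128, 3))
    img_b = np.random.randint(0, 256, size=(1, 128, 128, 3))
    # Identical inputs score 1.0; dissimilar inputs score lower.
    print("MS-SSIM(a, a) = %.4f" % MultiScaleSSIM(img_a, img_a, max_val=255))
    print("MS-SSIM(a, b) = %.4f" % MultiScaleSSIM(img_a, img_b, max_val=255))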
def compare(x, y):
stat_x = os.stat(x)
stat_y = os.stat(y)
if stat_x.st_ctime < stat_y.st_ctime:
return -1
elif stat_x.st_ctime > stat_y.st_ctime:
return 1
else:
return 0
def read_image_list(category):
file_ori = []
file_new = []
print("list file")
list = os.listdir(category)
for file in list:
if string.find(file, 'r') != -1:
file_ori.append(category + "/" + file)
else:
file_new.append(category + "/" + file)
return file_ori, file_new
def main(_):
score = 0.0
file_path = "./vaeganCeleba2/sample_ssim"
ori_list , gen_list = read_image_list(file_path)
ori_list.sort(compare)
gen_list.sort(compare)
print gen_list
print ori_list
for i in range(len(ori_list)):
with tf.gfile.FastGFile(ori_list[i]) as image_file:
img1_str = image_file.read()
with tf.gfile.FastGFile(gen_list[i]) as image_file:
img2_str = image_file.read()
input_img = tf.placeholder(tf.string)
decoded_image = tf.expand_dims(tf.image.decode_png(input_img, channels=3), 0)
with tf.Session() as sess:
img1 = sess.run(decoded_image, feed_dict={input_img: img1_str})
img2 = sess.run(decoded_image, feed_dict={input_img: img2_str})
print MultiScaleSSIM(img1, img2, max_val=255)
score = score + MultiScaleSSIM(img1, img2, max_val=255)
print score/len(ori_list)
if __name__ == '__main__':
tf.app.run() | 33.27234 | 81 | 0.667988 |
cb15206d5e5036c281472292354467929be2d1c8 | 6,279 | py | Python | src/draw/gwave.py | takenori-y/SPTK | 573df2dd032c39db2aa24de012d2fe9a44f96280 | [
"Apache-2.0"
] | 91 | 2017-12-25T14:13:17.000Z | 2022-03-15T14:33:23.000Z | src/draw/gwave.py | takenori-y/SPTK | 573df2dd032c39db2aa24de012d2fe9a44f96280 | [
"Apache-2.0"
] | 4 | 2020-09-28T03:58:29.000Z | 2021-12-14T01:23:13.000Z | src/draw/gwave.py | takenori-y/SPTK | 573df2dd032c39db2aa24de012d2fe9a44f96280 | [
"Apache-2.0"
] | 16 | 2018-03-12T07:08:09.000Z | 2022-03-22T02:38:13.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------------ #
# Copyright 2021 SPTK Working Group #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------ #
import argparse
import os
import sys
import numpy as np
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import sptk.draw_utils as utils
def get_arguments():
parser = argparse.ArgumentParser(description="draw a waveform")
parser.add_argument(
metavar="infile",
dest="in_file",
default=None,
nargs="?",
type=str,
help="waveform (double)",
)
parser.add_argument(
metavar="outfile",
dest="out_file",
type=str,
help="figure",
)
parser.add_argument(
"-F",
metavar="F",
dest="factor",
default=1.0,
type=float,
help="scale of figure",
)
parser.add_argument(
"-W",
metavar="W",
dest="width",
default=None,
type=int,
help="width of figure [px]",
)
parser.add_argument(
"-H",
metavar="H",
dest="height",
default=None,
type=int,
help="height of figure [px]",
)
parser.add_argument(
"-g",
dest="grid",
action="store_true",
help="draw grid",
)
parser.add_argument(
"-s",
metavar="s",
dest="start_point",
default=0,
type=int,
help="start point",
)
parser.add_argument(
"-e",
metavar="e",
dest="end_point",
default=None,
type=int,
help="end point",
)
parser.add_argument(
"-n",
metavar="n",
dest="num_samples",
default=None,
type=int,
help="number of samples per screen",
)
parser.add_argument(
"-i",
metavar="i",
dest="num_screens",
default=1,
type=int,
help="number of screens",
)
parser.add_argument(
"-y",
metavar=("YMIN", "YMAX"),
dest="ylim",
default=(None, None),
nargs=2,
type=float,
help="y-axis limits",
)
parser.add_argument(
"-ls",
dest="line_style",
choices=utils.line_styles,
default="solid",
type=str,
help="line style",
)
parser.add_argument(
"-lc",
metavar="lc",
dest="line_color",
default="#636EFA",
type=str,
help="line color",
)
parser.add_argument(
"-lw",
metavar="lw",
dest="line_width",
default=1,
type=int,
help="line width",
)
return parser.parse_args()
##
# @a gwave [ @e option ] [ @e infile ] @e outfile
#
# - @b -F @e float
# - scale of figure
# - @b -W @e int
# - width of figure in pixels
# - @b -H @e int
# - height of figure in pixels
# - @b -g
# - draw grid
# - @b -s @e int
# - start point
# - @b -e @e int
# - end point
# - @b -n @e int
# - number of samples per screen
# - @b -i @e int
# - number of screens
# - @b -y @e float @e float
# - y-axis limits
# - @b -ls @e str
# - line style (solid, dash, dot, or dashdot)
# - @b -lc @e str
# - line color
# - @b -lw @e int
# - line width
# - @b infile @e str
# - double-type waveform
# - @b outfile @e str
# - figure
#
# The below example draws an entire waveform on five screens.
# @code{.sh}
# x2x +sd data.short | gwave -i 5 out.pdf
# @endcode
def main():
args = get_arguments()
if args.in_file is None:
data = utils.read_stdin()
else:
if not os.path.exists(args.in_file):
utils.print_error_message("gwave", f"Cannot open {args.in_file}")
sys.exit(1)
data = utils.read_binary(args.in_file)
y = data[args.start_point : args.end_point]
x = np.arange(len(y)) + args.start_point
if args.ylim[0] is None:
ymax = np.amax(np.abs(y))
ylim = (-ymax, ymax)
else:
ylim = args.ylim
if args.num_samples is None:
n = len(y) // args.num_screens
else:
n = args.num_samples
fig = make_subplots(rows=args.num_screens, cols=1)
s = 0
for i in range(args.num_screens):
last = i == args.num_screens - 1
if args.num_samples is None and last:
e = len(y)
else:
e = s + n
fig.add_trace(
go.Scatter(
x=x[s:e],
y=y[s:e],
line=dict(
color=args.line_color,
width=args.line_width,
dash=args.line_style,
),
),
row=i + 1,
col=1,
)
fig.update_xaxes(
title_text="Time [sample]" if last else "",
showgrid=args.grid,
row=i + 1,
col=1,
        )
fig.update_yaxes(
range=ylim,
showgrid=args.grid,
row=i + 1,
col=1,
)
s = e
fig.update_layout(showlegend=False)
fig.write_image(
args.out_file,
width=args.width,
height=args.height,
scale=args.factor,
)
if __name__ == "__main__":
main()
| 25.116 | 77 | 0.478579 |
26549a8614fe5da77df53d78f7b80df0953db452 | 4,146 | py | Python | channels_irc/consumers.py | AdvocatesInc/django-channels-irc | 4fb9b304ca652ed661ff0ca032259e92cdcbb54d | [
"MIT"
] | 15 | 2018-07-15T14:07:17.000Z | 2021-12-15T20:14:51.000Z | channels_irc/consumers.py | AdvocatesInc/django-channels-irc | 4fb9b304ca652ed661ff0ca032259e92cdcbb54d | [
"MIT"
] | 1 | 2018-05-04T22:12:47.000Z | 2018-05-04T22:12:47.000Z | channels_irc/consumers.py | AdvocatesInc/django-channels-irc | 4fb9b304ca652ed661ff0ca032259e92cdcbb54d | [
"MIT"
] | 6 | 2018-03-20T19:02:08.000Z | 2021-05-28T06:39:26.000Z | from channels.consumer import AsyncConsumer
from channels.exceptions import InvalidChannelLayerError, StopConsumer
class AsyncIrcConsumer(AsyncConsumer):
"""
Base IRC consumer; Implements basic hooks for interfacing with the IRC Interface Server
"""
groups = []
async def on_welcome(self, channel, user=None, body=None):
"""
Called when the IRC Interface Server connects to the IRC Server
and receives the "welcome" command from IRC
"""
try:
for group in self.groups:
await self.channel_layer.group_add(group, self.channel_name)
except AttributeError:
raise InvalidChannelLayerError("BACKEND is unconfigured or doesn't support groups")
await self.welcome(channel)
async def welcome(self, channel):
"""
Hook for any action(s) to be run on connecting to the IRC Server
"""
pass
async def irc_on_disconnect(self, message):
"""
Called when the connection to the IRC Server is closed
"""
try:
for group in self.groups:
await self.channel_layer.group_discard(group, self.channel_name)
except AttributeError:
raise InvalidChannelLayerError("BACKEND is unconfigured or doesn't support groups")
await self.on_disconnect(message['server'][0], message['server'][1])
raise StopConsumer()
async def on_disconnect(self, server, port):
"""
Hook for any action(s) to be run on disconnecting from the IRC Server
"""
pass
async def irc_receive(self, message):
"""
Parses incoming messages and routes them to the appropriate handler, depending on the
incoming action type
"""
command_type = message.get('command', None)
if command_type is None:
raise ValueError('An `irc.receive` message must specify a `command` key')
handler = getattr(self, 'on_{}'.format(command_type), None)
if handler is not None:
await handler(
channel=message.get('channel', None),
user=message.get('user', None),
body=message.get('body', None),
)
async def send_message(self, channel, text):
"""
Sends a PRIVMSG to the IRC Server
"""
await self.send_command('message', channel=channel, body=text)
async def send_command(self, command, channel=None, body=None):
"""
Sends a command to the IRC Server. Message should be of the format:
{
'type': 'irc.command',
'command': '<IRC_COMMAND>',
'channel': '<IRC_CHANNEL>', # Optional, depending on command
'body': '<COMMAND_TEXT>', # Optional, depending on command
}
"""
await self.send({
'type': 'irc.send',
'command': command,
'channel': channel,
'body': body,
})
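# --- Illustrative sketch (editor's addition, not part of this package) ---
# A minimal subclass built on the hooks above. The class name, group name and
# message text are invented, and it assumes the interface server relays
# incoming PRIVMSGs with command 'message' so irc_receive dispatches them to
# on_message.
class EchoIrcConsumer(AsyncIrcConsumer):
    groups = ['irc']

    async def welcome(self, channel):
        # Greet the channel once the IRC welcome has been received.
        await self.send_message(channel, 'hello, I am an echo bot')

    async def on_message(self, channel, user=None, body=None):
        # Echo every received message back to the channel it came from.
        if channel and body:
            await self.send_message(channel, body)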
class MultiIrcConsumer(AsyncConsumer):
"""
Consumer for managing multiple IRC connections. Used with the `MultiConnectionClient`
"""
async def irc_multi_init(self, message):
"""
Called when the consumer is initially loaded.
"""
await self.on_init()
async def on_init(self):
"""
Hook for executing commands on loading the consumer
"""
pass
async def send_connect(self, server, port, nickname, **kwargs):
"""
Creates a new connection, if no connection to that server/nickname
combination exists.
"""
await self.send({
'type': 'irc.multi.connect',
'server': server,
'port': port,
'nickname': nickname,
**kwargs
})
async def send_disconnect(self, server, nickname):
"""
        Disconnects a connection and removes it from the stored connections
"""
await self.send({
'type': 'irc.multi.disconnect',
'server': server,
'nickname': nickname,
})
| 32.139535 | 95 | 0.582007 |
65947f5c5f18f195396e51adf19afb3b19b590ec | 9,298 | py | Python | src/pyff/pipes.py | Razumain/pyFF | 55c51e0f3e64aef09ccf76bd42f0429d451d2428 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/pyff/pipes.py | Razumain/pyFF | 55c51e0f3e64aef09ccf76bd42f0429d451d2428 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/pyff/pipes.py | Razumain/pyFF | 55c51e0f3e64aef09ccf76bd42f0429d451d2428 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Pipes and plumbing. Plumbing instances are sequences of pipes. Each pipe is called in order to load, select,
transform, sign or output SAML metadata.
"""
import traceback
from six import StringIO
import os
import yaml
from .utils import resource_string, PyffException
from .logs import log
__author__ = 'leifj'
registry = dict()
def pipe(*args, **kwargs):
"""
Register the decorated function in the pyff pipe registry
:param name: optional name - if None, use function name
"""
def deco_none(f):
return f
def deco_pipe(f):
f_name = kwargs.get('name', f.__name__)
registry[f_name] = f
return f
if 1 == len(args):
f = args[0]
registry[f.__name__] = f
return deco_none
else:
return deco_pipe
class PipeException(PyffException):
pass
class PluginsRegistry(dict):
"""
The plugin registry uses pkg_resources.iter_entry_points to list all EntryPoints in the group 'pyff.pipe'. All pipe
entry_points must have the following prototype:
def the_something_func(req,*opts):
pass
    Referencing this function as an entry_point using something = module:the_something_func in setup.py allows the
function to be referenced as 'something' in a pipeline.
"""
# def __init__(self):
# for entry_point in iter_entry_points('pyff.pipe'):
# if entry_point.name in self:
# log.warn("Duplicate entry point: %s" % entry_point.name)
# else:
# log.debug("Registering entry point: %s" % entry_point.name)
# self[entry_point.name] = entry_point.load()
def load_pipe(d):
"""Return a triple callable,name,args of the pipe specified by the object d.
:param d: The following alternatives for d are allowed:
    - d is a string (or unicode) in which case the pipe named d is called with None as args.
    - d is a dict of the form {name: args} (i.e. one key) in which case the pipe named *name* is called with args
    - d is an iterable (e.g. tuple or list) in which case d[0] is treated as the pipe name and d[1:] becomes the args
"""
def _n(_d):
lst = _d.split()
_name = lst[0]
_opts = lst[1:]
return _name, _opts
name = None
args = None
opts = []
if type(d) is str or type(d) is unicode:
name, opts = _n(d)
elif hasattr(d, '__iter__') and not type(d) is dict:
if not len(d):
raise PipeException("This does not look like a length of pipe... \n%s" % repr(d))
name, opts = _n(d[0])
elif type(d) is dict:
k = d.keys()[0]
name, opts = _n(k)
args = d[k]
else:
raise PipeException("This does not look like a length of pipe... \n%s" % repr(d))
if name is None:
raise PipeException("Anonymous length of pipe... \n%s" % repr(d))
func = None
if name in registry:
func = registry[name]
if func is None or not hasattr(func, '__call__'):
raise PipeException('No pipe named %s is installed' % name)
return func, opts, name, args
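# --- Illustrative note (editor's addition, not part of pyFF) ---
# The three equivalent spellings accepted by load_pipe(), shown with a
# hypothetical pipe named "select":
#
#   load_pipe("select")                                # string: name, no args
#   load_pipe(["select"])                              # iterable: d[0] is the name
#   load_pipe({"select": ["http://md.example.com"]})   # dict: {name: args}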
class PipelineCallback(object):
"""
A delayed pipeline callback used as a post for parse_saml_metadata
"""
def __init__(self, entry_point, req, store=None):
self.entry_point = entry_point
self.plumbing = Plumbing(req.plumbing.pipeline, "%s-via-%s" % (req.plumbing.id, entry_point))
self.req = req
self.store = store
def __call__(self, *args, **kwargs):
log.debug("called %s" % repr(self.plumbing))
t = args[0]
if t is None:
raise ValueError("PipelineCallback must be called with a parse-tree argument")
try:
return self.plumbing.process(self.req.md, args=kwargs, store=self.store, state={self.entry_point: True}, t=t)
except Exception as ex:
traceback.print_exc(ex)
raise ex
class Plumbing(object):
"""
A plumbing instance represents a basic processing chain for SAML metadata. A simple, yet reasonably complete example:
.. code-block:: yaml
- load:
- /var/metadata/registry
- http://md.example.com
- select:
- #md:EntityDescriptor[md:IDPSSODescriptor]
- xslt:
stylesheet: tidy.xsl
- fork:
- finalize:
Name: http://example.com/metadata.xml
cacheDuration: PT1H
validUntil: PT1D
- sign:
key: signer.key
cert: signer.crt
- publish: /var/metadata/public/metadata.xml
Running this plumbing would bake all metadata found in /var/metadata/registry and at http://md.example.com into an
EntitiesDescriptor element with @Name http://example.com/metadata.xml, @cacheDuration set to 1hr and @validUntil
    1 day from the time the 'finalize' command was run. The tree would be transformed using the "tidy" stylesheets and
would then be signed (using signer.key) and finally published in /var/metadata/public/metadata.xml
"""
def __init__(self, pipeline, pid):
self._id = pid
self.pipeline = pipeline
@property
def id(self):
return self._id
@property
def pid(self):
return self._id
def __iter__(self):
return self.pipeline
def __str__(self):
return "PL[{}]".format(self.pid)
class Request(object):
"""
Represents a single request. When processing a set of pipelines a single request is used. Any part of the pipeline
may modify any of the fields.
"""
def __init__(self, pl, md, t, name=None, args=None, state=None, store=None):
if not state:
state = dict()
if not args:
args = []
self.plumbing = pl
self.md = md
self.t = t
self.name = name
self.args = args
self.state = state
self.done = False
self._store = store
def lookup(self, member):
return self.md.lookup(member, store=self.store)
@property
def store(self):
if self._store:
return self._store
return self.md.store
def process(self, pl):
"""The inner request pipeline processor.
:param pl: The plumbing to run this request through
"""
for p in pl.pipeline:
cb, opts, name, args = load_pipe(p)
log.debug("calling '{}' in {} using args: {} and opts: {}".format(name, pl, repr(args), repr(opts)))
# log.debug("traversing pipe %s,%s,%s using %s" % (pipe,name,args,opts))
if type(args) is str or type(args) is unicode:
args = [args]
if args is not None and type(args) is not dict and type(args) is not list and type(args) is not tuple:
raise PipeException("Unknown argument type %s" % repr(args))
self.args = args
self.name = name
ot = cb(self, *opts)
if ot is not None:
self.t = ot
if self.done:
break
return self.t
def process(self, md, args=None, state=None, t=None, store=None):
"""
The main entrypoint for processing a request pipeline. Calls the inner processor.
:param md: The current metadata repository
:param state: The active request state
:param t: The active working document
:param store: The store object to operate on
:return: The result of applying the processing pipeline to t.
"""
if not state:
state = dict()
return Plumbing.Request(self, md, t, args=args, state=state, store=store).process(self)
def iprocess(self, req):
"""The inner request pipeline processor.
:param req: The request to run through the pipeline
"""
log.debug("Processing {}".format(self.pipeline))
for p in self.pipeline:
try:
pipefn, opts, name, args = load_pipe(p)
# log.debug("traversing pipe %s,%s,%s using %s" % (pipe,name,args,opts))
if type(args) is str or type(args) is unicode:
args = [args]
if args is not None and type(args) is not dict and type(args) is not list and type(args) is not tuple:
raise PipeException("Unknown argument type %s" % repr(args))
req.args = args
req.name = name
ot = pipefn(req, *opts)
if ot is not None:
req.t = ot
if req.done:
break
except PipeException as ex:
log.error(ex)
break
return req.t
def plumbing(fn):
"""
Create a new plumbing instance by parsing yaml from the filename.
:param fn: A filename containing the pipeline.
:return: A plumbing object
This uses the resource framework to locate the yaml file which means that pipelines can be shipped as plugins.
"""
pid = os.path.splitext(fn)[0]
ystr = resource_string(fn)
if ystr is None:
raise PipeException("Plumbing not found: %s" % fn)
pipeline = yaml.safe_load(ystr)
return Plumbing(pipeline=pipeline, pid=pid)
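# --- Usage sketch (editor's addition, not part of pyFF) ---
# Loading and running a pipeline file; the file name, metadata repository `md`
# and state dict below are placeholders for whatever the caller already has:
#
#   pl = plumbing("mdx.yaml")
#   result_tree = pl.process(md, state={"batch": True})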
| 32.17301 | 121 | 0.59572 |
d178430106ad78468dc7ca8b8512e5500ce69ffe | 2,084 | py | Python | experiments/render.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | 1 | 2022-03-24T10:32:57.000Z | 2022-03-24T10:32:57.000Z | experiments/render.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | null | null | null | experiments/render.py | eager-dev/eagerx_dcsc_setups | 72a14a2c640f8abb1c1bfad017caaa51fa4832ea | [
"Apache-2.0"
] | null | null | null | # ROS packages required
import eagerx
from eagerx import Engine, process, Graph
eagerx.initialize("eagerx_core", anonymous=True, log_level=eagerx.log.INFO)
# Environment
from eagerx.core.env import EagerxEnv
from eagerx.wrappers import Flatten
# Implementation specific
import eagerx.nodes # Registers butterworth_filter # noqa # pylint: disable=unused-import
import eagerx_reality # Registers RealEngine # noqa # pylint: disable=unused-import
import eagerx_dcsc_setups.pendulum # Registers Pendulum # noqa # pylint: disable=unused-import
# Other
import stable_baselines3 as sb
from functools import partial
import experiments.util as util
import os
import numpy as np
if __name__ == "__main__":
DR = False
FA = 1
eval_delay = None
real_delay = 0.0
# Define constants
sensor_rate = 30.0
actuator_rate = 90.0
image_rate = sensor_rate / 2
engine_rate = max([sensor_rate, actuator_rate, image_rate])
seed = 27
np.random.seed(seed)
length_eval_eps = 90
eval_eps = 3
log_name = "2022-05-09-0659"
model_name = "bl_fa1_0"
NAME = "sim"
LOG_DIR = os.path.dirname(eagerx_dcsc_setups.__file__) + f"/../logs/{NAME}_{log_name}"
step_fn = partial(util.step_fn, length_eps=length_eval_eps)
eval_reset_fn = partial(util.eval_reset_fn, eval_delay=None)
# Define engines
engine_real = Engine.make("RealEngine", rate=engine_rate, sync=True, process=process.NEW_PROCESS)
# Create evaluation environment
graph = Graph.create()
graph = util.make_graph(
DR=DR,
FA=FA,
FD=0.0,
evaluation=True,
sensor_rate=sensor_rate,
actuator_rate=actuator_rate,
image_rate=image_rate,
render=True,
)
env = Flatten(
EagerxEnv(
name="render_env", rate=sensor_rate, graph=graph, engine=engine_real, step_fn=step_fn, reset_fn=eval_reset_fn
)
)
model = sb.SAC.load(f"{LOG_DIR}/models/{model_name}")
util.eval_env(model, env, eval_eps, gif_file=f"{LOG_DIR}/{model_name}_sync.gif", real_delay=real_delay)
| 28.944444 | 121 | 0.702975 |
381b5d403c0f6cd963797c794a2634cc59672c6f | 22,448 | py | Python | CodonSubstitution/build/biopython/Bio/MarkovModel.py | JackCurragh/DARNED | 13963d129bd8f69fb1106ad1f47394b3211a939c | [
"MIT"
] | 37 | 2015-02-24T18:58:30.000Z | 2021-03-07T21:22:18.000Z | Bio/MarkovModel.py | mchelem/biopython | 2daa5fee06077bbada8b89fe6032c3f123318fc2 | [
"PostgreSQL"
] | 12 | 2016-06-09T21:57:00.000Z | 2020-09-11T18:48:51.000Z | Bio/MarkovModel.py | mchelem/biopython | 2daa5fee06077bbada8b89fe6032c3f123318fc2 | [
"PostgreSQL"
] | 19 | 2016-03-26T08:15:17.000Z | 2021-04-12T05:03:29.000Z | """
This is an implementation of a state-emitting MarkovModel. I am using
terminology similar to Manning and Schutze.
Functions:
train_bw Train a markov model using the Baum-Welch algorithm.
train_visible Train a visible markov model using MLE.
find_states Find the a state sequence that explains some observations.
load Load a MarkovModel.
save Save a MarkovModel.
Classes:
MarkovModel Holds the description of a markov model
"""
import numpy
try:
logaddexp = numpy.logaddexp
except AttributeError:
# Numpy versions older than 1.3 do not contain logaddexp.
# Once we require Numpy version 1.3 or later, we should revisit this
# module to see if we can simplify some of the other functions in
# this module.
import warnings
warnings.warn("For optimal speed, please update to Numpy version 1.3 or later (current version is %s)" % numpy.__version__)
def logaddexp(logx, logy):
if logy - logx > 100:
return logy
elif logx - logy > 100:
return logx
minxy = min(logx, logy)
return minxy + numpy.log(numpy.exp(logx-minxy) + numpy.exp(logy-minxy))
def itemindex(values):
d = {}
entries = enumerate(values[::-1])
n = len(values)-1
    for index, key in entries:
        d[key] = n - index
return d
numpy.random.seed()
VERY_SMALL_NUMBER = 1E-300
LOG0 = numpy.log(VERY_SMALL_NUMBER)
class MarkovModel(object):
def __init__(self, states, alphabet,
p_initial=None, p_transition=None, p_emission=None):
self.states = states
self.alphabet = alphabet
self.p_initial = p_initial
self.p_transition = p_transition
self.p_emission = p_emission
def __str__(self):
import StringIO
handle = StringIO.StringIO()
save(self, handle)
handle.seek(0)
return handle.read()
def _readline_and_check_start(handle, start):
line = handle.readline()
if not line.startswith(start):
raise ValueError("I expected %r but got %r" % (start, line))
return line
def load(handle):
"""load(handle) -> MarkovModel()"""
# Load the states.
line = _readline_and_check_start(handle, "STATES:")
states = line.split()[1:]
# Load the alphabet.
line = _readline_and_check_start(handle, "ALPHABET:")
alphabet = line.split()[1:]
mm = MarkovModel(states, alphabet)
N, M = len(states), len(alphabet)
# Load the initial probabilities.
mm.p_initial = numpy.zeros(N)
line = _readline_and_check_start(handle, "INITIAL:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_initial[i] = float(line.split()[-1])
# Load the transition.
mm.p_transition = numpy.zeros((N, N))
line = _readline_and_check_start(handle, "TRANSITION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_transition[i,:] = map(float, line.split()[1:])
# Load the emission.
mm.p_emission = numpy.zeros((N, M))
line = _readline_and_check_start(handle, "EMISSION:")
for i in range(len(states)):
line = _readline_and_check_start(handle, " %s:" % states[i])
mm.p_emission[i,:] = map(float, line.split()[1:])
return mm
def save(mm, handle):
"""save(mm, handle)"""
# This will fail if there are spaces in the states or alphabet.
w = handle.write
w("STATES: %s\n" % ' '.join(mm.states))
w("ALPHABET: %s\n" % ' '.join(mm.alphabet))
w("INITIAL:\n")
for i in range(len(mm.p_initial)):
w(" %s: %g\n" % (mm.states[i], mm.p_initial[i]))
w("TRANSITION:\n")
for i in range(len(mm.p_transition)):
x = map(str, mm.p_transition[i])
w(" %s: %s\n" % (mm.states[i], ' '.join(x)))
w("EMISSION:\n")
for i in range(len(mm.p_emission)):
x = map(str, mm.p_emission[i])
w(" %s: %s\n" % (mm.states[i], ' '.join(x)))
# XXX allow them to specify starting points
def train_bw(states, alphabet, training_data,
pseudo_initial=None, pseudo_transition=None, pseudo_emission=None,
update_fn=None,
):
"""train_bw(states, alphabet, training_data[, pseudo_initial]
[, pseudo_transition][, pseudo_emission][, update_fn]) -> MarkovModel
Train a MarkovModel using the Baum-Welch algorithm. states is a list
of strings that describe the names of each state. alphabet is a
list of objects that indicate the allowed outputs. training_data
is a list of observations. Each observation is a list of objects
from the alphabet.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix, before
normalization.
update_fn is an optional callback that takes parameters
(iteration, log_likelihood). It is called once per iteration.
"""
N, M = len(states), len(alphabet)
if not training_data:
raise ValueError("No training data given.")
if pseudo_initial!=None:
pseudo_initial = numpy.asarray(pseudo_initial)
if pseudo_initial.shape != (N,):
raise ValueError("pseudo_initial not shape len(states)")
if pseudo_transition!=None:
pseudo_transition = numpy.asarray(pseudo_transition)
if pseudo_transition.shape != (N,N):
raise ValueError("pseudo_transition not shape " + \
"len(states) X len(states)")
if pseudo_emission!=None:
pseudo_emission = numpy.asarray(pseudo_emission)
if pseudo_emission.shape != (N,M):
raise ValueError("pseudo_emission not shape " + \
"len(states) X len(alphabet)")
# Training data is given as a list of members of the alphabet.
# Replace those with indexes into the alphabet list for easier
# computation.
training_outputs = []
indexes = itemindex(alphabet)
for outputs in training_data:
training_outputs.append([indexes[x] for x in outputs])
# Do some sanity checking on the outputs.
lengths = map(len, training_outputs)
if min(lengths) == 0:
raise ValueError("I got training data with outputs of length 0")
# Do the training with baum welch.
x = _baum_welch(N, M, training_outputs,
pseudo_initial=pseudo_initial,
pseudo_transition=pseudo_transition,
pseudo_emission=pseudo_emission,
update_fn=update_fn)
p_initial, p_transition, p_emission = x
return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
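# --- Usage sketch (editor's addition, not part of the original module) ---
# Toy example of unsupervised training with train_bw(); the states, alphabet
# and observation sequences are invented for illustration:
#
#   states = ["rainy", "sunny"]
#   alphabet = ["walk", "shop", "clean"]
#   observations = [["walk", "shop", "clean", "walk"], ["clean", "clean", "shop"]]
#   mm = train_bw(states, alphabet, observations)
#   print(find_states(mm, ["walk", "shop"]))  # most likely hidden state paths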
MAX_ITERATIONS = 1000
def _baum_welch(N, M, training_outputs,
p_initial=None, p_transition=None, p_emission=None,
pseudo_initial=None, pseudo_transition=None,
pseudo_emission=None, update_fn=None):
# Returns (p_initial, p_transition, p_emission)
if p_initial==None:
p_initial = _random_norm(N)
else:
p_initial = _copy_and_check(p_initial, (N,))
if p_transition==None:
p_transition = _random_norm((N,N))
else:
p_transition = _copy_and_check(p_transition, (N,N))
if p_emission==None:
p_emission = _random_norm((N,M))
else:
p_emission = _copy_and_check(p_emission, (N,M))
# Do all the calculations in log space to avoid underflows.
lp_initial, lp_transition, lp_emission = map(
numpy.log, (p_initial, p_transition, p_emission))
if pseudo_initial!=None:
lpseudo_initial = numpy.log(pseudo_initial)
else:
lpseudo_initial = None
if pseudo_transition!=None:
lpseudo_transition = numpy.log(pseudo_transition)
else:
lpseudo_transition = None
if pseudo_emission!=None:
lpseudo_emission = numpy.log(pseudo_emission)
else:
lpseudo_emission = None
# Iterate through each sequence of output, updating the parameters
# to the HMM. Stop when the log likelihoods of the sequences
# stops varying.
prev_llik = None
for i in range(MAX_ITERATIONS):
llik = LOG0
for outputs in training_outputs:
x = _baum_welch_one(
N, M, outputs,
lp_initial, lp_transition, lp_emission,
lpseudo_initial, lpseudo_transition, lpseudo_emission,)
llik += x
if update_fn is not None:
update_fn(i, llik)
if prev_llik is not None and numpy.fabs(prev_llik-llik) < 0.1:
break
prev_llik = llik
else:
raise RuntimeError("HMM did not converge in %d iterations" \
% MAX_ITERATIONS)
# Return everything back in normal space.
return map(numpy.exp, (lp_initial, lp_transition, lp_emission))
def _baum_welch_one(N, M, outputs,
lp_initial, lp_transition, lp_emission,
lpseudo_initial, lpseudo_transition, lpseudo_emission):
# Do one iteration of Baum-Welch based on a sequence of output.
# NOTE: This will change the values of lp_initial, lp_transition,
# and lp_emission in place.
T = len(outputs)
fmat = _forward(N, T, lp_initial, lp_transition, lp_emission, outputs)
bmat = _backward(N, T, lp_transition, lp_emission, outputs)
# Calculate the probability of traversing each arc for any given
# transition.
lp_arc = numpy.zeros((N, N, T))
for t in range(T):
k = outputs[t]
lp_traverse = numpy.zeros((N, N)) # P going over one arc.
for i in range(N):
for j in range(N):
# P(getting to this arc)
# P(making this transition)
# P(emitting this character)
# P(going to the end)
lp = fmat[i][t] + \
lp_transition[i][j] + \
lp_emission[i][k] + \
bmat[j][t+1]
lp_traverse[i][j] = lp
# Normalize the probability for this time step.
lp_arc[:,:,t] = lp_traverse - _logsum(lp_traverse)
# Sum of all the transitions out of state i at time t.
lp_arcout_t = numpy.zeros((N, T))
for t in range(T):
for i in range(N):
lp_arcout_t[i][t] = _logsum(lp_arc[i,:,t])
# Sum of all the transitions out of state i.
lp_arcout = numpy.zeros(N)
for i in range(N):
lp_arcout[i] = _logsum(lp_arcout_t[i,:])
# UPDATE P_INITIAL.
lp_initial = lp_arcout_t[:,0]
if lpseudo_initial!=None:
lp_initial = _logvecadd(lp_initial, lpseudo_initial)
lp_initial = lp_initial - _logsum(lp_initial)
# UPDATE P_TRANSITION. p_transition[i][j] is the sum of all the
# transitions from i to j, normalized by the sum of the
# transitions out of i.
for i in range(N):
for j in range(N):
lp_transition[i][j] = _logsum(lp_arc[i,j,:]) - lp_arcout[i]
if lpseudo_transition!=None:
lp_transition[i] = _logvecadd(lp_transition[i], lpseudo_transition)
lp_transition[i] = lp_transition[i] - _logsum(lp_transition[i])
# UPDATE P_EMISSION. lp_emission[i][k] is the sum of all the
# transitions out of i when k is observed, divided by the sum of
# the transitions out of i.
for i in range(N):
ksum = numpy.zeros(M)+LOG0 # ksum[k] is the sum of all i with k.
for t in range(T):
k = outputs[t]
for j in range(N):
ksum[k] = logaddexp(ksum[k], lp_arc[i,j,t])
ksum = ksum - _logsum(ksum) # Normalize
if lpseudo_emission!=None:
ksum = _logvecadd(ksum, lpseudo_emission[i])
ksum = ksum - _logsum(ksum) # Renormalize
lp_emission[i,:] = ksum
# Calculate the log likelihood of the output based on the forward
# matrix. Since the parameters of the HMM has changed, the log
# likelihoods are going to be a step behind, and we might be doing
# one extra iteration of training. The alternative is to rerun
# the _forward algorithm and calculate from the clean one, but
# that may be more expensive than overshooting the training by one
# step.
return _logsum(fmat[:,T])
def _forward(N, T, lp_initial, lp_transition, lp_emission, outputs):
# Implement the forward algorithm. This actually calculates a
# Nx(T+1) matrix, where the last column is the total probability
# of the output.
matrix = numpy.zeros((N, T+1))
# Initialize the first column to be the initial values.
matrix[:,0] = lp_initial
for t in range(1, T+1):
k = outputs[t-1]
for j in range(N):
# The probability of the state is the sum of the
# transitions from all the states from time t-1.
lprob = LOG0
for i in range(N):
lp = matrix[i][t-1] + \
lp_transition[i][j] + \
lp_emission[i][k]
lprob = logaddexp(lprob, lp)
matrix[j][t] = lprob
return matrix
def _backward(N, T, lp_transition, lp_emission, outputs):
matrix = numpy.zeros((N, T+1))
for t in range(T-1, -1, -1):
k = outputs[t]
for i in range(N):
# The probability of the state is the sum of the
# transitions from all the states from time t+1.
lprob = LOG0
for j in range(N):
lp = matrix[j][t+1] + \
lp_transition[i][j] + \
lp_emission[i][k]
lprob = logaddexp(lprob, lp)
matrix[i][t] = lprob
return matrix
def train_visible(states, alphabet, training_data,
pseudo_initial=None, pseudo_transition=None,
pseudo_emission=None):
"""train_visible(states, alphabet, training_data[, pseudo_initial]
[, pseudo_transition][, pseudo_emission]) -> MarkovModel
    Train a visible MarkovModel using maximum likelihood estimates
for each of the parameters. states is a list of strings that
describe the names of each state. alphabet is a list of objects
that indicate the allowed outputs. training_data is a list of
(outputs, observed states) where outputs is a list of the emission
from the alphabet, and observed states is a list of states from
states.
pseudo_initial, pseudo_transition, and pseudo_emission are
optional parameters that you can use to assign pseudo-counts to
different matrices. They should be matrices of the appropriate
size that contain numbers to add to each parameter matrix
"""
N, M = len(states), len(alphabet)
if pseudo_initial!=None:
pseudo_initial = numpy.asarray(pseudo_initial)
if pseudo_initial.shape != (N,):
raise ValueError("pseudo_initial not shape len(states)")
if pseudo_transition!=None:
pseudo_transition = numpy.asarray(pseudo_transition)
if pseudo_transition.shape != (N,N):
raise ValueError("pseudo_transition not shape " + \
"len(states) X len(states)")
if pseudo_emission!=None:
pseudo_emission = numpy.asarray(pseudo_emission)
if pseudo_emission.shape != (N,M):
raise ValueError("pseudo_emission not shape " + \
"len(states) X len(alphabet)")
# Training data is given as a list of members of the alphabet.
# Replace those with indexes into the alphabet list for easier
# computation.
training_states, training_outputs = [], []
states_indexes = itemindex(states)
outputs_indexes = itemindex(alphabet)
for toutputs, tstates in training_data:
if len(tstates) != len(toutputs):
raise ValueError("states and outputs not aligned")
training_states.append([states_indexes[x] for x in tstates])
training_outputs.append([outputs_indexes[x] for x in toutputs])
x = _mle(N, M, training_outputs, training_states,
pseudo_initial, pseudo_transition, pseudo_emission)
p_initial, p_transition, p_emission = x
return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
def _mle(N, M, training_outputs, training_states, pseudo_initial,
pseudo_transition, pseudo_emission):
# p_initial is the probability that a sequence of states starts
# off with a particular one.
p_initial = numpy.zeros(N)
if pseudo_initial:
p_initial = p_initial + pseudo_initial
for states in training_states:
p_initial[states[0]] += 1
p_initial = _normalize(p_initial)
# p_transition is the probability that a state leads to the next
# one. C(i,j)/C(i) where i and j are states.
p_transition = numpy.zeros((N,N))
if pseudo_transition:
p_transition = p_transition + pseudo_transition
for states in training_states:
for n in range(len(states)-1):
i, j = states[n], states[n+1]
p_transition[i, j] += 1
for i in range(len(p_transition)):
p_transition[i,:] = p_transition[i,:] / sum(p_transition[i,:])
# p_emission is the probability of an output given a state.
# C(s,o)|C(s) where o is an output and s is a state.
    # Initialize every (state, output) count to one so unseen emissions keep a
    # small nonzero probability, then add any user-supplied pseudo-counts.
    p_emission = numpy.ones((N,M))
    if pseudo_emission:
        p_emission = p_emission + pseudo_emission
for outputs, states in zip(training_outputs, training_states):
for o, s in zip(outputs, states):
p_emission[s, o] += 1
for i in range(len(p_emission)):
p_emission[i,:] = p_emission[i,:] / sum(p_emission[i,:])
return p_initial, p_transition, p_emission
def _argmaxes(vector, allowance=None):
return [numpy.argmax(vector)]
def find_states(markov_model, output):
"""find_states(markov_model, output) -> list of (states, score)"""
mm = markov_model
N = len(mm.states)
# _viterbi does calculations in log space. Add a tiny bit to the
# matrices so that the logs will not break.
x = mm.p_initial + VERY_SMALL_NUMBER
y = mm.p_transition + VERY_SMALL_NUMBER
z = mm.p_emission + VERY_SMALL_NUMBER
lp_initial, lp_transition, lp_emission = map(numpy.log, (x, y, z))
# Change output into a list of indexes into the alphabet.
indexes = itemindex(mm.alphabet)
output = [indexes[x] for x in output]
# Run the viterbi algorithm.
results = _viterbi(N, lp_initial, lp_transition, lp_emission, output)
for i in range(len(results)):
states, score = results[i]
results[i] = [mm.states[x] for x in states], numpy.exp(score)
return results
def _viterbi(N, lp_initial, lp_transition, lp_emission, output):
# The Viterbi algorithm finds the most likely set of states for a
# given output. Returns a list of states.
T = len(output)
# Store the backtrace in a NxT matrix.
backtrace = [] # list of indexes of states in previous timestep.
for i in range(N):
backtrace.append([None] * T)
# Store the best scores.
scores = numpy.zeros((N, T))
scores[:,0] = lp_initial + lp_emission[:,output[0]]
for t in range(1, T):
k = output[t]
for j in range(N):
# Find the most likely place it came from.
i_scores = scores[:,t-1] + \
lp_transition[:,j] + \
lp_emission[j,k]
indexes = _argmaxes(i_scores)
scores[j,t] = i_scores[indexes[0]]
backtrace[j][t] = indexes
# Do the backtrace. First, find a good place to start. Then,
# we'll follow the backtrace matrix to find the list of states.
# In the event of ties, there may be multiple paths back through
# the matrix, which implies a recursive solution. We'll simulate
# it by keeping our own stack.
in_process = [] # list of (t, states, score)
results = [] # return values. list of (states, score)
indexes = _argmaxes(scores[:,T-1]) # pick the first place
for i in indexes:
in_process.append((T-1, [i], scores[i][T-1]))
while in_process:
t, states, score = in_process.pop()
if t == 0:
results.append((states, score))
else:
indexes = backtrace[states[0]][t]
for i in indexes:
in_process.append((t-1, [i]+states, score))
return results
def _normalize(matrix):
# Make sure numbers add up to 1.0
if len(matrix.shape) == 1:
matrix = matrix / float(sum(matrix))
elif len(matrix.shape) == 2:
# Normalize by rows.
for i in range(len(matrix)):
matrix[i,:] = matrix[i,:] / sum(matrix[i,:])
else:
raise ValueError("I cannot handle matrixes of that shape")
return matrix
def _uniform_norm(shape):
matrix = numpy.ones(shape)
return _normalize(matrix)
def _random_norm(shape):
matrix = numpy.random.random(shape)
return _normalize(matrix)
def _copy_and_check(matrix, desired_shape):
# Copy the matrix.
matrix = numpy.array(matrix, copy=1)
# Check the dimensions.
if matrix.shape != desired_shape:
raise ValueError("Incorrect dimension")
# Make sure it's normalized.
if len(matrix.shape) == 1:
if numpy.fabs(sum(matrix)-1.0) > 0.01:
raise ValueError("matrix not normalized to 1.0")
elif len(matrix.shape) == 2:
for i in range(len(matrix)):
if numpy.fabs(sum(matrix[i])-1.0) > 0.01:
raise ValueError("matrix %d not normalized to 1.0" % i)
else:
raise ValueError("I don't handle matrices > 2 dimensions")
return matrix
def _logsum(matrix):
if len(matrix.shape) > 1:
vec = numpy.reshape(matrix, (numpy.product(matrix.shape),))
else:
vec = matrix
sum = LOG0
for num in vec:
sum = logaddexp(sum, num)
return sum
def _logvecadd(logvec1, logvec2):
assert len(logvec1) == len(logvec2), "vectors aren't the same length"
sumvec = numpy.zeros(len(logvec1))
for i in range(len(logvec1)):
sumvec[i] = logaddexp(logvec1[i], logvec2[i])
return sumvec
def _exp_logsum(numbers):
sum = _logsum(numbers)
return numpy.exp(sum)
| 37.98308 | 127 | 0.628564 |
d4a7d95a9f223064052da15a9a7a9eecfe46cfa7 | 3,810 | py | Python | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | null | null | null | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | 9 | 2021-02-21T21:53:03.000Z | 2021-11-05T06:06:55.000Z | atmosphere/custom_activity/base_class.py | ambiata/atmosphere-python-sdk | 48880a8553000cdea59d63b0fba49e1f0f482784 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Tuple
from requests import Response
from .pydantic_models import (AppliedExclusionConditionsResponse,
BiasAttributeConfigListResponse,
ComputeRewardResponse, DefaultPredictionResponse,
ExclusionRuleConditionListResponse,
PredictionResponsePayloadFormatListResponse)
class BaseActivityCustomCode(ABC):
"""
The main class of this repository: the one to be implemented
"""
is_for_mocker: bool
def __init__(self, is_for_mocker: bool = False):
self.is_for_mocker = is_for_mocker
@abstractmethod
def validate_prediction_request(self, prediction_request: dict) -> None:
"""Raise a ValidationError if the received prediction request is not valid"""
@abstractmethod
def validate_outcome_request(self, outcome_request: dict) -> None:
"""Raise a ValidationError if the received outcome request is not valid"""
@abstractmethod
def compute_reward(self, outcome_request: dict) -> ComputeRewardResponse:
"""From an outcome, compute the reward"""
@abstractmethod
def get_module_version(self) -> str:
"""Return the version of the module."""
@abstractmethod
def send_mock_prediction_request(
self, url_prediction_endpoint: str
) -> Tuple[Response, dict]:
"""
        Send a mock request to the provided URL and return the corresponding response
with extra information if required for computing the prediction.
The response and dictionary will be provided to
the `send_mock_outcome_request`.
"""
@abstractmethod
def send_mock_outcome_request(
self,
url_outcome_endpoint: str,
prediction_response: Response,
info_from_prediction: dict,
) -> Response:
"""
        Send a mock request to the provided URL and return the corresponding response.
Provide the prediction response and extra information created while
creating the prediction request from `send_mock_prediction_request`.
"""
def get_prediction_response_payload_formats(
self,
) -> PredictionResponsePayloadFormatListResponse:
"""
Return the list of available format of the prediction payload.
Every format should have a name and a description
The name of the format should be unique.
"""
return {"prediction_response_payload_formats": []}
def format_prediction_payload_response(
self,
default_prediction_response: DefaultPredictionResponse,
payload_format: str, # noqa pylint: disable=unused-argument
) -> dict:
"""
You can format the prediction the way you want based
on the information returned by default
"""
return default_prediction_response
def get_exclusion_rule_conditions(self) -> ExclusionRuleConditionListResponse:
"""
Define the exclusion rules for the activity
"""
return ExclusionRuleConditionListResponse(exclusion_rule_conditions=[])
def get_applied_exclusion_conditions(
self, prediction_request: dict # noqa pylint: disable=unused-argument
) -> AppliedExclusionConditionsResponse:
"""
Define the exclusion rules for the activity
"""
return AppliedExclusionConditionsResponse(applied_exclusion_conditions=[])
def get_bias_attribute_configs(self) -> BiasAttributeConfigListResponse:
"""
Define the bias attribute configs, these decide which attributes may be
used by atmospherex as bias attributes
"""
return BiasAttributeConfigListResponse(bias_attribute_configs=[])
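# --- Illustrative sketch (editor's addition, not part of the SDK) ---
# A concrete activity subclasses BaseActivityCustomCode and fills in the
# abstract hooks; the class name and field check below are invented:
#
#   class MyActivity(BaseActivityCustomCode):
#       def validate_prediction_request(self, prediction_request: dict) -> None:
#           if "customer_id" not in prediction_request:
#               raise ValueError("prediction request is missing 'customer_id'")
#
#       # validate_outcome_request, compute_reward, get_module_version and the
#       # send_mock_*_request methods are implemented along the same lines.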
| 36.634615 | 87 | 0.684777 |
f3b2eefcce0cdd44e830799547478d3b5cf37700 | 623 | py | Python | examples/pylab_examples/stackplot_demo2.py | hmeine/matplotlib | b8cb6c37bef913d3352ba0a6bec3109b161f406f | [
"MIT",
"BSD-3-Clause"
] | 16 | 2016-06-14T19:45:35.000Z | 2020-11-30T19:02:58.000Z | examples/pylab_examples/stackplot_demo2.py | hmeine/matplotlib | b8cb6c37bef913d3352ba0a6bec3109b161f406f | [
"MIT",
"BSD-3-Clause"
] | 7 | 2015-05-08T19:36:25.000Z | 2015-06-30T15:32:17.000Z | examples/pylab_examples/stackplot_demo2.py | hmeine/matplotlib | b8cb6c37bef913d3352ba0a6bec3109b161f406f | [
"MIT",
"BSD-3-Clause"
] | 6 | 2015-06-05T03:34:06.000Z | 2022-01-25T09:07:10.000Z | import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
def layers(n, m):
"""
Return *n* random Gaussian mixtures, each of length *m*.
"""
def bump(a):
x = 1 / (.1 + np.random.random())
y = 2 * np.random.random() - .5
z = 10 / (.1 + np.random.random())
for i in range(m):
w = (i / float(m) - y) * z
a[i] += x * np.exp(-w * w)
a = np.zeros((m, n))
for i in range(n):
for j in range(5):
bump(a[:, i])
return a
d = layers(3, 100)
plt.subplots()
plt.stackplot(range(100), d.T, baseline='wiggle')
plt.show()
| 23.074074 | 60 | 0.500803 |
c50941fe7e35b9d056d1ed5ecb3fda8f5b7d5f57 | 1,048 | py | Python | tests/animation/test_animation_examples.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | tests/animation/test_animation_examples.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | tests/animation/test_animation_examples.py | OrangeUtan/MCMetagen | 0293ea14bf1c6b1bae58741f9876ba662930b43d | [
"MIT"
] | null | null | null | from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from pytest_insta import SnapshotFixture
import mcanitexgen
@pytest.mark.parametrize(
"file",
[
"steve.animation.py",
"dog.animation.py",
"unweighted_seq_in_weighted.animation.py",
"weighted_blinking.animation.py",
"weighted_blinking_2.animation.py",
"simple_blinking.animation.py",
],
)
def test(file: str, snapshot: SnapshotFixture):
animations = mcanitexgen.animation.load_animations_from_file(
Path("tests/animation/examples/" + file)
)
with patch("builtins.open", new=MagicMock()) as mock_open:
with patch("json.dump", new=MagicMock()) as mock_dump:
mcanitexgen.animation.write_mcmeta_files(animations, Path("out"))
assert mock_dump.call_count == len(animations)
for i, (name, _) in enumerate(animations.items()):
mcmeta = mock_dump.call_args_list[i][0][0]
assert snapshot(f"{name}.json") == mcmeta
| 30.823529 | 77 | 0.662214 |
07bbc5d6f9f8266250bd1927c4eb096646a36589 | 1,240 | py | Python | aiida/backends/djsite/db/migrations/0010_process_type.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | aiida/backends/djsite/db/migrations/0010_process_type.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,325 | 2019-07-04T13:41:44.000Z | 2022-03-31T12:17:10.000Z | aiida/backends/djsite/db/migrations/0010_process_type.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2019-07-06T01:42:39.000Z | 2022-03-18T14:20:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Database migration."""
from django.db import migrations, models
from aiida.backends.djsite.db.migrations import upgrade_schema_version
REVISION = '1.0.10'
DOWN_REVISION = '1.0.9'
class Migration(migrations.Migration):
"""Database migration."""
dependencies = [
('db', '0009_base_data_plugin_type_string'),
]
operations = [
migrations.AddField(
model_name='dbnode', name='process_type', field=models.CharField(max_length=255, db_index=True, null=True)
),
upgrade_schema_version(REVISION, DOWN_REVISION)
]
| 37.575758 | 118 | 0.526613 |
388a14205859dd4d6c221321c26968d186f07a06 | 6,441 | py | Python | src/trydjango/settings.py | dinhnhobao/NUSBuy | 87af7b44f174463e878d543ec8e30634026e0324 | [
"MIT"
] | null | null | null | src/trydjango/settings.py | dinhnhobao/NUSBuy | 87af7b44f174463e878d543ec8e30634026e0324 | [
"MIT"
] | 16 | 2019-07-15T09:43:24.000Z | 2022-03-11T23:53:53.000Z | src/trydjango/settings.py | dinhnhobao/NUSBuy | 87af7b44f174463e878d543ec8e30634026e0324 | [
"MIT"
] | 1 | 2021-01-19T09:37:55.000Z | 2021-01-19T09:37:55.000Z | """
Django settings for trydjango project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@hh&)hed7(d*lqzlipkt)6)hdahnc$vz8*q&gulc%mp^xt5q=3='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'channels',
'chat',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party
'category',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.amazon',
'allauth.socialaccount.providers.discord',
'allauth.socialaccount.providers.dropbox',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.openid',
'allauth.socialaccount.providers.telegram',
'corsheaders',
'rest_auth',
'rest_auth.registration',
'rest_framework',
'rest_framework.utils',
'rest_framework.authtoken',
# own
'pages',
'products',
'login',
'nusopenid',
]
chat_application_url = 'http://localhost:1234/'
CORS_ORIGIN_ALLOW_ALL = True
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trydjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'trydjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
'''
# mysite/settings.py
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'TEST': {
'NAME': os.path.join(BASE_DIR, 'db_test.sqlite3')
}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, '/')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
#LOGIN_URL = ''
LOGIN_REDIRECT_URL = 'home'  # after a successful login, redirect to the 'home' URL pattern
LOGOUT_REDIRECT_URL = 'home'  # after logout, redirect back to 'home'
SITE_ID = 1
#Django all auth settings
AUTHENTICATION_BACKENDS = (
# needed to login by username in Django admin, regardless of allauth
'django.contrib.auth.backends.ModelBackend',
# 'allauth' specific authentication methods, such as login by email
'allauth.account.auth_backends.AuthenticationBackend',
)
#LOGIN_REDIRECT_URL = '/'
SOCIALACCOUNT_PROVIDERS = {
'telegram': {
'TOKEN': 'insert-token-received-from-botfather'
}
}
ACCOUNT_EMAIL_VERIFICATION = 'none'
ASGI_APPLICATION = "trydjango.routing.application"
# mysite/settings.py
# Channels
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
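# Example of how an API client would use the authentication classes above; the
# endpoint path and token value are hypothetical and depend on this project's urls:
#   curl -H "Authorization: Token <key>" http://localhost:8000/products/
# TokenAuthentication reads that header, SessionAuthentication covers the
# browsable API, and BasicAuthentication is mostly useful for testing.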
CORS_ORIGIN_WHITELIST = (
'http://localhost:1234',
'http://10.0.0.8:1234',
)
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_VERIFICATION = 'none'
###SECURITY FEATURES FOR DJANGO###
SECURE_BROWSER_XSS_FILTER = True #Cross site scripting (XSS) protection, (security.W007)
X_FRAME_OPTIONS = 'DENY' #Clickjacking Protection, (security.W019)
#CSRF_COOKIE_SECURE = False #(security.W016)
#SESSION_COOKIE_SECURE = True #only use this to run on SSL(security.W012)
SECURE_CONTENT_TYPE_NOSNIFF = True #(security.W006)
# 3 more unresolved security warnings:
# one left unfixed (hard to fix), two to be fixed in production
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
| 27.292373 | 91 | 0.69632 |
42e540eb329de3cb3f6a073996eddb5331e9953b | 2,221 | py | Python | atom3d/util/rosetta.py | everyday847/atom3d | 96afeeca65f940bba46a55959daf80093c9fec04 | [
"MIT"
] | null | null | null | atom3d/util/rosetta.py | everyday847/atom3d | 96afeeca65f940bba46a55959daf80093c9fec04 | [
"MIT"
] | null | null | null | atom3d/util/rosetta.py | everyday847/atom3d | 96afeeca65f940bba46a55959daf80093c9fec04 | [
"MIT"
] | null | null | null | import io
import os
from pathlib import Path
import subprocess
import pandas as pd
import tqdm
import atom3d.util.file as fi
class Scores(object):
"""
Track and lookup Rosetta score files.
Args:
data_path (Union[str, Path, list[str, Path]]):
Path to silent files.
"""
def __init__(self, data_path):
self._scores = {}
score_paths = fi.find_files(data_path, 'sc')
if len(score_paths) == 0:
raise RuntimeError('No score files found.')
for silent_file in score_paths:
key = self._key_from_silent_file(silent_file)
self._scores[key] = self._parse_scores(silent_file)
self._scores = pd.concat(self._scores).sort_index()
def _parse_scores(self, silent_file):
grep_cmd = f"grep ^SCORE: {silent_file}"
out = subprocess.Popen(
grep_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.getcwd(), shell=True)
(stdout, stderr) = out.communicate()
f = io.StringIO(stdout.decode('utf-8'))
        return pd.read_csv(f, delimiter=r'\s+').drop('SCORE:', axis=1) \
.set_index('description')
def _key_from_silent_file(self, silent_file):
return silent_file.stem.split('.')[0]
def _lookup(self, file_path):
file_path = Path(file_path)
key = (file_path.stem, file_path.name)
if key in self._scores.index:
return self._scores.loc[key]
key = (file_path.parent.stem, file_path.stem)
if key in self._scores.index:
return self._scores.loc[key]
return None
def __call__(self, x, error_if_missing=False):
x['scores'] = self._lookup(x['file_path'])
if x['scores'] is None and error_if_missing:
raise RuntimeError(f'Unable to find scores for {x["file_path"]}')
return x
def remove_missing(self, file_list):
"""Remove examples we cannot find in score files."""
result = []
for i, file_path in tqdm.tqdm(enumerate(file_list), total=len(file_list)):
entry = self._lookup(file_path)
if entry is not None:
result.append(file_path)
return result
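

# Minimal usage sketch; the score directory and decoy paths below are hypothetical
# and only illustrate the layout expected by _lookup():
#   scores = Scores("./scores")                                    # indexes all *.sc files
#   kept = scores.remove_missing(["./decoys/1abc/model_001.pdb"])  # drop unscored decoys
#   sample = scores({"file_path": kept[0]})                        # attaches the score row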
| 31.728571 | 82 | 0.616839 |
2afa13f14317955798960a85bf319aa9b22a2537 | 877 | py | Python | sample/all_methods/getNoteApplicationData.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2016-02-14T13:39:46.000Z | 2021-09-03T16:02:18.000Z | sample/all_methods/getNoteApplicationData.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | sample/all_methods/getNoteApplicationData.py | matthewayne/evernote-sdk-python | 53b0c1263d250b88a1810987b51bc9def586db02 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2018-03-28T01:08:25.000Z | 2018-03-28T01:08:25.000Z | # Import the Evernote client
from evernote.api.client import EvernoteClient
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# GUID of the note to attach the application data to
note_guid = "insert note GUID to attach key-value storage to here"
#Returns a lazy map of all the application data associated with the note
application_data = note_store.getNoteApplicationData(note_guid)
print "Note has %s application data entries:" % len(application_data.fullMap)
for key, value in application_data.fullMap.iteritems():
print " * %s: '%s'" % (key, value)
| 35.08 | 77 | 0.77081 |
5d94ae2c3d0b6b612d5c9d4663466456fba0578e | 3,770 | py | Python | webdataset/tests/test_writer.py | techthiyanes/webdataset | 682b30ee484d719a954554654d2d6baa213f9371 | [
"BSD-3-Clause"
] | null | null | null | webdataset/tests/test_writer.py | techthiyanes/webdataset | 682b30ee484d719a954554654d2d6baa213f9371 | [
"BSD-3-Clause"
] | null | null | null | webdataset/tests/test_writer.py | techthiyanes/webdataset | 682b30ee484d719a954554654d2d6baa213f9371 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
import webdataset as wds
from webdataset import writer
def getkeys(sample):
return set(x for x in sample if not x.startswith("_"))
def test_writer(tmpdir):
with writer.TarWriter(f"{tmpdir}/writer.tar") as sink:
sink.write(dict(__key__="a", txt="hello", cls="3"))
os.system(f"ls -l {tmpdir}")
ftype = os.popen(f"file {tmpdir}/writer.tar").read()
assert "compress" not in ftype, ftype
ds = wds.DataPipeline(
wds.SimpleShardList(f"{tmpdir}/writer.tar"),
wds.tarfile_samples,
wds.decode("rgb")
)
for sample in ds:
assert getkeys(sample) == set("txt cls".split()), getkeys(sample)
break
def test_writer2(tmpdir):
with writer.TarWriter(f"{tmpdir}/writer2.tgz") as sink:
sink.write(dict(__key__="a", txt="hello", cls="3"))
os.system(f"ls -l {tmpdir}")
ftype = os.popen(f"file {tmpdir}/writer2.tgz").read()
assert "compress" in ftype, ftype
ds = wds.DataPipeline(
wds.SimpleShardList(f"{tmpdir}/writer2.tgz"),
wds.tarfile_samples,
wds.decode("rgb")
)
for sample in ds:
assert getkeys(sample) == set("txt cls".split()), getkeys(sample)
break
def test_writer3(tmpdir):
with writer.TarWriter(f"{tmpdir}/writer3.tar") as sink:
sink.write(dict(__key__="a", pth=["abc"], pyd=dict(x=0)))
os.system(f"ls -l {tmpdir}")
os.system(f"tar tvf {tmpdir}/writer3.tar")
ftype = os.popen(f"file {tmpdir}/writer3.tar").read()
assert "compress" not in ftype, ftype
ds = wds.DataPipeline(
wds.SimpleShardList(f"{tmpdir}/writer3.tar"),
wds.tarfile_samples,
wds.decode("rgb")
)
for sample in ds:
assert getkeys(sample) == set("pth pyd".split())
assert isinstance(sample["pyd"], dict)
assert sample["pyd"] == dict(x=0)
assert isinstance(sample["pth"], list)
assert sample["pth"] == ["abc"]
def test_writer4(tmpdir):
with writer.TarWriter(f"{tmpdir}/writer4.tar") as sink:
sink.write(dict(__key__="a", ten=np.zeros((3, 3)), tb=[np.ones(1), np.ones(2)]))
os.system(f"ls -l {tmpdir}")
os.system(f"tar tvf {tmpdir}/writer4.tar")
ftype = os.popen(f"file {tmpdir}/writer4.tar").read()
assert "compress" not in ftype, ftype
ds = wds.DataPipeline(
wds.SimpleShardList(f"{tmpdir}/writer4.tar"),
wds.tarfile_samples,
wds.decode(),
)
for sample in ds:
assert getkeys(sample) == set("tb ten".split())
assert isinstance(sample["ten"], list)
assert isinstance(sample["ten"][0], np.ndarray)
assert sample["ten"][0].shape == (3, 3)
assert isinstance(sample["tb"], list)
assert len(sample["tb"]) == 2
assert len(sample["tb"][0]) == 1
assert len(sample["tb"][1]) == 2
assert sample["tb"][0][0] == 1.0
def test_writer_pipe(tmpdir):
with writer.TarWriter(f"pipe:cat > {tmpdir}/writer_pipe.tar") as sink:
sink.write(dict(__key__="a", txt="hello", cls="3"))
os.system(f"ls -l {tmpdir}")
ds = wds.DataPipeline(
wds.SimpleShardList(f"{tmpdir}/writer_pipe.tar"),
wds.tarfile_samples,
wds.decode("rgb")
)
for sample in ds:
assert getkeys(sample) == set("txt cls".split())
break
def test_shardwriter(tmpdir):
def post(fname):
assert fname is not None
with writer.ShardWriter(
f"{tmpdir}/shards-%04d.tar", maxcount=5, post=post, encoder=False
) as sink:
for i in range(50):
sink.write(dict(__key__=str(i), txt=b"hello", cls=b"3"))
os.system(f"ls -l {tmpdir}")
ftype = os.popen(f"file {tmpdir}/shards-0000.tar").read()
assert "compress" not in ftype, ftype
| 31.680672 | 88 | 0.606101 |
85c038f287a15f155d44ce8f2dd76aba7e25e983 | 349 | py | Python | tools/lib/lazy_property.py | loudsun1997/openpilot | 68c1a666a03a381768673a1b29a73f9131f0ed62 | [
"MIT"
] | 251 | 2019-07-12T05:14:20.000Z | 2022-03-30T21:05:22.000Z | tools/lib/lazy_property.py | loudsun1997/openpilot | 68c1a666a03a381768673a1b29a73f9131f0ed62 | [
"MIT"
] | 66 | 2020-04-09T20:27:57.000Z | 2022-01-27T14:39:24.000Z | tools/lib/lazy_property.py | loudsun1997/openpilot | 68c1a666a03a381768673a1b29a73f9131f0ed62 | [
"MIT"
] | 284 | 2019-07-29T13:14:19.000Z | 2022-03-30T17:26:47.000Z | class lazy_property(object):
"""Defines a property whose value will be computed only once and as needed.
This can only be used on instance methods.
"""
def __init__(self, func):
self._func = func
def __get__(self, obj_self, cls):
value = self._func(obj_self)
setattr(obj_self, self._func.__name__, value)
return value
| 26.846154 | 77 | 0.69914 |
7423982821b1fa826fff78aae76d7cd31152734a | 15,600 | py | Python | babyai/rl/algos/rcppo.py | Genius1237/babyai | afe2c4456f20741ad6ec14a907ad4b500a807fec | [
"BSD-3-Clause"
] | 2 | 2020-01-27T12:16:05.000Z | 2020-11-25T17:43:50.000Z | babyai/rl/algos/rcppo.py | Genius1237/babyai | afe2c4456f20741ad6ec14a907ad4b500a807fec | [
"BSD-3-Clause"
] | null | null | null | babyai/rl/algos/rcppo.py | Genius1237/babyai | afe2c4456f20741ad6ec14a907ad4b500a807fec | [
"BSD-3-Clause"
] | null | null | null | from babyai.rl.algos import PPOAlgo
from babyai.rl.utils import ParallelEnv
import babyai
import copy
import random
import time
import logging
import numpy as np
from multiprocessing import Process, Pipe
from multiprocessing.managers import BaseManager
import gym
import gc
import traceback
import sys
import os
import itertools
import math
def generator(env_name, demo_loc, curr_method):
demos = babyai.utils.demos.load_demos(demo_loc)
seed = 0
max_len = max([len(demo[3]) for demo in demos]) -1
envs = []
for i in range(len(demos)):
env = gym.make(env_name)
env.seed(seed+i)
env.reset()
envs.append(env)
states = []
curr_done = False
prob = 0
for ll in range(max_len):
if curr_method == 'log':
prob += 2**ll
else:
prob = int(curr_method)*len(demos)
prob = min(prob,max_len)
if ll == max_len - 1:
curr_done=True
for i,demo in enumerate(demos):
actions = demo[3]
env = copy.deepcopy(envs[i])
n_steps = len(actions) -1
for j in range(n_steps-ll):
_,_,done,_ = env.step(actions[j].value)
if random.randint(1,prob) == 1:
states.append(env)
env.step_count = 0
env.count=0
if curr_method == 'log':
if math.log2(ll+2) == int(math.log2(ll+2)) or curr_done:
yield states,curr_done
states = []
else:
num = int(curr_method)
if ll%num == num-1 or curr_done:
yield states,curr_done
states = []
def worker(conn, random_seed, env_name, demo_loc, curr_method):
#babyai.utils.seed(0)
random.seed(random_seed)
start_state_generator = generator(env_name, demo_loc, curr_method)
i=0
#good_start_states = []
for good_start_states,curr_done in start_state_generator:
#good_start_states.extend(good_start_states_u)
if i==0:
i+=1
env = copy.deepcopy(random.choice(good_start_states))
else:
if curr_done:
conn.send("curr_done")
else:
conn.send("done")
while True:
try:
cmd, data = conn.recv()
if cmd == "step":
obs, reward, done, info = env.step(data)
if done:
env = copy.deepcopy(random.choice(good_start_states))
obs = env.gen_obs()
conn.send((obs, reward, done, info))
elif cmd == "reset":
env = copy.deepcopy(random.choice(good_start_states))
obs = env.gen_obs()
conn.send(obs)
elif cmd == "print":
#print(env,env.mission)
conn.send(env.count)
elif cmd == "update":
break
else:
raise NotImplementedError
except:
traceback.print_exc()
class RCParallelEnv(gym.Env):
"""A concurrent execution of environments in multiple processes."""
def __init__(self, env_name, n_env, demo_loc, curr_method):
assert n_env >= 1, "No environment given."
self.env_name = env_name
self.n_env = n_env
temp_env = gym.make(env_name)
self.observation_space = temp_env.observation_space
self.action_space = temp_env.action_space
self.locals = []
self.processes = []
rand_seed = random.randint(0,n_env-1)
for i in range(self.n_env):
local, remote = Pipe()
self.locals.append(local)
p = Process(target=worker, args=(remote, rand_seed+i, env_name, demo_loc,curr_method))
p.daemon = True
p.start()
remote.close()
self.processes.append(p)
def reset(self):
for local in self.locals:
local.send(("reset", None))
#self.envs[0].env = copy.deepcopy(random.choice(RCParallelEnv.good_start_states))
#results = [self.envs[0].gen_obs()] + [local.recv() for local in self.locals]
results = [local.recv() for local in self.locals]
return results
def step(self, actions):
for local, action in zip(self.locals, actions):
local.send(("step", action))
#if done:
# self.envs[0].env = copy.deepcopy(random.choice(RCParallelEnv.good_start_states))
#obs, reward, done, info = self.envs[0].step(actions[0])
# obs = self.envs[0].gen_obs()
#results = zip(*[(obs, reward, done, info)] + [local.recv() for local in self.locals])
results = zip(*[local.recv() for local in self.locals])
return results
def print(self):
for local in self.locals:
local.send(("print",None))
print(sum([local.recv() for local in self.locals])/len(self.locals))
def __del__(self):
for p in self.processes:
p.terminate()
def update_good_start_states(self):
#print(sys.getrefcount(good_start_states),sys.getsizeof(good_start_states))
[local.send(("update",None)) for local in self.locals]
t = [local.recv() for local in self.locals]
if t[0] == "curr_done":
return True
else:
return False
class RCPPOAlgo(PPOAlgo):
"""
The class containing an application of Reverse Curriculum learning from
https://arxiv.org/pdf/1707.05300.pdf to Proximal Policy Optimization
"""
def __init__(self, env_name, n_env, acmodel, demo_loc, version, es_method=2, update_frequency = 10, transfer_ratio=0.15, random_walk_length=1, curr_method = 'one',num_frames_per_proc=None, discount=0.99, lr=7e-4, beta1=0.9, beta2=0.999,
gae_lambda=0.95,
entropy_coef=0.01, value_loss_coef=0.5, max_grad_norm=0.5, recurrence=4,
adam_eps=1e-5, clip_eps=0.2, epochs=4, batch_size=256, preprocess_obss=None,
reshape_reward=None, aux_info=None):
self.n_env = n_env
self.env_name = env_name
self.transfer_ratio = transfer_ratio
self.random_walk_length = random_walk_length
self.version = version
self.update_frequency = update_frequency
self.es_method = es_method
super().__init__([gym.make(env_name) for _ in range(n_env)], acmodel, num_frames_per_proc, discount, lr, beta1, beta2, gae_lambda, entropy_coef,
value_loss_coef, max_grad_norm, recurrence, adam_eps, clip_eps, epochs,
batch_size, preprocess_obss, reshape_reward, aux_info)
if version == "v1":
self.good_start_states = self.read_good_start_states(env_name, demo_loc)
elif version == "v2" or version == "v3":
self.read_good_start_states_v2(env_name,demo_loc,curr_method)
self.env = None
self.env = RCParallelEnv(self.env_name,self.n_env, demo_loc, curr_method)
self.obs = self.env.reset()
self.update = 0
self.curr_update = 1
self.log_history = []
self.es_max = -1
self.es_pat = 0
self.curr_done = False
self.curr_really_done = False
def early_stopping_check(self, method, bound):
'''
if len(self.log_history) < patience:
return False
else:
for i in range(patience-1):
if self.log_history[-1-i]-self.log_history[-2-i] >= min_delta:
return False
return True
'''
'''
if len(self.log_history) ==0 :
return False
else:
for i in range(patience):
if self.log_history[-1-i] >= 0.9:
continue
else:
return False
return True
'''
if self.log_history[-1] >= bound:
return True
else:
return False
'''
if self.log_history[-1] - self.es_max > min_delta:
self.es_max = self.log_history[-1]
self.es_pat = 0
self.best_weights = self.acmodel.state_dict()
ans = False
no = 0
else:
self.es_pat += 1
if self.es_pat >= patience:
self.es_max = -1
self.es_pat = 0
self.acmodel.load_state_dict(self.best_weights)
ans = True
no = 1
else:
ans = False
no = 1
#print(ans,no,self.es_pat,patience)
return ans
'''
def update_parameters(self):
logs = super().update_parameters()
'''logs = {
"entropy":0,"value":0,"policy_loss":0,"value_loss":0,"grad_norm":0,"loss":0,"return_per_episode": [0],"reshaped_return_per_episode": [0],"num_frames_per_episode": [0],"num_frames": 0,"episodes_done": 0
}'''
self.update += 1
if self.version == "v1":
if self.update % self.update_frequency == 0 and self.update//self.update_frequency < 15:
self.good_start_states = self.update_good_start_states(self.good_start_states,self.random_walk_length,self.transfer_ratio)
self.env.update_good_start_states()
for state in self.good_start_states[-3:]:
s1 = copy.copy(state)
s1.render()
input()
elif self.version == "v2":
logger = logging.getLogger(__name__)
if self.update % self.update_frequency ==0 and self.update//self.update_frequency < self.curriculum_length:
"""self.env.print()
print(sum([state.count for state in self.env.good_start_states])/len(self.env.good_start_states))"""
self.env.update_good_start_states()
logger.info('Start state Update Number {}/{}'.format(self.update//self.update_frequency,self.curriculum_length))
if self.update % self.update_frequency ==0 and self.update//self.update_frequency == self.curriculum_length:
logger.info('Start State Updates Done')
self.env = ParallelEnv([gym.make(self.env_name) for _ in range(self.n_env)])
elif self.version == "v3":
if self.update % self.update_frequency == 0 and not self.curr_really_done:
success_rate = np.mean([1 if r > 0 else 0 for r in logs['return_per_episode']])
self.log_history.append(success_rate)
logger = logging.getLogger(__name__)
min_delta = 0.025
patience = 1
if self.es_method == 1:
bound = 0.9
elif self.es_method == 2:
bound = 0.7+(self.curr_update/self.curriculum_length)*(0.99-0.7)
if not self.curr_done:
#if self.early_stopping_check(patience+(self.curr_update),min_delta):
if self.early_stopping_check(self.es_method,bound):
self.curr_update+=1
self.log_history = []
self.curr_done = self.env.update_good_start_states()
logger.info('Start state Update Number {}'.format(self.curr_update))
else:
if self.early_stopping_check(self.es_method,bound):
self.curr_update += 1
self.log_history = []
logger.info('Start State Updates Done')
self.env = ParallelEnv([gym.make(self.env_name) for _ in range(self.n_env)])
self.curr_really_done = True
#self.obs = self.env.reset()
return logs
def update_good_start_states(self, good_start_states, random_walk_length, transfer_ratio):
new_starts = []
#new_starts.extend(copy.deepcopy(self.good_start_states))
#"""
for state in good_start_states:
s1 = state
for i in range(random_walk_length):
s1 = copy.deepcopy(s1)
action = s1.action_space.sample()
s1.step(action)
s1.count += 1
s1.step_count = 0
new_starts.append(s1)
"""
#n_threads = self.n_env
n_threads = 64
for start in range(0,len(self.good_start_states),n_threads):
end = min(start+n_threads,len(self.good_start_states))
good_start_states = ParallelEnv(self.good_start_states[start:end])
for i in range(n_explore):
action = [good_start_states.action_space.sample() for _ in range(len(good_start_states.envs))]
good_start_states.step(action)
new_starts.extend(copy.deepcopy(good_start_states.envs))
"""
n_old = int(transfer_ratio*len(good_start_states))
l = len(good_start_states)
good_start_states = random.sample(good_start_states,n_old)
good_start_states.extend(random.sample(new_starts,l-n_old))
return good_start_states
def read_good_start_states(self,env_name,demo_loc):
demos = babyai.utils.demos.load_demos(demo_loc)
seed = 0
start_states = []
for i,demo in enumerate(demos):
actions = demo[3]
env = gym.make(env_name)
babyai.utils.seed(seed)
env.seed(seed+i)
env.reset()
for j in range(len(actions)-1):
_,_,done,_ = env.step(actions[j].value)
env.step_count = 0
env.count = 1
start_states.append(env)
return start_states[:500]
def read_good_start_states_v2(self, env_name, demo_loc,curr_method):
demos = babyai.utils.demos.load_demos(demo_loc)
seed = 0
max_len = max([len(demo[3]) for demo in demos]) -1
self.pos = 0
if curr_method == 'log':
self.curriculum_length = math.floor(math.log2(max_len)) + 1
else:
combining_factor = int(curr_method)
self.curriculum_length = math.ceil(max_len/combining_factor)
return
self.start_states = [[] for _ in range(max_len)]
for i,demo in enumerate(demos):
actions = demo[3]
env = gym.make(env_name)
env.seed(seed+i)
env.reset()
env.count = len(actions)
n_steps = len(actions) -1
for j in range(max_len-1,n_steps-1,-1):
self.start_states[j].append(copy.deepcopy(env))
for j in range(n_steps):
_,_,done,_ = env.step(actions[j].value)
env.count -= 1
env.step_count = 0
self.start_states[n_steps-j-1].append(copy.deepcopy(env))
def update_good_start_states_v2(self):
self.pos += 1
new_starts = self.start_states[self.pos]
l = len(self.good_start_states)
n_old = int(self.transfer_ratio*l)
good_start_states = random.sample(self.good_start_states,n_old)
good_start_states.extend(random.sample(new_starts,l-n_old))
return good_start_states | 36.705882 | 240 | 0.557308 |
db58ca0ba01bdbb8ffc6a6e6a5aba3f3bb7e9c5a | 5,212 | py | Python | ingestion_server/test/unit_tests.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | ingestion_server/test/unit_tests.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | ingestion_server/test/unit_tests.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | 1 | 2021-11-02T17:58:29.000Z | 2021-11-02T17:58:29.000Z | import datetime
from uuid import uuid4
from psycopg2.extras import Json
from ingestion_server.cleanup import CleanupFunctions
from ingestion_server.elasticsearch_models import Image
def create_mock_image(override=None):
"""
Produce a mock image. Override default fields by passing in a dict with the
desired keys and values.
For example, to make an image with a custom title and default everything
else:
>>> create_mock_image({'title': 'My title'})
    :return: an Image Elasticsearch document built from the mocked database row.
"""
test_popularity = {"views": 50, "likes": 3, "comments": 1}
license_url = "https://creativecommons.org/licenses/by/2.0/fr/legalcode"
meta_data = {"popularity_metrics": test_popularity, "license_url": license_url}
test_data = {
"id": 0,
"title": "Unit test title",
"identifier": str(uuid4()),
"creator": "Eric Idle",
"creator_url": "https://creativecommons.org",
"tags": [{"name": "test", "accuracy": 0.9}],
"created_on": datetime.datetime.now(),
"url": "https://creativecommons.org",
"thumbnail": "https://creativecommons.org",
"provider": "test",
"source": "test",
"license": "cc-by",
"license_version": "4.0",
"foreign_landing_url": "https://creativecommons.org",
"view_count": 0,
"height": 500,
"width": 500,
"mature": False,
"meta_data": meta_data,
}
if override:
for k, v in override.items():
test_data[k] = v
schema = {}
row = []
idx = 0
for k, v in test_data.items():
schema[k] = idx
row.append(v)
idx += 1
return Image.database_row_to_elasticsearch_doc(row, schema)
class TestImage:
@staticmethod
def test_size():
small = create_mock_image({"height": 600, "width": 300})
assert small.size == Image.ImageSizes.SMALL.name.lower()
huge = create_mock_image({"height": 4096, "width": 4096})
assert huge.size == Image.ImageSizes.LARGE.name.lower()
@staticmethod
def test_aspect_ratio():
square = create_mock_image({"height": 300, "width": 300})
assert square.aspect_ratio == Image.AspectRatios.SQUARE.name.lower()
tall = create_mock_image({"height": 500, "width": 200})
assert tall.aspect_ratio == Image.AspectRatios.TALL.name.lower()
wide = create_mock_image({"height": 200, "width": 500})
assert wide.aspect_ratio == Image.AspectRatios.WIDE.name.lower()
@staticmethod
def test_extension():
no_extension = create_mock_image({"url": "https://creativecommons.org/hello"})
assert no_extension.extension is None
jpg = create_mock_image({"url": "https://creativecommons.org/hello.jpg"})
assert jpg.extension == "jpg"
@staticmethod
def test_mature_metadata():
# Received upstream indication the work is mature
meta = {"mature": True}
mature_metadata = create_mock_image({"meta_data": meta})
assert mature_metadata["mature"]
@staticmethod
def test_mature_api():
# Manually flagged work as mature ourselves
mature_work = create_mock_image({"mature": True})
assert mature_work["mature"]
@staticmethod
def test_default_maturity():
# Default to not flagged
sfw = create_mock_image()
assert not sfw["mature"]
class TestCleanup:
@staticmethod
def test_tag_blacklist():
tags = [
{"name": "cc0"},
{"name": " cc0"},
{"name": "valid", "accuracy": 0.99},
{"name": "valid_no_accuracy"},
{
"name": "garbage:=metacrap",
},
]
result = str(CleanupFunctions.cleanup_tags(tags))
expected = str(
Json([{"name": "valid", "accuracy": 0.99}, {"name": "valid_no_accuracy"}])
)
assert result == expected
@staticmethod
def test_tag_no_update():
tags = [{"name": "valid", "accuracy": 0.92}]
result = CleanupFunctions.cleanup_tags(tags)
assert result is None
@staticmethod
def test_accuracy_filter():
tags = [
{"name": "inaccurate", "accuracy": 0.5},
{"name": "accurate", "accuracy": 0.999},
]
result = str(CleanupFunctions.cleanup_tags(tags))
expected = str(Json([{"name": "accurate", "accuracy": 0.999}]))
assert result == expected
@staticmethod
def test_url_protocol_fix():
bad_url = "flickr.com"
tls_support_cache = {}
result = CleanupFunctions.cleanup_url(bad_url, tls_support_cache)
expected = "'https://flickr.com'"
bad_http = "neverssl.com"
result_http = CleanupFunctions.cleanup_url(bad_http, tls_support_cache)
expected_http = "'http://neverssl.com'"
assert result == expected
assert result_http == expected_http
@staticmethod
def test_rank_feature_verify():
img = create_mock_image({"standardized_popularity": 200})
assert img.standardized_popularity == 100
img2 = create_mock_image({"standardized_popularity": 0})
assert img2.standardized_popularity is None
| 33.625806 | 86 | 0.610898 |
f299583cbc87965c5f48ee09604adc806be38658 | 2,342 | py | Python | readtwice/layers/tpu_utils.py | officecowboy/google-research | 572b76279f22b1f8dff187662fff2bdecde6485c | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | readtwice/layers/tpu_utils.py | officecowboy/google-research | 572b76279f22b1f8dff187662fff2bdecde6485c | [
"Apache-2.0"
] | null | null | null | readtwice/layers/tpu_utils.py | officecowboy/google-research | 572b76279f22b1f8dff187662fff2bdecde6485c | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TPU specific Tensorflow operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
import tensorflow.compat.v1 as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla
def cross_replica_concat(tensor,
num_replicas,
name = None):
"""Reduce a concatenation of the `tensor` across tpu cores.
Branched from //audio/ears/nnfp/tensorflow/tpu_ops.py
Args:
tensor: tensor to concatenate.
num_replicas: Number of TPU cores.
name: A name for the op.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
replica_id = xla.replica_id()
with tf.compat.v1.name_scope(name, 'tpu_cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[replica_id]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
ext_tensor = tf.compat.v1.tpu.cross_replica_sum(ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
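

# Usage sketch inside a replicated TPU computation (replica count and shapes are
# illustrative): with 8 replicas and a per-replica [batch, dim] tensor,
#   gathered = cross_replica_concat(local_tensor, num_replicas=8)
# yields a [8 * batch, dim] tensor holding identical values on every replica.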
| 35.484848 | 79 | 0.722459 |
9af752902a07d209c9fc13599a7539e31779b60c | 23,008 | py | Python | test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/vendored_sdks/synapse/operations/_private_endpoint_connection_operations.py | kingces95/autorest.az | 616138f2d86d34cb6238e43359cf97738fa2f1bb | [
"MIT"
] | null | null | null | test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/vendored_sdks/synapse/operations/_private_endpoint_connection_operations.py | kingces95/autorest.az | 616138f2d86d34cb6238e43359cf97738fa2f1bb | [
"MIT"
] | 1 | 2021-02-24T09:10:12.000Z | 2021-02-24T09:10:12.000Z | test/scenarios/synapse/output/extflatten/src/synapse/azext_synapse/vendored_sdks/synapse/operations/_private_endpoint_connection_operations.py | kingces95/autorest.az | 616138f2d86d34cb6238e43359cf97738fa2f1bb | [
"MIT"
] | 1 | 2021-03-21T03:59:29.000Z | 2021-03-21T03:59:29.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionOperations(object):
"""PrivateEndpointConnectionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~synapse_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
workspace_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.PrivateEndpointConnection"
"""Gets a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~synapse_management_client.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _create_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.PrivateEndpointConnection"
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
workspace_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.PrivateEndpointConnection"]
"""Approve or reject a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~synapse_management_client.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["models.OperationResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.OperationResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorContract, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('OperationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
workspace_name, # type: str
private_endpoint_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.OperationResource"]
"""Delete a private endpoint connection.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~synapse_management_client.models.OperationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.PrivateEndpointConnectionList"]
"""Lists private endpoint connection in workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~synapse_management_client.models.PrivateEndpointConnectionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorContract, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/privateEndpointConnections'} # type: ignore
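
# Usage sketch: these operations are reached through the generated management
# client rather than instantiated directly; the client construction and attribute
# name below are illustrative assumptions:
#   client = SynapseManagementClient(credential, subscription_id)
#   conn = client.private_endpoint_connection.get("my-rg", "my-ws", "my-pec")
#   client.private_endpoint_connection.begin_delete("my-rg", "my-ws", "my-pec").result()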
| 51.936795 | 238 | 0.675982 |
0b35cf4c470751f1f9ed9f4c0fc1a9e1e953bfd0 | 7,427 | py | Python | ocr/main.py | Manuj229/ocr | eede33b8cae504328baee3b9d44e65a33e1b5d53 | [
"MIT"
] | null | null | null | ocr/main.py | Manuj229/ocr | eede33b8cae504328baee3b9d44e65a33e1b5d53 | [
"MIT"
] | null | null | null | ocr/main.py | Manuj229/ocr | eede33b8cae504328baee3b9d44e65a33e1b5d53 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import shutil
import os
from os.path import join
import logging
from .input import pdftotext, tesseract5
from .input import pdfminer_wrapper
from .input import tesseract
from .input import tesseract4
from .input import gvision
from .extract.loader import read_templates
from .output import to_csv
from .output import to_json
from .output import to_xml
logger = logging.getLogger(__name__)
input_mapping = {
"pdftotext": pdftotext,
"tesseract": tesseract,
"tesseract4": tesseract4,
"pdfminer": pdfminer_wrapper,
"gvision": gvision,
"tesseract5": tesseract5
}
output_mapping = {"csv": to_csv, "json": to_json, "xml": to_xml, "none": None}
def extract_data(invoicefile, templates=None, input_module=pdftotext):
"""Extracts structured data from PDF/image invoices.
This function uses the text extracted from a PDF file or image and
pre-defined regex templates to find structured data.
Reads template if no template assigned
Required fields are matches from templates
Parameters
----------
invoicefile : str
path of electronic invoice file in PDF,JPEG,PNG (example: "/home/duskybomb/pdf/invoice.pdf")
templates : list of instances of class `InvoiceTemplate`, optional
Templates are loaded using `read_template` function in `loader.py`
input_module : {'pdftotext', 'pdfminer', 'tesseract'}, optional
library to be used to extract text from given `invoicefile`,
Returns
-------
dict or False
extracted and matched fields or False if no template matches
Notes
-----
Import required `input_module` when using invoice2data as a library
See Also
--------
read_template : Function where templates are loaded
InvoiceTemplate : Class representing single template files that live as .yml files on the disk
Examples
--------
When using `invoice2data` as an library
>>> from invoice2data.input import pdftotext
>>> extract_data("invoice2data/test/pdfs/oyo.pdf", None, pdftotext)
{'issuer': 'OYO', 'amount': 1939.0, 'date': datetime.datetime(2017, 12, 31, 0, 0), 'invoice_number': 'IBZY2087',
'currency': 'INR', 'desc': 'Invoice IBZY2087 from OYO'}
"""
if templates is None:
templates = read_templates()
# print(templates[0])
extracted_str = input_module.to_text(invoicefile).decode("utf-8")
print("extracted_str:", extracted_str)
logger.debug("START pdftotext result ===========================")
logger.debug(extracted_str)
logger.debug("END pdftotext result =============================")
logger.debug("Testing {} template files".format(len(templates)))
for t in templates:
optimized_str = t.prepare_input(extracted_str)
print("optimized_str:", extracted_str)
if t.matches_input(optimized_str):
identified_fields = t.extract(optimized_str)
print("identified_fields:", identified_fields)
if not is_all_fields_empty(identified_fields):
return identified_fields
logger.error("No template for %s", invoicefile)
return False
# check if all other fields apart from issuer are empty
def is_all_fields_empty(identified_fields):
for key, value in identified_fields.items():
if key != "issuer":
if value:
return False
return True
def create_parser():
"""Returns argument parser """
parser = argparse.ArgumentParser(
description="Extract structured data from PDF files and save to CSV or JSON."
)
parser.add_argument(
"--input-reader",
choices=input_mapping.keys(),
default="pdftotext",
help="Choose text extraction function. Default: pdftotext",
)
parser.add_argument(
"--output-format",
choices=output_mapping.keys(),
default="none",
help="Choose output format. Default: none",
)
parser.add_argument(
"--output-date-format",
dest="output_date_format",
default="%Y-%m-%d",
help="Choose output date format. Default: %%Y-%%m-%%d (ISO 8601 Date)",
)
parser.add_argument(
"--output-name",
"-o",
dest="output_name",
default="invoices-output",
help="Custom name for output file. Extension is added based on chosen format.",
)
parser.add_argument(
"--debug", dest="debug", action="store_true", help="Enable debug information."
)
parser.add_argument(
"--copy",
"-c",
dest="copy",
help="Copy and rename processed PDFs to specified folder.",
)
parser.add_argument(
"--move",
"-m",
dest="move",
help="Move and rename processed PDFs to specified folder.",
)
parser.add_argument(
"--filename-format",
dest="filename",
default="{date} {invoice_number} {desc}.pdf",
help="Filename format to use when moving or copying processed PDFs."
'Default: "{date} {invoice_number} {desc}.pdf"',
)
parser.add_argument(
"--template-folder",
"-t",
dest="template_folder",
help="Folder containing invoice templates in yml file. Always adds built-in templates.",
)
parser.add_argument(
"--exclude-built-in-templates",
dest="exclude_built_in_templates",
default=False,
help="Ignore built-in templates.",
action="store_true",
)
parser.add_argument(
"input_files",
type=argparse.FileType("r"),
nargs="+",
help="File or directory to analyze.",
)
return parser
def main(args=None):
"""Take folder or single file and analyze each."""
if args is None:
parser = create_parser()
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
input_module = input_mapping[args.input_reader]
output_module = output_mapping[args.output_format]
templates = []
# Load templates from external folder if set.
if args.template_folder:
templates += read_templates(os.path.abspath(args.template_folder))
# Load internal templates, if not disabled.
if not args.exclude_built_in_templates:
templates += read_templates()
output = []
for f in args.input_files:
res = extract_data(f.name, templates=templates, input_module=input_module)
if res:
logger.info(res)
output.append(res)
if args.copy:
filename = args.filename.format(
date=res["date"].strftime("%Y-%m-%d"),
invoice_number=res["invoice_number"],
desc=res["desc"],
)
shutil.copyfile(f.name, join(args.copy, filename))
if args.move:
filename = args.filename.format(
date=res["date"].strftime("%Y-%m-%d"),
invoice_number=res["invoice_number"],
desc=res["desc"],
)
shutil.move(f.name, join(args.move, filename))
f.close()
if output_module is not None:
output_module.write_to_file(output, args.output_name, args.output_date_format)
if __name__ == "__main__":
main()
| 29.708 | 116 | 0.626902 |
4b122890748aae7e7e305509ceedf9ac6c0cf993 | 393 | py | Python | the_gram/wsgi.py | CheboiDerrick/the-gram | 0f2a459956081daa0ac0d0916509b67aa4c45977 | [
"MIT"
] | null | null | null | the_gram/wsgi.py | CheboiDerrick/the-gram | 0f2a459956081daa0ac0d0916509b67aa4c45977 | [
"MIT"
] | null | null | null | the_gram/wsgi.py | CheboiDerrick/the-gram | 0f2a459956081daa0ac0d0916509b67aa4c45977 | [
"MIT"
] | null | null | null | """
WSGI config for the_gram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'the_gram.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
80d17075751d94b16c127cb9c720c37d615fc7d4 | 1,381 | py | Python | src/simplify_docx/utils/friendly_names.py | sillsdev/Simplify-Docx | cbc6cb574e3b58c804096980aa5ad925686b36e9 | [
"MIT"
] | 88 | 2019-05-10T19:30:30.000Z | 2022-03-17T01:12:45.000Z | src/simplify_docx/utils/friendly_names.py | sillsdev/Simplify-Docx | cbc6cb574e3b58c804096980aa5ad925686b36e9 | [
"MIT"
] | 14 | 2019-11-04T19:53:11.000Z | 2022-03-25T17:04:29.000Z | src/simplify_docx/utils/friendly_names.py | sillsdev/Simplify-Docx | cbc6cb574e3b58c804096980aa5ad925686b36e9 | [
"MIT"
] | 22 | 2019-11-04T19:48:43.000Z | 2021-10-18T03:04:07.000Z | """
Utilities for applying friendly names
"""
def apply_friendly_names(x: object) -> None:
"""
A utility function for applying friendly names to a simplified document
"""
_walk(x, _apply_friendly_names)
__friendly_names__ = {
"CT_Tc": "table-cell",
"CT_Row": "table-row",
"CT_Tbl": "table",
"SymbolChar": "symbol",
"CT_Ind": "indentation-data",
"CT_SimpleField": "simple-field",
"CT_Hyperlink": "hyperlink",
"CT_P": "paragraph",
"numPr": "numbering-properties",
"Checkbox": "check-box",
"DropDown": "drop-down",
"CT_Text": "text",
"TextInput": "text-input",
"fldChar": "form-field",
"CT_FFData": "form-field-data",
"CT_FFTextInput": "text-input-data",
"CT_FFDDList": "drop-down-data",
"CT_Body": "body",
"CT_FFCheckBox": "check-box-data",
"CT_AltChunk": "nested-file",
"CT_Document": "document",
"CT_Rel": "nested-file",
}
def _apply_friendly_names(x):
x["TYPE"] = __friendly_names__.get(x["TYPE"], x["TYPE"])
def _walk(x, fun):
fun(x)
val = x.get("VALUE", None)
if not val:
return
if isinstance(val, dict) and val.get("TYPE", None):
# child is an element
_walk(val, fun)
if isinstance(val, list) and val[0].get("TYPE", None):
# child is a list of elements
for child in val:
_walk(child, fun)
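# Hedged usage sketch (the element dict below is illustrative, shaped like the
# simplified-docx output this module expects):
#   elem = {"TYPE": "CT_P", "VALUE": [{"TYPE": "CT_Text", "VALUE": "hi"}]}
#   apply_friendly_names(elem)
#   # elem["TYPE"] is now "paragraph"; the nested element's TYPE is now "text"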
| 25.109091 | 75 | 0.599566 |
81c1e268d9a684047946728b547e9e711b92c0c8 | 5,619 | py | Python | test/functional/p2p_invalid_block.py | ComputerCraftr/XEP-Core | 6b1117095f3a70181fdbb40ac57658b828bab3ce | [
"MIT"
] | 17 | 2021-01-04T10:12:07.000Z | 2022-02-14T21:33:01.000Z | test/functional/p2p_invalid_block.py | ComputerCraftr/XEP-Core | 6b1117095f3a70181fdbb40ac57658b828bab3ce | [
"MIT"
] | 1 | 2021-06-12T09:26:53.000Z | 2021-06-15T13:31:07.000Z | test/functional/p2p_invalid_block.py | ComputerCraftr/XEP-Core | 6b1117095f3a70181fdbb40ac57658b828bab3ce | [
"MIT"
] | 12 | 2020-12-30T10:36:07.000Z | 2022-01-17T19:49:24.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.p2p import P2PDataStore
from test_framework.test_framework import XEPTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(XEPTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["[email protected]"]]
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
peer = node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
peer.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
# same blockheader (CVE-2012-2459).
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
# For more information on merkle-root malleability see src/consensus/merkle.cpp.
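        # Why the duplicate works: when a merkle level has an odd number of
        # entries the last one is paired with itself, so the txids [c, t1, t2]
        # hash as H(H(c, t1), H(t2, t2)), which is exactly the root that
        # [c, t1, t2, t2] produces as well.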
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
# b'0x51' is OP_TRUE
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert block2_orig.vtx != block2.vtx
peer.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
# Check transactions for duplicate inputs (CVE-2018-17144)
self.log.info("Test duplicate input block.")
block2_dup = copy.deepcopy(block2_orig)
block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
block2_dup.vtx[2].rehash()
block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
block2_dup.rehash()
block2_dup.solve()
peer.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
peer.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
# Complete testing of CVE-2012-2459 by sending the original block.
# It should be accepted even though it has the same hash as the mutated one.
self.log.info("Test accepting original block after rejecting its mutated version.")
peer.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
# Update tip info
height += 1
block_time += 1
tip = int(block2_orig.hash, 16)
# Complete testing of CVE-2018-17144, by checking for the inflation bug.
# Create a block that spends the output of a tx in a previous block.
block4 = create_block(tip, create_coinbase(height), block_time)
tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
# Duplicates input
tx3.vin.append(tx3.vin[0])
tx3.rehash()
block4.vtx.append(tx3)
block4.hashMerkleRoot = block4.calc_merkle_root()
block4.rehash()
block4.solve()
self.log.info("Test inflation by duplicating input")
peer.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
| 40.717391 | 111 | 0.680014 |
c07a280aef76cc3fb7f29c2bfc5f633f08b8d1c2 | 12,685 | py | Python | src/examples/python/ctf-writeups/custom-crackmes/aarch64-hash/solve.py | gmh5225/Triton | 243026c9c1e07a5ca834c4aaf628d1079f6a85ea | [
"Apache-2.0"
] | null | null | null | src/examples/python/ctf-writeups/custom-crackmes/aarch64-hash/solve.py | gmh5225/Triton | 243026c9c1e07a5ca834c4aaf628d1079f6a85ea | [
"Apache-2.0"
] | null | null | null | src/examples/python/ctf-writeups/custom-crackmes/aarch64-hash/solve.py | gmh5225/Triton | 243026c9c1e07a5ca834c4aaf628d1079f6a85ea | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
## -*- coding: utf-8 -*-
##
## Jonathan Salwan - 2018-12-26
##
## A custom crackme to test the AArch64 architecture. The goal is to find a
## hash collision to take the 'Win' branch. First we run the binary with a random
## seed, then we calculate the hash collision and run the binary a second time with
## the good input to take the 'Win' branch.
##
## Output:
##
## $ time ./solve.py
## [+] Loading 0x000040 - 0x000238
## [+] Loading 0x000238 - 0x000253
## [+] Loading 0x000000 - 0x000a3c
## [+] Loading 0x010db8 - 0x011040
## [+] Loading 0x010dc8 - 0x010fa8
## [+] Loading 0x000254 - 0x000274
## [+] Loading 0x000948 - 0x000984
## [+] Loading 0x000000 - 0x000000
## [+] Loading 0x010db8 - 0x011000
## [+] Hooking __libc_start_main
## [+] Hooking puts
## [+] Starting emulation.
## [+] __libc_start_main hooked
## [+] argv[0] = ./crackme_hash
## [+] argv[1] = arm64
## [+] Please wait, calculating hash collisions...
## [+] Found several hash collisions:
## {0L: "0x6c, 'l'", 1L: "0x78, 'x'", 2L: "0x75, 'u'", 3L: "0x70, 'p'", 4L: "0x6e, 'n'"}
## {0L: "0x63, 'c'", 1L: "0x78, 'x'", 2L: "0x62, 'b'", 3L: "0x70, 'p'", 4L: "0x62, 'b'"}
## {0L: "0x73, 's'", 1L: "0x68, 'h'", 2L: "0x62, 'b'", 3L: "0x70, 'p'", 4L: "0x62, 'b'"}
## {0L: "0x71, 'q'", 1L: "0x66, 'f'", 2L: "0x62, 'b'", 3L: "0x70, 'p'", 4L: "0x62, 'b'"}
## {0L: "0x75, 'u'", 1L: "0x66, 'f'", 2L: "0x66, 'f'", 3L: "0x70, 'p'", 4L: "0x62, 'b'"}
## {0L: "0x75, 'u'", 1L: "0x67, 'g'", 2L: "0x67, 'g'", 3L: "0x70, 'p'", 4L: "0x62, 'b'"}
## {0L: "0x75, 'u'", 1L: "0x6f, 'o'", 2L: "0x67, 'g'", 3L: "0x78, 'x'", 4L: "0x62, 'b'"}
## {0L: "0x75, 'u'", 1L: "0x6f, 'o'", 2L: "0x67, 'g'", 3L: "0x70, 'p'", 4L: "0x6a, 'j'"}
## {0L: "0x75, 'u'", 1L: "0x6f, 'o'", 2L: "0x67, 'g'", 3L: "0x74, 't'", 4L: "0x6e, 'n'"}
## {0L: "0x75, 'u'", 1L: "0x6f, 'o'", 2L: "0x67, 'g'", 3L: "0x75, 'u'", 4L: "0x6f, 'o'"}
## {0L: "0x76, 'v'", 1L: "0x70, 'p'", 2L: "0x67, 'g'", 3L: "0x75, 'u'", 4L: "0x6f, 'o'"}
## {0L: "0x77, 'w'", 1L: "0x70, 'p'", 2L: "0x66, 'f'", 3L: "0x75, 'u'", 4L: "0x6f, 'o'"}
## {0L: "0x77, 'w'", 1L: "0x70, 'p'", 2L: "0x66, 'f'", 3L: "0x71, 'q'", 4L: "0x6b, 'k'"}
## {0L: "0x76, 'v'", 1L: "0x70, 'p'", 2L: "0x67, 'g'", 3L: "0x71, 'q'", 4L: "0x6b, 'k'"}
## {0L: "0x76, 'v'", 1L: "0x70, 'p'", 2L: "0x67, 'g'", 3L: "0x70, 'p'", 4L: "0x6a, 'j'"}
## {0L: "0x77, 'w'", 1L: "0x70, 'p'", 2L: "0x66, 'f'", 3L: "0x70, 'p'", 4L: "0x6a, 'j'"}
## {0L: "0x77, 'w'", 1L: "0x70, 'p'", 2L: "0x66, 'f'", 3L: "0x72, 'r'", 4L: "0x6c, 'l'"}
## {0L: "0x77, 'w'", 1L: "0x6e, 'n'", 2L: "0x64, 'd'", 3L: "0x72, 'r'", 4L: "0x6c, 'l'"}
## {0L: "0x75, 'u'", 1L: "0x6c, 'l'", 2L: "0x64, 'd'", 3L: "0x72, 'r'", 4L: "0x6c, 'l'"}
## {0L: "0x75, 'u'", 1L: "0x6e, 'n'", 2L: "0x66, 'f'", 3L: "0x72, 'r'", 4L: "0x6c, 'l'"}
## [+] Pick up the first serial: lxupn
## [+] puts hooked
## fail
## [+] Instruction executed: 240
## [+] Emulation done.
## [+] Start a second emulation with the good serial to validate the chall
## [+] Starting emulation.
## [+] __libc_start_main hooked
## [+] argv[0] = ./crackme_hash
## [+] argv[1] = lxupn
## [+] puts hooked
## Win
## [+] Instruction executed: 240
## [+] Emulation done.
##
## ./solve.py 0.10s user 0.00s system 99% cpu 0.105 total
##
from __future__ import print_function
from triton import *
import random
import string
import sys
import lief
import os
DEBUG = True
INPUT = 'arm64'
SERIAL = None
TARGET = os.path.join(os.path.dirname(__file__), 'crackme_hash')
VALID = False
# The debug function
def debug(s):
if DEBUG: print(s)
# Memory mapping
BASE_PLT = 0x10000000
BASE_ARGV = 0x20000000
BASE_STACK = 0x9fffffff
def getMemoryString(ctx, addr):
s = str()
index = 0
while ctx.getConcreteMemoryValue(addr+index):
c = chr(ctx.getConcreteMemoryValue(addr+index))
if c not in string.printable: c = ""
s += c
index += 1
return s
# Simulate the puts() function
def putsHandler(ctx):
debug('[+] puts hooked')
# Get arguments
arg1 = getMemoryString(ctx, ctx.getConcreteRegisterValue(ctx.registers.x0))
sys.stdout.write(arg1 + '\n')
# Return value
return len(arg1) + 1
def exitHandler(ctx):
debug('[+] exit hooked')
sys.exit(0)
def libcMainHandler(ctx):
debug('[+] __libc_start_main hooked')
# Get arguments
main = ctx.getConcreteRegisterValue(ctx.registers.x0)
# Push the return value to jump into the main() function
ctx.setConcreteRegisterValue(ctx.registers.sp, ctx.getConcreteRegisterValue(ctx.registers.pc)-CPUSIZE.QWORD)
ret2main = MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.sp), CPUSIZE.QWORD)
ctx.setConcreteMemoryValue(ret2main, main)
# Setup argc / argv
ctx.concretizeRegister(ctx.registers.x0)
ctx.concretizeRegister(ctx.registers.x1)
argvs = [
bytes(TARGET.encode('utf-8')), # argv[0]
bytes(INPUT.encode('utf-8'))
]
# Define argc / argv
base = BASE_ARGV
addrs = list()
index = 0
for argv in argvs:
addrs.append(base)
ctx.setConcreteMemoryAreaValue(base, argv+b'\x00')
if index == 1:
# Only symbolized argv[1]
for indexCell in range(len(argv)):
var = ctx.symbolizeMemory(MemoryAccess(base+indexCell, CPUSIZE.BYTE))
var.setComment('argv[%d][%d]' %(index, indexCell))
debug('[+] argv[%d] = %s' %(index, argv))
base += len(argv)+1
index += 1
argc = len(argvs)
argv = base
for addr in addrs:
ctx.setConcreteMemoryValue(MemoryAccess(base, CPUSIZE.QWORD), addr)
base += CPUSIZE.QWORD
ctx.setConcreteRegisterValue(ctx.registers.x0, argc)
ctx.setConcreteRegisterValue(ctx.registers.x1, argv)
return None
# Functions to emulate
customRelocation = [
('__libc_start_main', libcMainHandler, BASE_PLT + 0),
('exit', exitHandler, BASE_PLT + 1),
('puts', putsHandler, BASE_PLT + 2),
]
def hookingHandler(ctx):
pc = ctx.getConcreteRegisterValue(ctx.registers.pc)
for rel in customRelocation:
if rel[2] == pc:
# Emulate the routine and the return value
ret_value = rel[1](ctx)
if ret_value is not None:
ctx.setConcreteRegisterValue(ctx.registers.x0, ret_value)
# Get the return address
ret_addr = ctx.getConcreteMemoryValue(MemoryAccess(ctx.getConcreteRegisterValue(ctx.registers.sp), CPUSIZE.QWORD))
            # Hijack PC to skip the call
ctx.setConcreteRegisterValue(ctx.registers.pc, ret_addr)
            # Restore SP (simulate the ret)
ctx.setConcreteRegisterValue(ctx.registers.sp, ctx.getConcreteRegisterValue(ctx.registers.sp)+CPUSIZE.QWORD)
return
# Emulate the binary.
def emulate(ctx, pc):
global SERIAL
global VALID
count = 0
while pc:
# Fetch opcodes
opcodes = ctx.getConcreteMemoryAreaValue(pc, 4)
# Create the Triton instruction
instruction = Instruction()
instruction.setOpcode(opcodes)
instruction.setAddress(pc)
# Process
if ctx.processing(instruction) == False:
debug('[-] Instruction not supported: %s' %(str(instruction)))
break
#print(instruction)
# .text:0000000000000864 ADRP X0, #aWin@PAGE ; "Win"
# .text:0000000000000868 ADD X0, X0, #aWin@PAGEOFF ; "Win"
# .text:000000000000086C BL .puts
if pc == 0x868:
# We validated the crackme
VALID = True
# .text:0000000000000858 MOV W0, #0xAD6D
# .text:000000000000085C CMP W1, W0
# .text:0000000000000860 B.NE loc_874
if pc == 0x85c and SERIAL is None:
print('[+] Please wait, calculating hash collisions...')
x1 = ctx.getSymbolicRegister(ctx.registers.x1)
SymVar_0 = ctx.getSymbolicVariable('SymVar_0')
SymVar_1 = ctx.getSymbolicVariable('SymVar_1')
SymVar_2 = ctx.getSymbolicVariable('SymVar_2')
SymVar_3 = ctx.getSymbolicVariable('SymVar_3')
SymVar_4 = ctx.getSymbolicVariable('SymVar_4')
astCtxt = ctx.getAstContext()
# We want printable characters
expr = astCtxt.land([
astCtxt.bvugt(astCtxt.variable(SymVar_0), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_0), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_1), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_1), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_2), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_2), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_3), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_3), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.bvugt(astCtxt.variable(SymVar_4), astCtxt.bv(96, CPUSIZE.BYTE_BIT)),
astCtxt.bvult(astCtxt.variable(SymVar_4), astCtxt.bv(123, CPUSIZE.BYTE_BIT)),
astCtxt.equal(x1.getAst(), astCtxt.bv(0xad6d, CPUSIZE.QWORD_BIT)) # collision: (assert (= x1 0xad6d)
])
# Get max 20 different models
models = ctx.getModels(expr, 20)
print('[+] Found several hash collisions:')
for model in models:
print({k: "0x%x, '%c'" % (v.getValue(), v.getValue()) for k, v in list(model.items())})
SERIAL = str()
for _, v in list(sorted(models[0].items())):
SERIAL += "%c" % (v.getValue())
print('[+] Pick up the first serial: %s' %(SERIAL))
# Inc the number of instructions exected
count += 1
# Simulate routines
hookingHandler(ctx)
# Next
pc = ctx.getConcreteRegisterValue(ctx.registers.pc)
debug('[+] Instruction executed: %d' %(count))
return
def loadBinary(ctx, binary):
# Map the binary into the memory
phdrs = binary.segments
for phdr in phdrs:
size = phdr.physical_size
vaddr = phdr.virtual_address
debug('[+] Loading 0x%06x - 0x%06x' %(vaddr, vaddr+size))
ctx.setConcreteMemoryAreaValue(vaddr, list(phdr.content))
return
def makeRelocation(ctx, binary):
# Perform our own relocations
try:
for rel in binary.pltgot_relocations:
symbolName = rel.symbol.name
symbolRelo = rel.address
for crel in customRelocation:
if symbolName == crel[0]:
debug('[+] Hooking %s' %(symbolName))
ctx.setConcreteMemoryValue(MemoryAccess(symbolRelo, CPUSIZE.QWORD), crel[2])
except:
pass
# Perform our own relocations
try:
for rel in binary.dynamic_relocations:
symbolName = rel.symbol.name
symbolRelo = rel.address
for crel in customRelocation:
if symbolName == crel[0]:
debug('[+] Hooking %s' %(symbolName))
ctx.setConcreteMemoryValue(MemoryAccess(symbolRelo, CPUSIZE.QWORD), crel[2])
except:
pass
return
def run(ctx, binary):
# Concretize previous context
ctx.concretizeAllMemory()
ctx.concretizeAllRegister()
# Define a fake stack
ctx.setConcreteRegisterValue(ctx.registers.sp, BASE_STACK)
# Let's emulate the binary from the entry point
debug('[+] Starting emulation.')
emulate(ctx, binary.entrypoint)
debug('[+] Emulation done.')
return
def main():
global INPUT
global SERIAL
# Get a Triton context
ctx = TritonContext()
# Set the architecture
ctx.setArchitecture(ARCH.AARCH64)
# Set optimization
ctx.setMode(MODE.ALIGNED_MEMORY, True)
ctx.setMode(MODE.ONLY_ON_SYMBOLIZED, True)
# Parse the binary
binary = lief.parse(TARGET)
# Load the binary
loadBinary(ctx, binary)
# Perform our own relocations
makeRelocation(ctx, binary)
# First emulation
run(ctx, binary)
# Replace the input with the good serial to validate the chall
INPUT = SERIAL
# Second emulation
    print('[+] Start a second emulation with the good serial to validate the chall')
run(ctx, binary)
return not VALID == True
if __name__ == '__main__':
retValue = main()
sys.exit(retValue)
| 33.558201 | 126 | 0.587071 |
4d742401a14de7f4b159a47955c92badd7fef4cd | 655 | py | Python | lithops/version.py | Dahk/pywren-ibm-cloud | 560a587e35dfe8f6dff4b85cc4bc722ec5f7fd9d | [
"Apache-2.0"
] | 1 | 2018-12-12T10:42:51.000Z | 2018-12-12T10:42:51.000Z | lithops/version.py | omerb01/pywren-ibm-cloud | ca22e759729a828db6969d28b82e9a0e30db4f59 | [
"Apache-2.0"
] | null | null | null | lithops/version.py | omerb01/pywren-ibm-cloud | ca22e759729a828db6969d28b82e9a0e30db4f59 | [
"Apache-2.0"
] | null | null | null | #
# (C) Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = "2.0.0"
if __name__ == "__main__":
print(__version__)
| 31.190476 | 74 | 0.740458 |
3b06fa3c8edb2fe1219f481dff35dd35f1e40bdc | 1,021 | py | Python | Algos/leetcode/top/linkedLists/linked_list_cycle_2.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | Algos/leetcode/top/linkedLists/linked_list_cycle_2.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | Algos/leetcode/top/linkedLists/linked_list_cycle_2.py | Suraj-Rajesh/code | 3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9 | [
"MIT"
] | null | null | null | #
# https://leetcode.com/problems/linked-list-cycle-ii/
#
def detect_cycle(head):
slow = fast = head
#
# we need to check for both because,
#
# fast itself can be None (head is None (or) fast was previously one node
# earlier to tail and on doing, fast.next.next(fast.next will be tail in this
# case) it became None
# (or)
# fast.next is None
#
# in both cases, fast.next.next will fail
#
# also, no need to check if head is None initially, since head is assigned
# to fast and in while-condition below, if fast is None, condition fails
# and we return None
#
while fast is not None and fast.next is not None:
fast = fast.next.next
slow = slow.next
        # slow and fast met somewhere inside the cycle (not necessarily at the
        # node where the cycle begins)
        if slow == fast:
            slow = head
            # advance both pointers one node at a time; they meet again exactly
            # at the node where the cycle begins
            while slow != fast:
                slow = slow.next
                fast = fast.next
            return slow
return None
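# A minimal sanity check of the function above (ListNode is an assumed helper
# with a .next attribute; the names are illustrative):
#   class ListNode:
#       def __init__(self, x):
#           self.val, self.next = x, None
#   a, b, c = ListNode(1), ListNode(2), ListNode(3)
#   a.next, b.next, c.next = b, c, b   # cycle starts at b
#   assert detect_cycle(a) is b
#   assert detect_cycle(None) is None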
| 28.361111 | 81 | 0.582762 |
4052c3925c87f6a3e2b7ba8074af3bf8174c296a | 556 | py | Python | src/olympia/scanners/serializers.py | 10allday-Software/addons-server | 3cfbb4d203a0b9593488a3e4fa21b65877e0619f | [
"BSD-3-Clause"
] | 843 | 2016-02-09T13:00:37.000Z | 2022-03-20T19:17:06.000Z | src/olympia/scanners/serializers.py | 10allday-Software/addons-server | 3cfbb4d203a0b9593488a3e4fa21b65877e0619f | [
"BSD-3-Clause"
] | 10,187 | 2016-02-05T23:51:05.000Z | 2022-03-31T15:24:44.000Z | src/olympia/scanners/serializers.py | 10allday-Software/addons-server | 3cfbb4d203a0b9593488a3e4fa21b65877e0619f | [
"BSD-3-Clause"
] | 551 | 2016-02-08T20:32:16.000Z | 2022-03-15T16:49:24.000Z | from rest_framework import serializers
from .models import ScannerResult
class ScannerResultSerializer(serializers.ModelSerializer):
scanner = serializers.SerializerMethodField()
label = serializers.CharField(default=None)
results = serializers.JSONField()
class Meta:
model = ScannerResult
fields = (
'id',
'scanner',
'label',
'results',
'created',
'model_version',
)
def get_scanner(self, obj):
return obj.get_scanner_name()
| 23.166667 | 59 | 0.611511 |
eb021b4c039b82f00bbfa1a2ea88d4a1e501bf54 | 2,550 | py | Python | src/openprocurement/agreement/cfaua/models/change.py | pontostroy/openprocurement.api | 6651ef29413d155c83f893ee64a611cf75f4daaf | [
"Apache-2.0"
] | 3 | 2020-03-13T06:44:23.000Z | 2020-11-05T18:25:29.000Z | src/openprocurement/agreement/cfaua/models/change.py | pontostroy/openprocurement.api | 6651ef29413d155c83f893ee64a611cf75f4daaf | [
"Apache-2.0"
] | 2 | 2021-03-25T23:27:04.000Z | 2022-03-21T22:18:15.000Z | src/openprocurement/agreement/cfaua/models/change.py | scrubele/prozorro-testing | 42b93ea2f25d8cc40e66c596f582c7c05e2a9d76 | [
"Apache-2.0"
] | 3 | 2020-10-16T16:25:14.000Z | 2021-05-22T12:26:20.000Z | # -*- coding: utf-8 -*-
from schematics.types import StringType
from schematics.types.compound import ModelType
from openprocurement.api.roles import RolesFromCsv
from openprocurement.api.models import ListType
from openprocurement.agreement.core.models.change import Change as BaseChange
from openprocurement.agreement.cfaua.models.modification import UnitPriceModification, ContractModification
from openprocurement.agreement.cfaua.validation import (
validate_item_price_variation_modifications,
validate_third_party_modifications,
validate_modifications_contracts_uniq,
validate_modifications_items_uniq,
validate_only_addend_or_only_factor,
)
class ClassicChange(BaseChange):
class Options:
namespace = "Change"
roles = RolesFromCsv("Change.csv", relative_to=__file__)
agreementNumber = StringType()
status = StringType(choices=["pending", "active", "cancelled"], default="pending")
class ChangeTaxRate(ClassicChange):
class Options:
namespace = "Change"
roles = RolesFromCsv("ChangeTaxRate.csv", relative_to=__file__)
rationaleType = StringType(default="taxRate")
modifications = ListType(
ModelType(UnitPriceModification, required=True),
validators=[validate_modifications_items_uniq, validate_only_addend_or_only_factor],
)
class ChangeItemPriceVariation(ClassicChange):
class Options:
namespace = "Change"
roles = RolesFromCsv("ChangeItemPriceVariation.csv", relative_to=__file__)
rationaleType = StringType(default="itemPriceVariation")
modifications = ListType(
ModelType(UnitPriceModification, required=True),
validators=[validate_item_price_variation_modifications, validate_modifications_items_uniq],
)
class ChangeThirdParty(ClassicChange):
class Options:
namespace = "Change"
roles = RolesFromCsv("ChangeThirdParty.csv", relative_to=__file__)
rationaleType = StringType(default="thirdParty")
modifications = ListType(
ModelType(UnitPriceModification, required=True),
validators=[validate_third_party_modifications, validate_modifications_items_uniq],
)
class ChangePartyWithdrawal(ClassicChange):
class Options:
namespace = "Change"
roles = RolesFromCsv("ChangePartyWithdrawal.csv", relative_to=__file__)
rationaleType = StringType(default="partyWithdrawal")
modifications = ListType(
ModelType(ContractModification, required=True), validators=[validate_modifications_contracts_uniq]
)
| 35.416667 | 107 | 0.763922 |
9c69f97584b3f046361ac8fbea20dd18c8aab302 | 942 | py | Python | scheduler.py | karlmcguire/scheduler | 9e2cb1479c67f6cccac42397d3d9c845ed772925 | [
"MIT"
] | null | null | null | scheduler.py | karlmcguire/scheduler | 9e2cb1479c67f6cccac42397d3d9c845ed772925 | [
"MIT"
] | null | null | null | scheduler.py | karlmcguire/scheduler | 9e2cb1479c67f6cccac42397d3d9c845ed772925 | [
"MIT"
] | null | null | null | import collections
Task = collections.namedtuple("Task", "id weight depends")
A = Task(id="A", weight=3, depends=[])
C = Task(id="C", weight=2, depends=[])
D = Task(id="D", weight=3, depends=[])
E = Task(id="E", weight=1, depends=[A])
B = Task(id="B", weight=5, depends=[C])
H = Task(id="H", weight=3, depends=[C, D])
F = Task(id="F", weight=4, depends=[D])
G = Task(id="G", weight=1, depends=[E])
J = Task(id="J", weight=4, depends=[E, B, H])
I = Task(id="I", weight=5, depends=[H, F])
L = Task(id="L", weight=2, depends=[F])
K = Task(id="K", weight=5, depends=[G])
# all tasks A-K must be completed within 10 weeks
# you're allocated 4 workers per week
# the most workers on a given task is 2 per week
# the most workers on all tasks for a given week is 5
# putting an additional worker on a task (2 total) adds a $100 charge
# charge $200 per worker per week
# if you use an extra worker (5 total) it is a $300 charge for the week
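# Not the assignment's solver: just a minimal sketch that orders the tasks so
# every task comes after all of its dependencies (a Kahn-style topological
# sort), the usual first step before packing tasks into weeks and workers.
def dependency_order(tasks):
    remaining = list(tasks)
    finished, order = set(), []
    while remaining:
        for task in remaining:
            if all(dep.id in finished for dep in task.depends):
                order.append(task)
                finished.add(task.id)
                remaining.remove(task)
                break
        else:
            raise ValueError("circular dependency between tasks")
    return order
# Example: dependency_order([A, B, C, D, E, F, G, H, I, J, K, L]) returns the
# twelve tasks in an order where A, C and D (no dependencies) precede every
# task that depends on them.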
| 31.4 | 71 | 0.646497 |
60375af10399596fa3701f4342fd9fefa7a9a67d | 8,428 | py | Python | ctools/common.py | dryabtse/workscripts | 6b119652cc889cb415e1d9abab3ab6cc56bc2100 | [
"Unlicense"
] | null | null | null | ctools/common.py | dryabtse/workscripts | 6b119652cc889cb415e1d9abab3ab6cc56bc2100 | [
"Unlicense"
] | null | null | null | ctools/common.py | dryabtse/workscripts | 6b119652cc889cb415e1d9abab3ab6cc56bc2100 | [
"Unlicense"
] | null | null | null | # Helper utilities to be used by the ctools scripts
#
import asyncio
import bson
import datetime
import logging
import motor.motor_asyncio
import subprocess
import sys
import uuid
from bson.binary import UuidRepresentation
from bson.codec_options import CodecOptions
from bson.objectid import ObjectId
from pymongo import uri_parser
# Prompt the user with `answer` for a Yes/No confirmation; returns on yes,
# raises KeyboardInterrupt on no (the default)
def yes_no(answer):
    yes = set(['yes', 'y', 'ye'])
no = set(['no', 'n', ''])
while True:
choice = input(answer + '\nProceed (yes/NO)? ').lower()
if choice in yes:
return
elif choice in no:
raise KeyboardInterrupt('User canceled')
else:
print("Please respond with 'yes' or 'no'\n")
# Abstracts constructing the name of an executable on POSIX vs Windows platforms
def exe_name(name):
if (sys.platform == 'win32'):
return name + '.exe'
return name
# Abstracts the connection to and some administrative operations against a MongoDB cluster. This
# class is highly tailored to the usage in the ctools scripts in the same directory and is not a
# generic utility.
class Cluster:
def __init__(self, uri, loop):
self.uri_options = uri_parser.parse_uri(uri)['options']
self.client = motor.motor_asyncio.AsyncIOMotorClient(uri)
self.adminDb = self.client.get_database('admin',
codec_options=CodecOptions(uuid_representation=UuidRepresentation.STANDARD))
self.configDb = self.client.get_database(
'config', codec_options=CodecOptions(uuid_representation=UuidRepresentation.STANDARD))
class NotMongosException(Exception):
pass
class BalancerEnabledException(Exception):
pass
@property
async def configsvrConnectionString(self):
serverStatus = await self.adminDb.command({'serverStatus': 1, 'sharding': 1})
return serverStatus['sharding']['configsvrConnectionString']
@property
async def FCV(self):
fcvDocument = await self.adminDb['system.version'].find_one(
{'_id': 'featureCompatibilityVersion'})
return fcvDocument['version']
@property
async def shardIds(self):
return list(
map(lambda x: x['_id'], await self.configDb.shards.find({}).sort('_id',
1).to_list(None)))
async def check_is_mongos(self, warn_only=False):
print('Server is running at FCV', await self.FCV)
try:
ismaster = await self.adminDb.command('ismaster')
if 'msg' not in ismaster or ismaster['msg'] != 'isdbgrid':
raise Cluster.NotMongosException('Not connected to a mongos')
except Cluster.NotMongosException:
if warn_only:
print('WARNING: Not connected to a MongoS')
else:
raise
async def check_balancer_is_disabled(self, warn_only=False):
try:
balancer_status = await self.adminDb.command({'balancerStatus': 1})
assert 'mode' in balancer_status, f'Unrecognized balancer status response: {balancer_status}'
if balancer_status['mode'] != 'off':
raise Cluster.BalancerEnabledException(
'''The balancer must be stopped before running this script.
Please run sh.stopBalancer()''')
except Cluster.BalancerEnabledException:
if warn_only:
print('WARNING: Balancer is still enabled')
else:
raise
async def make_direct_shard_connection(self, shard):
if (isinstance(shard, str)):
shard = await self.configDb.shards.find_one({'_id': shard})
conn_parts = shard['host'].split('/', 1)
uri = 'mongodb://' + conn_parts[1]
return motor.motor_asyncio.AsyncIOMotorClient(uri, replicaset=conn_parts[0],
**self.uri_options)
async def make_direct_config_server_connection(self):
return await self.make_direct_shard_connection({
'_id': 'config',
'host': await self.configsvrConnectionString
})
async def on_each_shard(self, fn):
tasks = []
async for shard in self.configDb.shards.find({}):
tasks.append(
asyncio.ensure_future(
fn(shard['_id'], await self.make_direct_shard_connection(shard))))
await asyncio.gather(*tasks)
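    # Hedged usage sketch (the coroutine name is illustrative):
    #   async def ping(shard_id, conn):
    #       print(shard_id, await conn.admin.command('ping'))
    #   await cluster.on_each_shard(ping)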
# Utility class to generate the components for sharding a collection externally
class ShardCollection:
def __init__(self, ns, uuid, shard_key, unique, fcv):
self.ns = ns
self.uuid = uuid
self.shard_key = shard_key
self.unique = unique
self.fcv = fcv
self.shard_key_is_string = (self.fcv <= '4.2')
self.epoch = bson.objectid.ObjectId()
self.creation_time = datetime.datetime.now()
self.timestamp = bson.timestamp.Timestamp(self.creation_time, 1)
logging.info(f'''Sharding an existing collection {self.ns} with the following parameters:
uuid: {self.uuid}
shard_key: {self.shard_key}
unique: {self.unique}
''')
# Accepts an array of tuples which must contain exactly the following fields:
# min, max, shard
# AND MUST be sorted according to range['min']
def generate_config_chunks(self, chunks):
def make_chunk_id(i):
if self.shard_key_is_string:
return f'shard-key-{self.ns}-{str(i).zfill(8)}'
else:
return ObjectId()
chunk_idx = 0
for c in chunks:
chunk_obj = {
'_id': make_chunk_id(chunk_idx),
'min': c['min'],
'max': c['max'],
'shard': c['shard'],
'lastmod': bson.timestamp.Timestamp(1, chunk_idx),
}
if self.fcv >= '5.0':
chunk_obj.update({'uuid': self.uuid})
else:
chunk_obj.update({
'ns': self.ns,
'lastmodEpoch': self.epoch,
})
chunk_idx += 1
yield chunk_obj
# Accepts an array of tuples which must contain exactly the following fields:
# min, max, shard
# AND MUST be sorted according to range['min']
def generate_shard_chunks(self, chunks):
pass
def generate_collection_entry(self):
coll_obj = {
'_id': self.ns,
'lastmodEpoch': self.epoch,
'lastmod': self.creation_time,
'key': self.shard_key,
'unique': self.unique,
'uuid': self.uuid
}
if self.fcv >= '5.0':
coll_obj.update({'timestamp': self.timestamp})
else:
coll_obj.update({'dropped': False})
return coll_obj
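    # Hedged usage sketch (ranges and shard names are illustrative; MinKey and
    # MaxKey come from bson):
    #   sc = ShardCollection('db.coll', uuid.uuid4(), {'x': 1}, False, '5.0')
    #   ranges = [
    #       {'min': {'x': MinKey()}, 'max': {'x': 0}, 'shard': 'shard0'},
    #       {'min': {'x': 0}, 'max': {'x': MaxKey()}, 'shard': 'shard1'},
    #   ]
    #   chunk_docs = list(sc.generate_config_chunks(ranges))
    #   coll_doc = sc.generate_collection_entry()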
# This class implements an iterable wrapper around the 'mgeneratejs' script from
# https://github.com/rueckstiess/mgeneratejs. It allows custom-shaped MongoDB documents to be
# generated in a streaming fashion for scripts which need to generate some data according to a given
# shard key.
#
# The mgeneratejs script must be installed in advance and must be on the system's PATH.
#
# Example usages:
#  it = iter(common.MGenerateJSGenerator("{a:\'\"$name\"\'}", 100))
# This will generate 100 documents with the form `{a:'John Smith'}`
class MGenerateJSGenerator:
def __init__(self, doc_pattern, num_docs):
self.doc_pattern = doc_pattern
self.num_docs = num_docs
def __iter__(self):
self.mgeneratejs_process = subprocess.Popen(
f'mgeneratejs --number {self.num_docs} {self.doc_pattern}', shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
self.stdout_iter = iter(self.mgeneratejs_process.stdout.readline, '')
return self
def __next__(self):
try:
return next(self.stdout_iter).strip()
except StopIteration:
if self.mgeneratejs_process.returncode == 0:
raise
else:
raise Exception(
f"Error occurred running mgeneratejs {''.join(self.mgeneratejs_process.stderr.readlines())}"
)
| 35.711864 | 112 | 0.605363 |
b03e33b8d794d8246d7a1501e85611e2797a2661 | 20,273 | py | Python | sky/tools/webkitpy/layout_tests/views/printing.py | rafaelw/mojo | d3495a129dcbe679e2d5ac729c85a58acf38f8c4 | [
"BSD-3-Clause"
] | 5 | 2015-04-30T00:13:21.000Z | 2019-07-10T02:17:24.000Z | sky/tools/webkitpy/layout_tests/views/printing.py | rafaelw/mojo | d3495a129dcbe679e2d5ac729c85a58acf38f8c4 | [
"BSD-3-Clause"
] | null | null | null | sky/tools/webkitpy/layout_tests/views/printing.py | rafaelw/mojo | d3495a129dcbe679e2d5ac729c85a58acf38f8c4 | [
"BSD-3-Clause"
] | 1 | 2019-05-12T13:53:44.000Z | 2019-05-12T13:53:44.000Z | # Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('--timing', action='store_true', default=False,
help='display test times (summary plus per-test w/ --verbose)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("View the test results at file://%s/results.html" % results_directory)
if self._options.enable_versioned_results:
self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Sky server command line: ' + ' '.join(self._port.server_command_line()))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_time, run_results)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in the driver.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
if unexpected_slow_tests:
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
if slow_tests:
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
if timeout_or_crash_tests:
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
min_seconds_to_print = 15
timings = []
for directory in stats:
rounded_time = round(stats[directory]['total_time'], 1)
if rounded_time > min_seconds_to_print:
timings.append((directory, rounded_time, stats[directory]['num_tests']))
if not timings:
return
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
for timing in timings:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)
        std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total_time, run_results):
if self._options.timing:
parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
# There is serial overhead in layout_test_runner.run() that we can't easily account for when
# really running in parallel, but taking the min() ensures that in the worst case
# (if parallel time is less than run_time) we do account for it.
serial_time = total_time - min(run_results.run_time, parallel_time)
speedup = (parallel_time + serial_time) / total_time
timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
else:
timing_summary = ''
total = run_results.total - run_results.expected_skips
expected = run_results.expected - run_results.expected_skips
unexpected = run_results.unexpected
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or self._options.debug_rwt_logging or unexpected:
self.writeln("")
expected_summary_str = ''
if run_results.expected_failures > 0:
expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
else:
summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
else:
summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
else:
summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_completed, self.num_tests, test_name, suffix)
def print_started_test(self, test_name):
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected,
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif self._options.verbose or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
for test_name, result_message in self._completed_tests:
self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, timing, test_run_time):
exp_string = ' unexpectedly' if not expected else ''
timing_string = ' %.4fs' % test_run_time if timing else ''
if result_type == test_expectations.PASS:
return ' passed%s%s' % (exp_string, timing_string)
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
references = self._port.reference_files(test_name)
if references:
for _, filename in references:
self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
else:
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_throttled_update(self, msg):
self._meter.write_throttled_update(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
| 46.285388 | 166 | 0.655354 |
87e257560bbb2b2f62c47171f1b726021bc8bb7d | 4,160 | py | Python | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/virtual_network_peering_py3.py | Prasanna-Padmanabhan/azure-sdk-for-python | 102d37706348985be660fd679865db209d431f3f | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/virtual_network_peering_py3.py | Prasanna-Padmanabhan/azure-sdk-for-python | 102d37706348985be660fd679865db209d431f3f | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/virtual_network_peering_py3.py | Prasanna-Padmanabhan/azure-sdk-for-python | 102d37706348985be660fd679865db209d431f3f | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network.
:type remote_virtual_network:
~azure.mgmt.network.v2016_12_01.models.SubResource
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2016_12_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, allow_virtual_network_access: bool=None, allow_forwarded_traffic: bool=None, allow_gateway_transit: bool=None, use_remote_gateways: bool=None, remote_virtual_network=None, peering_state=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkPeering, self).__init__(id=id, **kwargs)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
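    # Hedged example (resource IDs are placeholders): a peering that allows
    # direct VNet-to-VNet traffic but no forwarded traffic or gateway transit.
    #   peering = VirtualNetworkPeering(
    #       name='vnetA-to-vnetB',
    #       allow_virtual_network_access=True,
    #       allow_forwarded_traffic=False,
    #       allow_gateway_transit=False,
    #       remote_virtual_network=SubResource(id='<vnetB resource ID>'),
    #   )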
| 52.658228 | 312 | 0.696154 |
043750dbd3dca8643351c66f788af51b293b2441 | 167 | py | Python | py_tdlib/constructors/toggle_basic_group_administrators.py | Mr-TelegramBot/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 24 | 2018-10-05T13:04:30.000Z | 2020-05-12T08:45:34.000Z | py_tdlib/constructors/toggle_basic_group_administrators.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 3 | 2019-06-26T07:20:20.000Z | 2021-05-24T13:06:56.000Z | py_tdlib/constructors/toggle_basic_group_administrators.py | MrMahdi313/python-tdlib | 2e2d21a742ebcd439971a32357f2d0abd0ce61eb | [
"MIT"
] | 5 | 2018-10-05T14:29:28.000Z | 2020-08-11T15:04:10.000Z | from ..factory import Method
class toggleBasicGroupAdministrators(Method):
basic_group_id = None # type: "int32"
everyone_is_administrator = None # type: "Bool"
| 23.857143 | 49 | 0.766467 |
8a5ea49c06678f9fa24f5c03903036696badebeb | 7,544 | py | Python | example/imagenet_example_cw.py | gabrielhao/AdvBox | 23506eb6db879528dd145fc29a7509dc0e12fb1c | [
"Apache-2.0"
] | 1 | 2020-09-19T13:32:43.000Z | 2020-09-19T13:32:43.000Z | example/imagenet_example_cw.py | gabrielhao/AdvBox | 23506eb6db879528dd145fc29a7509dc0e12fb1c | [
"Apache-2.0"
] | null | null | null | example/imagenet_example_cw.py | gabrielhao/AdvBox | 23506eb6db879528dd145fc29a7509dc0e12fb1c | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
# Copyright 2017 - 2018 Baidu Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Attack ResNet and AlexNet models with the CW (Carlini-Wagner) attack; the dataset is ImageNet.
from __future__ import print_function
import sys
sys.path.append("..")
import os
import numpy as np
import logging
import paddle.fluid as fluid
import paddle
#classification
import models
import reader
import argparse
import functools
from utility import add_arguments, print_arguments, generation_image
#attack
from advbox.adversary import Adversary
from advbox.attacks.cw import CW_L2
from advbox.models.paddle import PaddleModel
# The WITH_GPU environment variable decides at run time whether GPU resources are
# used. This is convenient when developing on a Mac but running on a GPU server:
# leave the variable unset on the Mac and `export WITH_GPU=1` on the GPU server.
with_gpu = os.getenv('WITH_GPU', '0') != '0'
# Test image
# DATA_PATH is the directory containing the test images
# TEST_LIST is the description (list) file; multiple files are supported
TEST_LIST = './images/mytest_list.txt'
DATA_PATH = './images'
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('use_gpu', bool, False, "Whether to use GPU or not.")
add_arg('class_dim', int, 1000, "Class number.")
add_arg('image_shape', str, "3,224,224", "Input image size")
#add_arg('pretrained_model', str, "./parameters/resnet_50/115", "Path to the pretrained model parameters.")
add_arg('pretrained_model', str, "./parameters/alexnet/116", "Path to the pretrained model parameters.")
#add_arg('model', str, "ResNet50", "Set the network to use.")
add_arg('model', str, "AlexNet", "Set the network to use.")
add_arg('target', int, -1, "target class.")
add_arg('log_debug',        bool,  False,                "Whether to enable DEBUG-level logging.")
add_arg('inference',        bool,  False,                "Only run inference; do not generate adversarial examples.")
model_list = [m for m in dir(models) if "__" not in m]
print(model_list)


def infer(infer_program, image, logits, place, exe):
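    """Run the cloned inference program over the test set and return a dict
    mapping image file name to its predicted ImageNet class index."""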
print("--------------------inference-------------------")
test_batch_size = 1
test_reader = paddle.batch(reader.test(TEST_LIST, DATA_PATH), batch_size=test_batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image])
fetch_list = [logits.name]
label_res = {}
for batch_id, data in enumerate(test_reader()):
data_img = data[0][0]
filename = data[0][1]
result = exe.run(infer_program,
fetch_list=fetch_list,
feed=feeder.feed([data_img]))
#print(result)
result = result[0][0]
pred_label = np.argmax(result)
print("Test-{0}-score: {1}, class {2}, name={3}"
.format(batch_id, result[pred_label], pred_label, filename))
label_res[filename] = pred_label
sys.stdout.flush()
return label_res


def main(use_cuda):
"""
    Advbox example that demonstrates how to use Advbox.
"""
# base marco
TOTAL_NUM = 100
IMG_NAME = 'image'
LABEL_NAME = 'label'
# parse args
args = parser.parse_args()
print_arguments(args)
# parameters from arguments
class_dim = args.class_dim
model_name = args.model
target_class = args.target
pretrained_model = args.pretrained_model
image_shape = [int(m) for m in args.image_shape.split(",")]
if args.log_debug:
        logging.getLogger().setLevel(logging.DEBUG)
    assert model_name in model_list, "{} is not in the supported model list: {}".format(args.model, model_list)
# model definition
model = models.__dict__[model_name]()
# declare vars
image = fluid.layers.data(name=IMG_NAME, shape=image_shape, dtype='float32')
logits = model.net(input=image, class_dim=class_dim)
# clone program and graph for inference
infer_program = fluid.default_main_program().clone(for_test=True)
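    # By default the data layer stops gradients; the attack needs gradients to
    # flow back to the input image so it can be perturbed.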
image.stop_gradient = False
label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
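    # Cross-entropy against the attached label; Advbox queries this loss (and its
    # gradient w.r.t. the input image) through the PaddleModel wrapper set up below.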
BATCH_SIZE = 1
test_reader = paddle.batch(
reader.test(TEST_LIST, DATA_PATH), batch_size=BATCH_SIZE)
# setup run environment
enable_gpu = use_cuda and args.use_gpu
place = fluid.CUDAPlace(0) if enable_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# advbox demo
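    # Wrap the Paddle program so Advbox can run forward passes and compute input
    # gradients; (0, 1) is the pixel-value range assumed by this example.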
m = PaddleModel(
fluid.default_main_program(),
IMG_NAME,
LABEL_NAME,
logits.name,
avg_cost.name,
(0, 1),
channel_axis=3)
# Adversarial method: CW
attack = CW_L2(m, learning_rate=0.1, attack_model=model.conv_net, with_gpu=enable_gpu,
shape=image_shape, dim=class_dim, confidence_level=0.9, multi_clip=True)
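    # A reading of the config below (see advbox.attacks.cw for the authoritative
    # semantics): CW binary-searches the trade-off constant c over c_range for
    # c_search_step rounds, starting from c_start, running attack_iterations
    # optimizer steps per candidate c; "targeted" requests a targeted attack.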
attack_config = {"attack_iterations": 50,
"c_search_step": 10,
"c_range": (0.01,100),
"c_start": 10,
"targeted": True}
# reload model vars
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
# inference
pred_label = infer(infer_program, image, logits, place, exe)
    # if only inference was requested, exit without generating adversarial examples
if args.inference:
exit(0)
print("--------------------adversary-------------------")
# use test data to generate adversarial examples
total_count = 0
fooling_count = 0
for data in test_reader():
total_count += 1
data_img = [data[0][0]]
filename = data[0][1]
org_data = data_img[0][0]
adversary = Adversary(org_data, pred_label[filename])
#target attack
if target_class != -1:
tlabel = target_class
adversary.set_target(is_targeted_attack=True, target_label=tlabel)
adversary = attack(adversary, **attack_config)
if adversary.is_successful():
fooling_count += 1
print(
'attack success, original_label=%d, adversarial_label=%d, count=%d'
% (pred_label[filename], adversary.adversarial_label, total_count))
#output original image, adversarial image and difference image
generation_image(total_count, org_data, pred_label[filename],
adversary.adversarial_example, adversary.adversarial_label, "CW")
else:
print('attack failed, original_label=%d, count=%d' %
(pred_label[filename], total_count))
if total_count >= TOTAL_NUM:
print(
"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f"
% (fooling_count, total_count,
float(fooling_count) / total_count))
break
print("cw attack done")


if __name__ == '__main__':
main(use_cuda=with_gpu)
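# Example invocation (illustrative; the target class index 282 is a placeholder):
#     python imagenet_example_cw.py --model AlexNet \
#         --pretrained_model ./parameters/alexnet/116 --target 282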
| 35.088372 | 116 | 0.646607 |