hexsha (string, len 40) | size (int64, 5-1.04M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 3-236) | max_stars_repo_name (string, len 4-125) | max_stars_repo_head_hexsha (string, len 40-78) | max_stars_repo_licenses (sequence) | max_stars_count (int64, 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3-236) | max_issues_repo_name (string, len 4-125) | max_issues_repo_head_hexsha (string, len 40-78) | max_issues_repo_licenses (sequence) | max_issues_count (int64, 1-116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3-236) | max_forks_repo_name (string, len 4-125) | max_forks_repo_head_hexsha (string, len 40-78) | max_forks_repo_licenses (sequence) | max_forks_count (int64, 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 5-1.04M) | avg_line_length (float64, 1.3-664k) | max_line_length (int64, 1-1.01M) | alphanum_fraction (float64, 0-1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b61ff226cc8bc5c97a79f089b48d89c585563de5 | 10,549 | py | Python | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_certificate_signing_request_condition.py | lsst-sqre/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1CertificateSigningRequestCondition(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'last_update_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'last_update_time': 'lastUpdateTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1CertificateSigningRequestCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._last_update_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if last_update_time is not None:
self.last_update_time = last_update_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
:return: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1CertificateSigningRequestCondition.
lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time. # noqa: E501
:param last_transition_time: The last_transition_time of this V1CertificateSigningRequestCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def last_update_time(self):
"""Gets the last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
lastUpdateTime is the time of the last update to this condition # noqa: E501
:return: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""Sets the last_update_time of this V1CertificateSigningRequestCondition.
lastUpdateTime is the time of the last update to this condition # noqa: E501
:param last_update_time: The last_update_time of this V1CertificateSigningRequestCondition. # noqa: E501
:type: datetime
"""
self._last_update_time = last_update_time
@property
def message(self):
"""Gets the message of this V1CertificateSigningRequestCondition. # noqa: E501
message contains a human readable message with details about the request state # noqa: E501
:return: The message of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1CertificateSigningRequestCondition.
message contains a human readable message with details about the request state # noqa: E501
:param message: The message of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1CertificateSigningRequestCondition. # noqa: E501
reason indicates a brief reason for the request state # noqa: E501
:return: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1CertificateSigningRequestCondition.
reason indicates a brief reason for the request state # noqa: E501
:param reason: The reason of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1CertificateSigningRequestCondition. # noqa: E501
status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
:return: The status of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1CertificateSigningRequestCondition.
status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\". # noqa: E501
:param status: The status of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1CertificateSigningRequestCondition. # noqa: E501
type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
:return: The type of this V1CertificateSigningRequestCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1CertificateSigningRequestCondition.
type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\". An \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer. A \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer. A \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate. Approved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added. Only one condition of a given type is allowed. # noqa: E501
:param type: The type of this V1CertificateSigningRequestCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CertificateSigningRequestCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CertificateSigningRequestCondition):
return True
return self.to_dict() != other.to_dict()
| 39.807547 | 675 | 0.661864 |
ef0ecc7c942476be2cb92d1d1a13b7b292ce8a16 | 1,538 | py | Python | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | .travis/docs_post_process.py | goerz/bibdeskparser | 4f60f9960f6f0156c2f3c89033065c4e121800ab | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pathlib import Path
import subprocess
from versions import get_versions_data, write_versions_json
INDEX_HTML = r'''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Refresh" content="0; url={default_branch}" />
</head>
<body>
<p>Go to the <a href="{default_branch}">default documentation</a>.</p>
</body>
</html>
'''
def write_index_html(default_branch):
"""Write an index.html that redirects to the DEFAULT_BRANCH."""
with open("index.html", "w") as out_fh:
out_fh.write(INDEX_HTML.format(default_branch=default_branch))
subprocess.run(['git', 'add', 'index.html'], check=True)
def find_downloads(folder):
"""Find files in the 'download' subfolder of the given `folder`."""
downloads = []
for filename in Path(folder).glob(r'download/*'):
label = "".join(filename.suffixes).replace('.', '').lower()
if len(label) > 0:
downloads.append((label, str(filename)))
return downloads
def main():
"""Main function."""
print("Post-processing documentation on gh-pages")
print("Gather versions info")
versions_data = get_versions_data(find_downloads=find_downloads)
latest_release = versions_data['latest_release']
if latest_release is None:
latest_release = 'master'
print("Write index.html")
write_index_html(latest_release)
print("Write versions.json")
write_versions_json(versions_data, outfile='versions.json')
print("DONE post-processing")
if __name__ == "__main__":
main()
| 29.018868 | 71 | 0.678153 |
eb688de90834d6518898aa2ec1fef74fddf96e40 | 404 | gyp | Python | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | ui/webui/resources/cr_components/compiled_resources2.gyp | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'cr_components_resources',
'type': 'none',
'dependencies': [
'chromeos/compiled_resources2.gyp:*',
'certificate_manager/compiled_resources2.gyp:*',
],
},
]
}
| 25.25 | 72 | 0.636139 |
7811bebd99ec25f433c0a6ed27e8a627c3b61246 | 1,982 | py | Python | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | 1 | 2021-09-23T09:42:32.000Z | 2021-09-23T09:42:32.000Z | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | RCJ_pcms_base/scripts/action_commands/go_to_point.py | FablabHome/The_Essense_of_the_Grey_Region | 6385ada0879bdc6c00cb707192841fdab9ab7bf1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import json
import sys
from os import path
import rospy
from geometry_msgs.msg import PoseStamped
from rospkg import RosPack
def main(args, goal_pub):
global config
msg = PoseStamped()
if args['point']:
x, y, z, w = args['point']
else:
try:
x, y, z, w = config[args['loc']]
except KeyError:
raise KeyError(f'Location {args["loc"]} does not exist')
msg.header.frame_id = 'map'
msg.pose.position.x = x
msg.pose.position.y = y
msg.pose.orientation.z = z
msg.pose.orientation.w = w
while rospy.get_param('/status_monitor/status_code') != 0:
goal_pub.publish(msg)
if args['wait_until_end']:
while rospy.get_param('/status_monitor/status_code') != 3:
continue
base = RosPack().get_path('rcj_pcms_base')
config = json.load(open(path.join(base, 'config/points.json')))
if __name__ == '__main__':
rospy.init_node('go_to_point', anonymous=True)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--point', nargs=4,
type=float,
                        help='point for the robot to go to, (x, y, z, w)')
parser.add_argument('-l', '--loc', type=str,
help='Publish point based config file "points.json"'
)
parser.add_argument('--wait-until-end', action='store_true',
help="Wait until the slam has end")
args = vars(parser.parse_args())
goal_pub = rospy.Publisher(
'/move_base_simple/goal',
PoseStamped,
queue_size=1
)
try:
if not (args['point'] or args['loc']):
raise Exception('Must specify -p or -l')
elif args['point'] and args['loc']:
raise Exception('Can only specify one of them')
main(args, goal_pub)
sys.exit(0)
except Exception as e:
print(f'Program ended due to: {e}')
sys.exit(1)
| 29.147059 | 76 | 0.584763 |
edb80171102d708b01a20f8f8778a9ca45f6e6e6 | 4,318 | py | Python | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | adapter/images/mkimage/mkextimage.py | ShadowCCY/build | 5c88ebad21093ef816087c9160bda8e5e9035008 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import subprocess
import os
FS_TYPE = "ext4"
BLOCKSIZE = 4096
def args_parse(argv):
parser = argparse.ArgumentParser(description='mkextimage.py')
parser.add_argument("src_dir", help="The source file for sload.")
parser.add_argument("device", help="The deivce for mkfs.")
parser.add_argument("mount_point", help="The filesystem mountpoint.")
parser.add_argument("fs_size", help="The size of filesystem.")
parser.add_argument("--fs_type", help="The filesystem type.")
parser.add_argument("--dac_config",
help="The path of dac config to e2fsdroid.")
parser.add_argument("--inode_size", help="The inode size to mke2fs.")
parser.add_argument("--file_context",
help="The path of file_context to e2fsdroid.")
parser.add_argument("--root_dir", help="The root dir for root image.")
parser.add_argument("--journal_size", help="The journal_size for mke2fs.")
parser.add_argument("--reserve_percent",
help="The reserve_percent for mke2fs.")
parser.add_argument("--extend_opts", nargs='+',
help="The extend opt for mke2fs.")
args = parser.parse_known_args(argv)[0]
return args
def run_cmd(cmd):
res = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sout, serr = res.communicate()
return res.pid, res.returncode, sout, serr
def build_run_mke2fs(args):
mke2fs_opts = ""
mke2fs_cmd = ""
is_data = False
if "data" in args.mount_point:
is_data = True
if args.extend_opts:
mke2fs_opts += " -E " + ",".join(args.extend_opts)
if args.inode_size:
mke2fs_opts += " -I " + args.inode_size
else:
mke2fs_opts += " -I " + "256"
if args.journal_size:
mke2fs_opts += " -J size=" + args.journal_size
elif not is_data:
mke2fs_opts += " -O ^has_journal"
if args.reserve_percent:
mke2fs_opts += " -m " + args.reserve_percent
elif not is_data:
mke2fs_opts += " -m 0"
mke2fs_opts += " -L " + args.mount_point + " -M " + args.mount_point
blocks = int(int(args.fs_size) / BLOCKSIZE)
mke2fs_cmd += ("mke2fs " + str(mke2fs_opts) + " -t " + FS_TYPE + " -b "
+ str(BLOCKSIZE) + " " + args.device + " " + str(blocks))
res = run_cmd(mke2fs_cmd)
if res[1] != 0:
print("info: " + mke2fs_cmd)
print("pid " + str(res[0]) + " ret " + str(res[1]) + "\n" +
res[2].decode() + res[3].decode())
return res[1]
def build_run_e2fsdroid(args):
e2fsdroid_opts = ""
e2fsdroid_cmd = ""
if not args.extend_opts or not "android_sparse" in args.extend_opts:
e2fsdroid_opts += " -e"
if args.dac_config:
e2fsdroid_opts += " -C " + args.dac_config
if args.file_context:
e2fsdroid_opts += " -S " + args.file_context
e2fsdroid_cmd += ("e2fsdroid" + e2fsdroid_opts + " -f " +
args.src_dir + " -a " + args.mount_point + " " + args.device)
res = run_cmd(e2fsdroid_cmd)
if res[1] != 0:
print("info: " + e2fsdroid_cmd)
print("pid " + str(res[0]) + " ret " + str(res[1]) + "\n" +
res[2].decode() + res[3].decode())
return res[1]
def build(args):
args = args_parse(args)
res = build_run_mke2fs(args)
if res != 0:
print("error run mke2fs errno: " + str(res))
sys.exit(1)
res = build_run_e2fsdroid(args)
if res != 0:
print("error run e2fsdroid errno: " + str(res))
os.remove(args.device)
sys.exit(2)
if __name__ == '__main__':
build(sys.argv[1:])
| 33.734375 | 78 | 0.619037 |
ee70cabd99219dee1472a3a0e566c6b2960ba004 | 163 | py | Python | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | 1 | 2022-02-09T16:02:17.000Z | 2022-02-09T16:02:17.000Z | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | 321 | 2021-07-16T15:22:20.000Z | 2021-07-19T20:57:51.000Z | backend/modules/terms_and_conditions/apps.py | crowdbotics-apps/test-004-32225 | 11bcaa9ba15964c39a2aca2a5eaf016334585c50 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.apps import AppConfig
class TermsAndConditionsConfig(AppConfig):
name = "modules.terms_and_conditions"
verbose_name = "Terms and Conditions"
| 23.285714 | 42 | 0.785276 |
d9ead0c192eb7fafb7e0d312f76149804e905a54 | 7,786 | py | Python | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | tests/test_enable_ssl.py | Spread0x/pg_auto_failover | e471d140a528e277c0272a6a5a307be8d2050ac4 | [
"PostgreSQL"
] | null | null | null | import pgautofailover_utils as pgautofailover
import ssl_cert_utils as cert
import subprocess
import os
cluster = None
monitor = None
node1 = None
node2 = None
def setup_module():
global cluster
cluster = pgautofailover.Cluster()
def teardown_module():
cluster.destroy()
# remove client side setup for certificates too
client_top_directory = os.path.join(os.getenv("HOME"), ".postgresql")
p = subprocess.Popen(["sudo", "-E", '-u', os.getenv("USER"),
'env', 'PATH=' + os.getenv("PATH"),
"rm", "-rf", client_top_directory])
assert(p.wait() == 0)
# also remove certificates we created for the servers
p = subprocess.run(["sudo", "-E", '-u', os.getenv("USER"),
'env', 'PATH=' + os.getenv("PATH"),
"rm", "-rf", "/tmp/certs"])
assert(p.returncode == 0)
def test_000_create_monitor():
global monitor
monitor = cluster.create_monitor("/tmp/enable/monitor")
monitor.run()
monitor.wait_until_pg_is_running()
monitor.check_ssl("off", "prefer")
def test_001_init_primary():
global node1
node1 = cluster.create_datanode("/tmp/enable/node1")
node1.create()
node1.run()
assert node1.wait_until_state(target_state="single")
node1.wait_until_pg_is_running()
node1.check_ssl("off", "prefer", primary=True)
def test_002_create_t1():
node1.run_sql_query("CREATE TABLE t1(a int)")
node1.run_sql_query("INSERT INTO t1 VALUES (1), (2)")
def test_003_init_secondary():
global node2
node2 = cluster.create_datanode("/tmp/enable/node2")
node2.create()
node2.run()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
node2.check_ssl("off", "prefer")
def test_004_maintenance():
print()
print("Enabling maintenance on node2")
node2.enable_maintenance()
assert node2.wait_until_state(target_state="maintenance")
def test_005_enable_ssl_monitor():
monitor.enable_ssl(sslSelfSigned=True, sslMode="require")
monitor.sleep(2) # we signaled, wait some time
monitor.check_ssl("on", "require")
def test_006_enable_ssl_primary():
# we stop pg_autoctl to make it easier for the test to be reliable
# without too much delay/sleep hacking; when doing the `pg_autoctl
# enable ssl` online we need to make sure the signal made it to the
# running process and then was acted upon
node1.stop_pg_autoctl()
node1.enable_ssl(sslSelfSigned=True, sslMode="require")
node1.run()
node1.wait_until_pg_is_running()
node1.check_ssl("on", "require", primary=True)
def test_007_enable_ssl_secondary():
node2.stop_pg_autoctl()
node2.enable_ssl(sslSelfSigned=True, sslMode="require")
node2.run()
node2.wait_until_pg_is_running()
node2.check_ssl("on", "require")
def test_008_disable_maintenance():
print("Disabling maintenance on node2")
node2.disable_maintenance()
assert node2.wait_until_pg_is_running()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
# upgrade to verify full
def test_009_enable_maintenance():
print()
print("Enabling maintenance on node2")
node2.enable_maintenance()
assert node2.wait_until_state(target_state="maintenance")
def test_010_enable_ssl_verify_ca_monitor():
client_top_directory = os.path.join(os.getenv("HOME"), ".postgresql")
print()
print("Creating cluster root certificate")
cluster.create_root_cert(client_top_directory,
basename = "root",
CN = "/CN=root.pgautofailover.ca")
p = subprocess.run(["ls", "-ld",
client_top_directory,
cluster.cert.crt, cluster.cert.csr, cluster.cert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
# now create and sign the CLIENT certificate
print("Creating cluster client certificate")
clientCert = cert.SSLCert(client_top_directory,
basename = "postgresql",
CN = "/CN=autoctl_node")
clientCert.create_signed_certificate(cluster.cert)
p = subprocess.run(["ls", "-ld",
client_top_directory,
clientCert.crt, clientCert.csr, clientCert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
# the root user also needs the certificates, tests are connecting with it
subprocess.run(["ln", "-s", client_top_directory, "/root/.postgresql"])
assert(p.returncode == 0)
p = subprocess.run(["ls", "-l", "/root/.postgresql"],
text=True,
capture_output=True)
print("%s" % p.stdout)
# now create and sign the SERVER certificate for the monitor
print("Creating monitor server certificate")
monitorCert = cert.SSLCert("/tmp/certs/monitor", "server",
"/CN=monitor.pgautofailover.ca")
monitorCert.create_signed_certificate(cluster.cert)
p = subprocess.run(["ls", "-ld",
client_top_directory,
cluster.cert.crt, cluster.cert.csr, cluster.cert.key,
clientCert.crt, clientCert.csr, clientCert.key,
monitorCert.crt, monitorCert.csr, monitorCert.key],
text=True,
capture_output=True)
print("%s" % p.stdout)
monitor.enable_ssl(sslCAFile=cluster.cert.crt,
sslServerKey=monitorCert.key,
sslServerCert=monitorCert.crt,
sslMode="verify-ca")
monitor.sleep(2) # we signaled, wait some time
monitor.check_ssl("on", "verify-ca")
def test_011_enable_ssl_verify_ca_primary():
node1Cert = cert.SSLCert("/tmp/certs/node1", "server",
"/CN=node1.pgautofailover.ca")
node1Cert.create_signed_certificate(cluster.cert)
node1.stop_pg_autoctl()
node1.enable_ssl(sslCAFile = cluster.cert.crt,
sslServerKey = node1Cert.key,
sslServerCert = node1Cert.crt,
sslMode="verify-ca")
node1.run()
node1.wait_until_pg_is_running()
node1.check_ssl("on", "verify-ca", primary=True)
def test_012_enable_ssl_verify_ca_secondary():
node2Cert = cert.SSLCert("/tmp/certs/node2", "server",
"/CN=node2.pgautofailover.ca")
node2Cert.create_signed_certificate(cluster.cert)
node2.stop_pg_autoctl()
node2.enable_ssl(sslCAFile = cluster.cert.crt,
sslServerKey = node2Cert.key,
sslServerCert = node2Cert.crt,
sslMode="verify-ca")
node2.run()
node2.wait_until_pg_is_running()
node2.check_ssl("on", "verify-ca")
def test_013_disable_maintenance():
print("Disabling maintenance on node2")
node2.disable_maintenance()
assert node2.wait_until_pg_is_running()
assert node2.wait_until_state(target_state="secondary")
assert node1.wait_until_state(target_state="primary")
def test_014_enable_ssl_require_primary():
node1Cert = cert.SSLCert("/tmp/certs/node1", "server",
"/CN=node1.pgautofailover.ca")
node1Cert.create_signed_certificate(cluster.cert)
node1.stop_pg_autoctl()
node1.enable_ssl(sslServerKey = node1Cert.key,
sslServerCert = node1Cert.crt,
sslMode="require")
node1.run()
node2.wait_until_pg_is_running()
node1.check_ssl("on", "require", primary=True)
| 34.451327 | 78 | 0.636142 |
bef868f360c61403e17d12cd7dda002ebac14b20 | 497 | py | Python | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 12 | 2016-04-27T21:25:57.000Z | 2021-10-01T08:33:03.000Z | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 4 | 2017-11-10T13:50:46.000Z | 2021-06-10T19:21:21.000Z | mcf_standard_browser/standards_review/migrations/0005_auto_20160825_1731.py | andy-d-palmer/curatr | 2bd5140c9d4e121c7a0ad32529350e5ccc6d201d | [
"Apache-2.0"
] | 4 | 2017-12-19T06:47:41.000Z | 2020-03-24T16:54:48.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-25 17:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('standards_review', '0004_auto_20160816_1205'),
]
operations = [
migrations.AlterField(
model_name='molecule',
name='tags',
field=models.ManyToManyField(blank=True, to='standards_review.MoleculeTag'),
),
]
| 23.666667 | 88 | 0.639839 |
1a34e67bbed7d4659482659d167a66b8102e6dcc | 2,445 | py | Python | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dherbison/recipe-app-api | 3dc5cf1dfcb2a4068c3536209cf78b8d3a134dd4 | [
"MIT"
] | null | null | null | from core import models
from django.contrib.auth import get_user_model
from django.test import TestCase
from unittest.mock import patch
def sample_user(email='[email protected]', password='testpass'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = '[email protected]'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
    def test_new_user_email_normalized(self):
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'tst21132')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
""""""
user = get_user_model().objects.create_superuser(
'[email protected]', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_create_tag_str(self):
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
# this set() fx is the __str__ function in the
# Tag.__str__ method in the models.py file.
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'myimage.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 32.171053 | 70 | 0.64499 |
bc69429d3c97ccccf75eb792d3c7fc738cbe1c15 | 642 | py | Python | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 6 | 2015-03-26T11:32:14.000Z | 2017-02-17T02:40:44.000Z | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 1 | 2015-06-07T19:09:33.000Z | 2015-06-23T08:54:50.000Z | auana/matplot.py | Hoohaha/Auana-P | f60603468322751682204e42718cc1089a23ac60 | [
"Artistic-2.0"
] | 1 | 2015-04-23T09:13:23.000Z | 2015-04-23T09:13:23.000Z | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# example data
mu = 100 # mean of distribution
sigma = 15 # standard deviation of distribution
x = mu + sigma * np.random.randn(10000)
print len(x)
num_bins = 20
# the histogram of the data
n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)#num_bins,
# add a 'best fit' line
# y = mlab.normpdf(bins, mu, sigma)
# plt.plot(bins, y, 'r--')
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'Histogram of IQ: $\mu=100$, $\sigma=15$')
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show() | 26.75 | 90 | 0.714953 |
a06596eba9cb41f16483b714bc52536b1507941d | 4,044 | py | Python | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | null | null | null | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | 1 | 2016-01-28T17:05:58.000Z | 2016-01-28T17:05:58.000Z | sample/nov_rp/models.py | hdknr/pypam | e2253a31073f6b46c504515479683dc8571433b8 | [
"Apache-2.0"
] | null | null | null | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# Feel free to rename the models,
# but don't rename db_table values or field names.
#
# Also note: You'll have to insert the output
# of 'django-admin.py sqlcustom [appname]'
# into your database.
from __future__ import unicode_literals
from django.db import models
import requests
class Accounts(models.Model):
id = models.IntegerField(primary_key=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class Meta:
db_table = 'accounts'
class OpenIds(models.Model):
id = models.IntegerField(primary_key=True)
account_id = models.IntegerField(null=True, blank=True)
provider_id = models.IntegerField(null=True, blank=True)
identifier = models.CharField(max_length=255, blank=True)
access_token = models.CharField(max_length=255, blank=True)
id_token = models.CharField(max_length=1024, blank=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
class Meta:
db_table = 'open_ids'
def __init__(self, *args, **kwargs):
super(OpenIds, self).__init__(*args, **kwargs)
self._provider = None
@property
def provider(self):
if self._provider is None:
self._provider = Providers.objects.get(id=self.provider_id)
return self._provider
@property
def authorization_header(self):
return {"Authorization": "Bearer %s" % self.access_token}
def get_resource(self, endpoint):
res = requests.get(
endpoint, headers=self.authorization_header)
return res.json()
def post_resource(self, endpoint, **kwargs):
res = requests.post(
endpoint, data=kwargs, headers=self.authorization_header)
return res.json()
def get_user_info(self):
return self.get_resource(self.provider.userinfo_endpoint)
def introspect_test(self):
return self.get_resource(self.provider.introspect_endpoint)
def introspect_id_token(self):
return self.post_resource(
self.provider.introspect_endpoint,
token=self.id_token,
token_type_hint="id_token",
)
def introspect_access_token(self):
return self.post_resource(
self.provider.introspect_endpoint,
token=self.access_token,
token_type_hint="access_token",
)
class Providers(models.Model):
id = models.IntegerField(primary_key=True)
account_id = models.IntegerField(null=True, blank=True)
issuer = models.CharField(max_length=255, blank=True)
jwks_uri = models.CharField(max_length=255, blank=True)
name = models.CharField(max_length=255, blank=True)
identifier = models.CharField(max_length=255, blank=True)
secret = models.CharField(max_length=255, blank=True)
scope = models.CharField(max_length=255, blank=True)
host = models.CharField(max_length=255, blank=True)
scheme = models.CharField(max_length=255, blank=True)
authorization_endpoint = models.CharField(max_length=255, blank=True)
token_endpoint = models.CharField(max_length=255, blank=True)
userinfo_endpoint = models.CharField(max_length=255, blank=True)
# dynamic = models.NullBooleanField(null=True, blank=True)
dynamic = models.CharField(max_length=1, null=True, blank=True)
expires_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
jwkset = models.TextField(blank=True, null=True,)
class Meta:
db_table = 'providers'
@property
def introspect_endpoint(self):
return self.userinfo_endpoint.replace(
'user_info', 'introspect')
class SchemaMigrations(models.Model):
version = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'schema_migrations'
| 33.983193 | 73 | 0.699555 |
942e03334ff1906a7790bfac6507dbdf182930c3 | 5,001 | py | Python | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 13 | 2016-03-30T10:45:10.000Z | 2020-12-29T14:15:50.000Z | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 5 | 2017-04-05T14:56:32.000Z | 2020-03-28T20:34:09.000Z | django_concurrent_tests/utils.py | alexey74/django-concurrent-test-helper | 1202915049a498d8fc31a75d83b459854f76750b | [
"Apache-2.0"
] | 1 | 2021-01-12T16:38:49.000Z | 2021-01-12T16:38:49.000Z | from __future__ import print_function
import os
import logging
import subprocess
import sys
import threading
from collections import namedtuple
from contextlib import contextmanager
import six
from django.conf import settings
from django.core.management import call_command
from . import b64pickle, errors
logger = logging.getLogger(__name__)
SUBPROCESS_TIMEOUT = int(os.environ.get('DJANGO_CONCURRENT_TESTS_TIMEOUT', '30'))
SubprocessRun = namedtuple('SubprocessRun', ['manager', 'result'])
class ProcessManager(object):
def __init__(self, cmd):
"""
Kwargs:
cmd (Union[str, List[str]]): `args` arg to `Popen` call
"""
self.cmd = cmd
self.process = None
self.stdout = None
self.stderr = None
self.terminated = False # whether subprocess was terminated by timeout
def run(self, timeout):
"""
Kwargs:
timeout (Float): how long to wait for the subprocess to complete task
Returns:
str: stdout output from subprocess
"""
def target():
env = os.environ.copy()
env['DJANGO_CONCURRENT_TESTS_PARENT_PID'] = str(os.getpid())
self.process = subprocess.Popen(
self.cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
logger.debug('[{pid}] {cmd}'.format(pid=self.process.pid, cmd=' '.join(self.cmd)))
self.stdout, self.stderr = self.process.communicate()
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
# we reached the timeout deadline with process still running
if self.process:
logger.debug('[{pid}] reached timeout: terminating...'.format(pid=self.process.pid))
self.process.terminate()
logger.debug('[{pid}] reached timeout: terminated.'.format(pid=self.process.pid))
else:
logger.debug('reached timeout: process did not start.')
self.terminated = True
thread.join()
if self.stderr:
logger.error(self.stderr)
return self.stdout
def run_in_subprocess(f, **kwargs):
"""
Args:
f (Union[function, str]): the function to call, or
the 'dotted module.path.to:function' as a string (NOTE
colon separates the name to import)
**kwargs - kwargs to pass to `function`
Returns:
SubprocessRun: where `<SubprocessRun>.result` is either
<return value> OR <exception raised>
or None if result was empty
NOTE:
`kwargs` must be pickleable
<return value> of `function` must be pickleable
"""
# wrap everything in a catch-all except to avoid hanging the subprocess
try:
serialized_kwargs = b64pickle.dumps(kwargs)
if isinstance(f, six.string_types):
function_path = f
else:
function_path = '{module}:{name}'.format(
module=f.__module__,
name=f.__name__,
)
if not os.environ.get('CONCURRENT_TESTS_NO_SUBPROCESS'):
cmd = [
getattr(settings, 'MANAGE_PY_PATH', './manage.py'),
'concurrent_call_wrapper',
function_path,
'--kwargs=%s' % serialized_kwargs,
]
manager = ProcessManager(cmd)
result = manager.run(timeout=SUBPROCESS_TIMEOUT)
if manager.terminated:
raise errors.TerminatedProcessError(result)
else:
logger.debug('Calling {f} in current process'.format(f=function_path))
manager = None
# TODO: collect stdout and maybe log it from here
result = call_command(
'concurrent_call_wrapper',
function_path,
kwargs=serialized_kwargs,
)
# deserialize the result from subprocess run
# (any error raised when running the concurrent func will be stored in `result`)
return SubprocessRun(
manager=manager,
result=b64pickle.loads(result) if result else None,
)
except Exception as e:
# handle any errors which occurred during setup of subprocess
return SubprocessRun(
manager=manager,
result=errors.WrappedError(e),
)
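# A minimal usage sketch (an illustration added here, not part of the original
# module): 'myapp.tasks:create_user' and its kwargs are assumed names, chosen
# only to show the 'module.path:function' string form and the pickleable-kwargs
# requirement described in the docstring above.
def _example_run_in_subprocess():
    run = run_in_subprocess('myapp.tasks:create_user', username='alice')
    if isinstance(run.result, Exception):
        raise run.result  # the error raised inside the subprocess
    return run.result  # otherwise, the function's (pickleable) return value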
@contextmanager
def redirect_stdout(to):
original = sys.stdout
sys.stdout = to
yield
sys.stdout = original
@contextmanager
def override_environment(**kwargs):
"""
NOTE:
The values in `kwargs` must be strings else you will get a cryptic:
TypeError: execve() arg 3 contains a non-string value
"""
old_env = os.environ
new_env = os.environ.copy()
new_env.update(kwargs)
os.environ = new_env
yield
os.environ = old_env
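# A small sketch (an illustration added here, not part of the original module)
# of the gotcha noted in the docstring above: every value passed in must
# already be a str. DJANGO_SETTINGS_MODULE / WORKER_COUNT are assumed names.
def _example_override_environment():
    with override_environment(DJANGO_SETTINGS_MODULE='settings.test',
                              WORKER_COUNT=str(4)):  # str(), not 4
        return os.environ['WORKER_COUNT']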
| 30.309091 | 100 | 0.596081 |
c984158c7f9cb31b236eb10f777233a69c43da0b | 255 | py | Python | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 41 | 2016-01-21T05:14:45.000Z | 2021-11-24T20:37:21.000Z | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 5 | 2016-01-21T05:36:37.000Z | 2016-08-22T19:26:51.000Z | fython/test/instruction/various_test.py | nicolasessisbreton/fython | 988f5a94cee8b16b0000501a22239195c73424a1 | [
"Apache-2.0"
] | 3 | 2016-01-23T04:03:44.000Z | 2016-08-21T15:58:38.000Z | s = r"""
.a.fy
real : x y z(10)
int dimension(3) a = [1, 2, 3]
x += 1 + 3
x /= sum(
x +
y +
z
)
"""
from fython.test import *
writer(s)
w = load('.a', force=1, release=1, verbose=0)
# print(open(w.module.url.fortran_path, 'r').read())
| 11.590909 | 52 | 0.517647 |
429ef552bab3a2edc7fc294879023c596ddcef88 | 15,649 | py | Python | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | 1 | 2021-08-10T13:16:36.000Z | 2021-08-10T13:16:36.000Z | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | null | null | null | fedml_experiments/standalone/fedavg/main_fedavg.py | Starry-Hu/FedML | 0fd4bd55b7b3122c8cb4faee9fe36dcb1998657d | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import os
import random
import sys
import numpy as np
import torch
import wandb
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
from fedml_api.data_preprocessing.cifar10.data_loader import load_partition_data_cifar10
from fedml_api.data_preprocessing.cifar100.data_loader import load_partition_data_cifar100
from fedml_api.data_preprocessing.cinic10.data_loader import load_partition_data_cinic10
from fedml_api.data_preprocessing.fed_cifar100.data_loader import load_partition_data_federated_cifar100
from fedml_api.data_preprocessing.shakespeare.data_loader import load_partition_data_shakespeare
from fedml_api.data_preprocessing.fed_shakespeare.data_loader import load_partition_data_federated_shakespeare
from fedml_api.data_preprocessing.stackoverflow_lr.data_loader import load_partition_data_federated_stackoverflow_lr
from fedml_api.data_preprocessing.stackoverflow_nwp.data_loader import load_partition_data_federated_stackoverflow_nwp
from fedml_api.data_preprocessing.ImageNet.data_loader import load_partition_data_ImageNet
from fedml_api.data_preprocessing.Landmarks.data_loader import load_partition_data_landmarks
from fedml_api.model.cv.mobilenet import mobilenet
from fedml_api.model.cv.resnet import resnet56
from fedml_api.model.cv.cnn import CNN_DropOut
from fedml_api.data_preprocessing.FederatedEMNIST.data_loader import load_partition_data_federated_emnist
from fedml_api.model.nlp.rnn import RNN_OriginalFedAvg, RNN_StackOverFlow
from fedml_api.data_preprocessing.MNIST.data_loader import load_partition_data_mnist
from fedml_api.model.linear.lr import LogisticRegression
from fedml_api.model.cv.resnet_gn import resnet18
from fedml_api.standalone.fedavg.fedavg_api import FedAvgAPI
from fedml_api.standalone.fedavg.my_model_trainer_classification import MyModelTrainer as MyModelTrainerCLS
from fedml_api.standalone.fedavg.my_model_trainer_nwp import MyModelTrainer as MyModelTrainerNWP
from fedml_api.standalone.fedavg.my_model_trainer_tag_prediction import MyModelTrainer as MyModelTrainerTAG
def add_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
# Training settings
parser.add_argument('--model', type=str, default='resnet56', metavar='N',
help='neural network used in training')
parser.add_argument('--dataset', type=str, default='cifar10', metavar='N',
help='dataset used for training')
parser.add_argument('--data_dir', type=str, default='./../../../data/cifar10',
help='data directory')
parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
help='how to partition the dataset on local workers')
parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
help='partition alpha (default: 0.5)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--client_optimizer', type=str, default='adam',
help='SGD with momentum; adam')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=5, metavar='EP',
help='how many epochs will be trained locally')
parser.add_argument('--client_num_in_total', type=int, default=10, metavar='NN',
help='number of workers in a distributed cluster')
parser.add_argument('--client_num_per_round', type=int, default=10, metavar='NN',
help='number of workers')
parser.add_argument('--comm_round', type=int, default=10,
help='how many round of communications we shoud use')
parser.add_argument('--frequency_of_the_test', type=int, default=5,
help='the frequency of the algorithms')
parser.add_argument('--gpu', type=int, default=0,
help='gpu')
parser.add_argument('--ci', type=int, default=0,
help='CI')
return parser
def load_data(args, dataset_name):
    # check if centralized training is enabled (if only one client participates in total, training is treated as centralized)
centralized = True if args.client_num_in_total == 1 else False
# check if the full-batch training is enabled
args_batch_size = args.batch_size
    if args.batch_size <= 0:  # why <= 0? (a non-positive value requests full-batch training)
full_batch = True
args.batch_size = 128 # temporary batch size
else:
full_batch = False
if dataset_name == "mnist":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_mnist(args.batch_size)
"""
For shallow NN or linear models,
we uniformly sample a fraction of clients each round (as the original FedAvg paper)
"""
args.client_num_in_total = client_num
elif dataset_name == "femnist":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_emnist(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "shakespeare":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_shakespeare(args.batch_size)
args.client_num_in_total = client_num
elif dataset_name == "fed_shakespeare":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_shakespeare(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "fed_cifar100":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_cifar100(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "stackoverflow_lr":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_stackoverflow_lr(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "stackoverflow_nwp":
logging.info("load_data. dataset_name = %s" % dataset_name)
client_num, train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_federated_stackoverflow_nwp(args.dataset, args.data_dir)
args.client_num_in_total = client_num
elif dataset_name == "ILSVRC2012":
logging.info("load_data. dataset_name = %s" % dataset_name)
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_ImageNet(dataset=dataset_name, data_dir=args.data_dir,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
elif dataset_name == "gld23k":
logging.info("load_data. dataset_name = %s" % dataset_name)
args.client_num_in_total = 233
fed_train_map_file = os.path.join(args.data_dir, 'mini_gld_train_split.csv')
fed_test_map_file = os.path.join(args.data_dir, 'mini_gld_test.csv')
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir,
fed_train_map_file=fed_train_map_file,
fed_test_map_file=fed_test_map_file,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
elif dataset_name == "gld160k":
logging.info("load_data. dataset_name = %s" % dataset_name)
args.client_num_in_total = 1262
fed_train_map_file = os.path.join(args.data_dir, 'federated_train.csv')
fed_test_map_file = os.path.join(args.data_dir, 'test.csv')
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = load_partition_data_landmarks(dataset=dataset_name, data_dir=args.data_dir,
fed_train_map_file=fed_train_map_file,
fed_test_map_file=fed_test_map_file,
partition_method=None, partition_alpha=None,
client_number=args.client_num_in_total, batch_size=args.batch_size)
else:
if dataset_name == "cifar10":
data_loader = load_partition_data_cifar10
elif dataset_name == "cifar100":
data_loader = load_partition_data_cifar100
elif dataset_name == "cinic10":
data_loader = load_partition_data_cinic10
else:
data_loader = load_partition_data_cifar10
train_data_num, test_data_num, train_data_global, test_data_global, \
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, \
class_num = data_loader(args.dataset, args.data_dir, args.partition_method,
args.partition_alpha, args.client_num_in_total, args.batch_size)
    # if centralized training (only one client participates), rebuild the per-client local data dicts so everything maps to client 0
if centralized:
train_data_local_num_dict = {
0: sum(user_train_data_num for user_train_data_num in train_data_local_num_dict.values())}
train_data_local_dict = {
0: [batch for cid in sorted(train_data_local_dict.keys()) for batch in train_data_local_dict[cid]]}
test_data_local_dict = {
0: [batch for cid in sorted(test_data_local_dict.keys()) for batch in test_data_local_dict[cid]]}
args.client_num_in_total = 1
    # if full-batch training is enabled, merge each loader's batches into a single batch
if full_batch:
train_data_global = combine_batches(train_data_global)
test_data_global = combine_batches(test_data_global)
train_data_local_dict = {cid: combine_batches(train_data_local_dict[cid]) for cid in
train_data_local_dict.keys()}
test_data_local_dict = {cid: combine_batches(test_data_local_dict[cid]) for cid in test_data_local_dict.keys()}
args.batch_size = args_batch_size
dataset = [train_data_num, test_data_num, train_data_global, test_data_global,
train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num]
return dataset
def combine_batches(batches):
full_x = torch.from_numpy(np.asarray([])).float()
full_y = torch.from_numpy(np.asarray([])).long()
for (batched_x, batched_y) in batches:
full_x = torch.cat((full_x, batched_x), 0)
full_y = torch.cat((full_y, batched_y), 0)
return [(full_x, full_y)]
def create_model(args, model_name, output_dim):
logging.info("create_model. model_name = %s, output_dim = %s" % (model_name, output_dim))
model = None
if model_name == "lr" and args.dataset == "mnist":
logging.info("LogisticRegression + MNIST")
model = LogisticRegression(28 * 28, output_dim)
elif model_name == "cnn" and args.dataset == "femnist":
logging.info("CNN + FederatedEMNIST")
model = CNN_DropOut(False)
elif model_name == "resnet18_gn" and args.dataset == "fed_cifar100":
logging.info("ResNet18_GN + Federated_CIFAR100")
model = resnet18()
elif model_name == "rnn" and args.dataset == "shakespeare":
logging.info("RNN + shakespeare")
model = RNN_OriginalFedAvg()
elif model_name == "rnn" and args.dataset == "fed_shakespeare":
logging.info("RNN + fed_shakespeare")
model = RNN_OriginalFedAvg()
elif model_name == "lr" and args.dataset == "stackoverflow_lr":
logging.info("lr + stackoverflow_lr")
model = LogisticRegression(10000, output_dim)
elif model_name == "rnn" and args.dataset == "stackoverflow_nwp":
logging.info("RNN + stackoverflow_nwp")
model = RNN_StackOverFlow()
elif model_name == "resnet56":
model = resnet56(class_num=output_dim)
elif model_name == "mobilenet":
model = mobilenet(class_num=output_dim)
return model
def custom_model_trainer(args, model):
if args.dataset == "stackoverflow_lr":
return MyModelTrainerTAG(model)
elif args.dataset in ["fed_shakespeare", "stackoverflow_nwp"]:
return MyModelTrainerNWP(model)
else: # default model trainer is for classification problem
return MyModelTrainerCLS(model)
if __name__ == "__main__":
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
parser = add_args(argparse.ArgumentParser(description='FedAvg-standalone'))
args = parser.parse_args()
logger.info(args)
device = torch.device("cuda:" + str(args.gpu) if torch.cuda.is_available() else "cpu")
logger.info(device)
wandb.init(
project="fedml",
name="FedAVG-r" + str(args.comm_round) + "-e" + str(args.epochs) + "-lr" + str(args.lr),
config=args
)
# Set the random seed. The np.random seed determines the dataset partition.
# The torch_manual_seed determines the initial weight.
# We fix these two, so that we can reproduce the result.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
# load data
dataset = load_data(args, args.dataset)
# create model.
# Note if the model is DNN (e.g., ResNet), the training will be very slow.
# In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
model = create_model(args, model_name=args.model, output_dim=dataset[7])
model_trainer = custom_model_trainer(args, model)
logging.info(model)
fedavgAPI = FedAvgAPI(dataset, device, args, model_trainer)
fedavgAPI.train()
| 49.365931 | 119 | 0.698894 |
7ae1ea121dc8bdaffb51292d10b07fe5cb831721 | 7,528 | py | Python | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null | ventanaAdministradorVehiculos.py | DavidPareja14/Administrador-de-vehiculos | 7f294bbfd11579d470379f03d426c7223949e7a3 | ["MIT"] | null | null | null |
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.lang import Builder
from anadirVehiculo import AgregarVehiculo
import requests
"""
Los botones de vehiculo, Ubicacion y Eliminar los implemento como clases aparte, esto con el objeto de poder obtener la instancia de cada
btn al presionar uno, ya que desde el kv solo es mandar como parametro a la funcion (self) si el btn es presionado, hay otras formas,
pero no quiero, con esto puedo obtener el id correspondiente al vehiculo.
"""
#l=[]
k=Builder.load_string("""
<SecondWindow>:
name: "segunda"
BoxLayout:
id:box
orientation:"vertical"
BoxLayout:
size_hint_y:0.3
orientation:"vertical"
Label:
text: "Administrador de vehiculos"
BoxLayout:
Label:
text: "Vehiculo"
Label:
text: "Ubicacion"
Label:
text: "Eliminar"
ScrollView:
id: scroll
GridLayout:
id: contenedorFilas
cols: 1
            size_hint_y: None # If this is not set, the scroll does not appear.
row_default_height: root.height*0.1
height: self.minimum_height
BoxLayout:
size_hint_y:0.25
spacing: 50
        padding: 20,30,50,10 # Margins: left, top, right, bottom
Button:
text: "Agregar Vehiculo"
on_release:
root.oprimidoBtnAgregarVehiculo()
Button:
text: "GPS"
on_release:
root.pantallas(app)
root.manager.current="gps"
<BotonVehiculo>:
on_press: app.root.current="tableroPrincipal"
<BotonUbicacion>:
on_press: root.ubicacionVehiculo()
<BotonEliminar>:
on_press: root.eliminarVehiculo()
""")
class BotonVehiculo(Button):
def tableroVehiculo(self):
pass
class BotonUbicacion(Button):
def ubicacionVehiculo(self):
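        # Approximate geolocation: resolve the public IP, then query geojs.io for its lat/long.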
ip_request = requests.get('https://get.geojs.io/v1/ip.json')
my_ip = ip_request.json()['ip']
geo_request = requests.get('https://get.geojs.io/v1/ip/geo/' +my_ip + '.json')
geo_data = geo_request.json()
        # Add the location to the DB
        print(self.parent.children[2].text) # To get the vehicle name.
print(geo_data['latitude'], geo_data['longitude'])
self.popup = Popup(title="ESTADO",
content=Label(text="Ubicacion guardada correctamente"),
size_hint=(0.7, 0.2))
self.popup.open()
class BotonEliminar(Button):
"""ATENCION
Para la f eliminarVeh no me permite eliminar botones con el parent cosa que es extraña, porque con solo poner self.parent me muestra el pad
re del btn, supuse que tal vez me interpretaba el btn como un objeto diferente, como sea, lo que hice fue crear una lista l con todos
los objetos boxlayout creados, luego comparo ese con el boxlayout padre de mi btn seleccionado y borro los botones (veh, ubic, elim)
pero desde el obj metido en la lista y funciona. Luego meti el gridLayout que contiene a todos los boxlayout en la ultima pos de la lis
ta para poder accederlo y elimimar el boxlayout que contiene al boton oprimido, lo elimina, pero en la terminal de cmd salen errores
al yo cerrar la ventana de kivy.
LO HE SOLUCIONADO
Utilizando la lista l para meter los objetos BoxLayout y Grid ([objBox,objBox,objBox,..., objGridLayout]) podia eliminar los
objetos BoxLayout si se seleccionaba el boton respectivo, sin embargo al cerrar la aplicacion, se generaban errores, lo que pienso,
que yo eliminaba un box pero como era una copia quedaba el otro, esto puede generar incosistencias, al hacer la prueba unitaria con este
modulo, me di cuenta que mi implementacion funcionaba con normalidad, sin necesidad de una lista, solo con self.parent... ahora, he quitado
el codigo kv del archivo vistas.kv y lo integro en este archivo y funciona.
"""
def eliminarVehiculo(self):
        print(self.parent.children[2].text) # gets the name of the vehicle to delete from the DB
        self.parent.parent.remove_widget(self.parent)
        #print(self.parent.remove_widget(self)) # meant to remove the buttons tied to the delete action, but it raises an error;
        # tested as a standalone module it works fine.
"""
for obj in l:
if self.parent==obj:
l[-1].remove_widget(obj)
"""
class SecondWindow(Screen):
#l=[]
def __init__(self, **kwargs):
super(SecondWindow, self).__init__(**kwargs)
        Clock.schedule_once(lambda dt: self.scrollVehiculos()) # done this way because using self.ids from the constructor
        # raises an error, since the ids have not been created yet; Clock delays the call so it works,
        # and with the lambda there is no need to receive dt.
def oprimidoBtnAgregarVehiculo(self):
        self.content = AgregarVehiculo() # the text passed here is captured by the StringProperty
        self.content.bind(on_guardar=self._on_guardar) # as I understand it, this hands control of on_guardar over to _on_guardar
self.popup = Popup(title="Agregue el vehiculo que desee",
content=self.content,
size_hint=(0.9, 0.9))
self.popup.open()
def pantallas(self, app):
        app.root.screens[3].actualizarMarcadores() # so the map always appears centred on the current location
def _on_guardar(self, instance):
        resultadoVentanaAgregarVehiculo = self.content.on_guardar() # position 0 says whether the AgregarVehiculo data is valid or not.
        if resultadoVentanaAgregarVehiculo[0]: # position holding True or False
            box = BoxLayout(orientation="horizontal")
            box.add_widget(BotonVehiculo(text=resultadoVentanaAgregarVehiculo[1])) # position holding the vehicle name.
            box.add_widget(BotonUbicacion(text="ubicacion")) # the ids are the same and correspond to the vehicle name
box.add_widget(BotonEliminar(text="Eliminar"))
self.ids.contenedorFilas.add_widget(box)
self.popup.dismiss()
else:
pass
def scrollVehiculos(self):
        # QUERY THE DATABASE TO LIST ALL VEHICLES
for i in range(5):
#self.l.append(BoxLayout(orientation="horizontal"))
            #self.ids.contenedorFilas.add_widget(self.l[-1]) # add the required BoxLayouts to the GridLayout; each BoxLayout
            # can hold the three buttons.
self.ids.contenedorFilas.add_widget(BoxLayout(orientation="horizontal"))
for i, n in enumerate(self.ids.contenedorFilas.children):
n.add_widget(BotonVehiculo(text="vehiculo"+str(i)))
n.add_widget(BotonUbicacion(text="ubicacion"+str(i))) #Los ids son iguales y corresponden al nombre del vehiculo
n.add_widget(BotonEliminar(text="Eliminar"+str(i)))
#l.append(n)
#l.append(self.ids.contenedorFilas)
        #print(l) # I don't understand why two lists get printed
"""
    # This function is left here just in case; it did not manage to delete the buttons, but the attempt may be useful in the future.
    def eliminarVehiculo(self, idBoton): # meant to delete the buttons tied to a BoxLayout, but it behaves oddly; I think it is because the
    # BoxLayouts are stored in a list, or because the idBoton parameter is passed in from another class.
#print(idBoton)
#self.l[int(idBoton)].clear_widgets()
#self.ids.contenedorFilas.remove_widget(self.l[int(idBoton)])
#self.l.pop(int(idBoton))
""" | 45.349398 | 147 | 0.703108 |
361dd64cd261a81246151b95e4c72637e2698d06 | 51,549 | py | Python | synapse/handlers/e2e_keys.py | mattcen/synapse | 26e13ad126473fc15ce0b674f821f05f1f1158e2 | ["Apache-2.0"] | 1 | 2020-07-21T17:51:02.000Z | 2020-07-21T17:51:02.000Z | synapse/handlers/e2e_keys.py | mjvaldez/synapse | de119063f248981510e961e83f1515a3add19a21 | ["Apache-2.0"] | null | null | null | synapse/handlers/e2e_keys.py | mjvaldez/synapse | de119063f248981510e961e83f1515a3add19a21 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import attr
from canonicaljson import encode_canonical_json, json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
from synapse.types import (
UserID,
get_domain_from_id,
get_verify_key_from_cross_signing_key,
)
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import Linearizer
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.retryutils import NotRetryingDestination
logger = logging.getLogger(__name__)
class E2eKeysHandler(object):
def __init__(self, hs):
self.store = hs.get_datastore()
self.federation = hs.get_federation_client()
self.device_handler = hs.get_device_handler()
self.is_mine = hs.is_mine
self.clock = hs.get_clock()
self._edu_updater = SigningKeyEduUpdater(hs, self)
federation_registry = hs.get_federation_registry()
self._is_master = hs.config.worker_app is None
if not self._is_master:
self._user_device_resync_client = ReplicationUserDevicesResyncRestServlet.make_client(
hs
)
else:
# Only register this edu handler on master as it requires writing
# device updates to the db
#
# FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
federation_registry.register_edu_handler(
"org.matrix.signing_key_update",
self._edu_updater.incoming_signing_key_update,
)
# doesn't really work as part of the generic query API, because the
# query request requires an object POST, but we abuse the
# "query handler" interface.
federation_registry.register_query_handler(
"client_keys", self.on_federation_query_client_keys
)
@trace
async def query_devices(self, query_body, timeout, from_user_id):
""" Handle a device key query from a client
{
"device_keys": {
"<user_id>": ["<device_id>"]
}
}
->
{
"device_keys": {
"<user_id>": {
"<device_id>": {
...
}
}
}
}
Args:
from_user_id (str): the user making the query. This is used when
adding cross-signing signatures to limit what signatures users
can see.
"""
device_keys_query = query_body.get("device_keys", {})
# separate users by domain.
# make a map from domain to user_id to device_ids
local_query = {}
remote_queries = {}
for user_id, device_ids in device_keys_query.items():
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
local_query[user_id] = device_ids
else:
remote_queries[user_id] = device_ids
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
# First get local devices.
failures = {}
results = {}
if local_query:
local_result = await self.query_local_devices(local_query)
for user_id, keys in local_result.items():
if user_id in local_query:
results[user_id] = keys
# Now attempt to get any remote devices from our local cache.
remote_queries_not_in_cache = {}
if remote_queries:
query_list = []
for user_id, device_ids in remote_queries.items():
if device_ids:
query_list.extend((user_id, device_id) for device_id in device_ids)
else:
query_list.append((user_id, None))
(
user_ids_not_in_cache,
remote_results,
) = await self.store.get_user_devices_from_cache(query_list)
for user_id, devices in remote_results.items():
user_devices = results.setdefault(user_id, {})
for device_id, device in devices.items():
keys = device.get("keys", None)
device_display_name = device.get("device_display_name", None)
if keys:
result = dict(keys)
unsigned = result.setdefault("unsigned", {})
if device_display_name:
unsigned["device_display_name"] = device_display_name
user_devices[device_id] = result
for user_id in user_ids_not_in_cache:
domain = get_domain_from_id(user_id)
r = remote_queries_not_in_cache.setdefault(domain, {})
r[user_id] = remote_queries[user_id]
# Get cached cross-signing keys
cross_signing_keys = await self.get_cross_signing_keys_from_cache(
device_keys_query, from_user_id
)
# Now fetch any devices that we don't have in our cache
@trace
async def do_remote_query(destination):
"""This is called when we are querying the device list of a user on
a remote homeserver and their device list is not in the device list
cache. If we share a room with this user and we're not querying for
specific user we will update the cache with their device list.
"""
destination_query = remote_queries_not_in_cache[destination]
# We first consider whether we wish to update the device list cache with
# the users device list. We want to track a user's devices when the
# authenticated user shares a room with the queried user and the query
# has not specified a particular device.
# If we update the cache for the queried user we remove them from further
# queries. We use the more efficient batched query_client_keys for all
# remaining users
user_ids_updated = []
for (user_id, device_list) in destination_query.items():
if user_id in user_ids_updated:
continue
if device_list:
continue
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
continue
# We've decided we're sharing a room with this user and should
# probably be tracking their device lists. However, we haven't
# done an initial sync on the device list so we do it now.
try:
if self._is_master:
user_devices = await self.device_handler.device_list_updater.user_device_resync(
user_id
)
else:
user_devices = await self._user_device_resync_client(
user_id=user_id
)
user_devices = user_devices["devices"]
user_results = results.setdefault(user_id, {})
for device in user_devices:
user_results[device["device_id"]] = device["keys"]
user_ids_updated.append(user_id)
except Exception as e:
failures[destination] = _exception_to_failure(e)
if len(destination_query) == len(user_ids_updated):
# We've updated all the users in the query and we do not need to
# make any further remote calls.
return
# Remove all the users from the query which we have updated
for user_id in user_ids_updated:
destination_query.pop(user_id)
try:
remote_result = await self.federation.query_client_keys(
destination, {"device_keys": destination_query}, timeout=timeout
)
for user_id, keys in remote_result["device_keys"].items():
if user_id in destination_query:
results[user_id] = keys
if "master_keys" in remote_result:
for user_id, key in remote_result["master_keys"].items():
if user_id in destination_query:
cross_signing_keys["master_keys"][user_id] = key
if "self_signing_keys" in remote_result:
for user_id, key in remote_result["self_signing_keys"].items():
if user_id in destination_query:
cross_signing_keys["self_signing_keys"][user_id] = key
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(do_remote_query, destination)
for destination in remote_queries_not_in_cache
],
consumeErrors=True,
).addErrback(unwrapFirstError)
)
ret = {"device_keys": results, "failures": failures}
ret.update(cross_signing_keys)
return ret
async def get_cross_signing_keys_from_cache(self, query, from_user_id):
"""Get cross-signing keys for users from the database
Args:
query (Iterable[string]) an iterable of user IDs. A dict whose keys
are user IDs satisfies this, so the query format used for
query_devices can be used here.
from_user_id (str): the user making the query. This is used when
adding cross-signing signatures to limit what signatures users
can see.
Returns:
defer.Deferred[dict[str, dict[str, dict]]]: map from
(master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
"""
master_keys = {}
self_signing_keys = {}
user_signing_keys = {}
user_ids = list(query)
keys = await self.store.get_e2e_cross_signing_keys_bulk(user_ids, from_user_id)
for user_id, user_info in keys.items():
if user_info is None:
continue
if "master" in user_info:
master_keys[user_id] = user_info["master"]
if "self_signing" in user_info:
self_signing_keys[user_id] = user_info["self_signing"]
if (
from_user_id in keys
and keys[from_user_id] is not None
and "user_signing" in keys[from_user_id]
):
# users can see other users' master and self-signing keys, but can
# only see their own user-signing keys
user_signing_keys[from_user_id] = keys[from_user_id]["user_signing"]
return {
"master_keys": master_keys,
"self_signing_keys": self_signing_keys,
"user_signing_keys": user_signing_keys,
}
@trace
async def query_local_devices(self, query):
"""Get E2E device keys for local users
Args:
query (dict[string, list[string]|None): map from user_id to a list
of devices to query (None for all devices)
Returns:
defer.Deferred: (resolves to dict[string, dict[string, dict]]):
map from user_id -> device_id -> device details
"""
set_tag("local_query", query)
local_query = []
result_dict = {}
for user_id, device_ids in query.items():
# we use UserID.from_string to catch invalid user ids
if not self.is_mine(UserID.from_string(user_id)):
logger.warning("Request for keys for non-local user %s", user_id)
log_kv(
{
"message": "Requested a local key for a user which"
" was not local to the homeserver",
"user_id": user_id,
}
)
set_tag("error", True)
raise SynapseError(400, "Not a user here")
if not device_ids:
local_query.append((user_id, None))
else:
for device_id in device_ids:
local_query.append((user_id, device_id))
# make sure that each queried user appears in the result dict
result_dict[user_id] = {}
results = await self.store.get_e2e_device_keys(local_query)
# Build the result structure
for user_id, device_keys in results.items():
for device_id, device_info in device_keys.items():
result_dict[user_id][device_id] = device_info
log_kv(results)
return result_dict
async def on_federation_query_client_keys(self, query_body):
""" Handle a device key query from a federated server
"""
device_keys_query = query_body.get("device_keys", {})
res = await self.query_local_devices(device_keys_query)
ret = {"device_keys": res}
# add in the cross-signing keys
cross_signing_keys = await self.get_cross_signing_keys_from_cache(
device_keys_query, None
)
ret.update(cross_signing_keys)
return ret
@trace
async def claim_one_time_keys(self, query, timeout):
local_query = []
remote_queries = {}
for user_id, device_keys in query.get("one_time_keys", {}).items():
# we use UserID.from_string to catch invalid user ids
if self.is_mine(UserID.from_string(user_id)):
for device_id, algorithm in device_keys.items():
local_query.append((user_id, device_id, algorithm))
else:
domain = get_domain_from_id(user_id)
remote_queries.setdefault(domain, {})[user_id] = device_keys
set_tag("local_key_query", local_query)
set_tag("remote_key_query", remote_queries)
results = await self.store.claim_e2e_one_time_keys(local_query)
json_result = {}
failures = {}
for user_id, device_keys in results.items():
for device_id, keys in device_keys.items():
for key_id, json_bytes in keys.items():
json_result.setdefault(user_id, {})[device_id] = {
key_id: json.loads(json_bytes)
}
@trace
async def claim_client_keys(destination):
set_tag("destination", destination)
device_keys = remote_queries[destination]
try:
remote_result = await self.federation.claim_client_keys(
destination, {"one_time_keys": device_keys}, timeout=timeout
)
for user_id, keys in remote_result["one_time_keys"].items():
if user_id in device_keys:
json_result[user_id] = keys
except Exception as e:
failure = _exception_to_failure(e)
failures[destination] = failure
set_tag("error", True)
set_tag("reason", failure)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(claim_client_keys, destination)
for destination in remote_queries
],
consumeErrors=True,
)
)
logger.info(
"Claimed one-time-keys: %s",
",".join(
(
"%s for %s:%s" % (key_id, user_id, device_id)
for user_id, user_keys in json_result.items()
for device_id, device_keys in user_keys.items()
for key_id, _ in device_keys.items()
)
),
)
log_kv({"one_time_keys": json_result, "failures": failures})
return {"one_time_keys": json_result, "failures": failures}
@tag_args
async def upload_keys_for_user(self, user_id, device_id, keys):
time_now = self.clock.time_msec()
# TODO: Validate the JSON to make sure it has the right keys.
device_keys = keys.get("device_keys", None)
if device_keys:
logger.info(
"Updating device_keys for device %r for user %s at %d",
device_id,
user_id,
time_now,
)
log_kv(
{
"message": "Updating device_keys for user.",
"user_id": user_id,
"device_id": device_id,
}
)
# TODO: Sign the JSON with the server key
changed = await self.store.set_e2e_device_keys(
user_id, device_id, time_now, device_keys
)
if changed:
# Only notify about device updates *if* the keys actually changed
await self.device_handler.notify_device_update(user_id, [device_id])
else:
log_kv({"message": "Not updating device_keys for user", "user_id": user_id})
one_time_keys = keys.get("one_time_keys", None)
if one_time_keys:
log_kv(
{
"message": "Updating one_time_keys for device.",
"user_id": user_id,
"device_id": device_id,
}
)
await self._upload_one_time_keys_for_user(
user_id, device_id, time_now, one_time_keys
)
else:
log_kv(
{"message": "Did not update one_time_keys", "reason": "no keys given"}
)
# the device should have been registered already, but it may have been
# deleted due to a race with a DELETE request. Or we may be using an
# old access_token without an associated device_id. Either way, we
# need to double-check the device is registered to avoid ending up with
# keys without a corresponding device.
await self.device_handler.check_device_registered(user_id, device_id)
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
set_tag("one_time_key_counts", result)
return {"one_time_key_counts": result}
async def _upload_one_time_keys_for_user(
self, user_id, device_id, time_now, one_time_keys
):
logger.info(
"Adding one_time_keys %r for device %r for user %r at %d",
one_time_keys.keys(),
device_id,
user_id,
time_now,
)
# make a list of (alg, id, key) tuples
key_list = []
for key_id, key_obj in one_time_keys.items():
algorithm, key_id = key_id.split(":")
key_list.append((algorithm, key_id, key_obj))
# First we check if we have already persisted any of the keys.
existing_key_map = await self.store.get_e2e_one_time_keys(
user_id, device_id, [k_id for _, k_id, _ in key_list]
)
new_keys = [] # Keys that we need to insert. (alg, id, json) tuples.
for algorithm, key_id, key in key_list:
ex_json = existing_key_map.get((algorithm, key_id), None)
if ex_json:
if not _one_time_keys_match(ex_json, key):
raise SynapseError(
400,
(
"One time key %s:%s already exists. "
"Old key: %s; new key: %r"
)
% (algorithm, key_id, ex_json, key),
)
else:
new_keys.append(
(algorithm, key_id, encode_canonical_json(key).decode("ascii"))
)
log_kv({"message": "Inserting new one_time_keys.", "keys": new_keys})
await self.store.add_e2e_one_time_keys(user_id, device_id, time_now, new_keys)
async def upload_signing_keys_for_user(self, user_id, keys):
"""Upload signing keys for cross-signing
Args:
user_id (string): the user uploading the keys
keys (dict[string, dict]): the signing keys
"""
# if a master key is uploaded, then check it. Otherwise, load the
# stored master key, to check signatures on other keys
if "master_key" in keys:
master_key = keys["master_key"]
_check_cross_signing_key(master_key, user_id, "master")
else:
master_key = await self.store.get_e2e_cross_signing_key(user_id, "master")
# if there is no master key, then we can't do anything, because all the
# other cross-signing keys need to be signed by the master key
if not master_key:
raise SynapseError(400, "No master key available", Codes.MISSING_PARAM)
try:
master_key_id, master_verify_key = get_verify_key_from_cross_signing_key(
master_key
)
except ValueError:
if "master_key" in keys:
# the invalid key came from the request
raise SynapseError(400, "Invalid master key", Codes.INVALID_PARAM)
else:
# the invalid key came from the database
logger.error("Invalid master key found for user %s", user_id)
raise SynapseError(500, "Invalid master key")
# for the other cross-signing keys, make sure that they have valid
# signatures from the master key
if "self_signing_key" in keys:
self_signing_key = keys["self_signing_key"]
_check_cross_signing_key(
self_signing_key, user_id, "self_signing", master_verify_key
)
if "user_signing_key" in keys:
user_signing_key = keys["user_signing_key"]
_check_cross_signing_key(
user_signing_key, user_id, "user_signing", master_verify_key
)
# if everything checks out, then store the keys and send notifications
deviceids = []
if "master_key" in keys:
await self.store.set_e2e_cross_signing_key(user_id, "master", master_key)
deviceids.append(master_verify_key.version)
if "self_signing_key" in keys:
await self.store.set_e2e_cross_signing_key(
user_id, "self_signing", self_signing_key
)
try:
deviceids.append(
get_verify_key_from_cross_signing_key(self_signing_key)[1].version
)
except ValueError:
raise SynapseError(400, "Invalid self-signing key", Codes.INVALID_PARAM)
if "user_signing_key" in keys:
await self.store.set_e2e_cross_signing_key(
user_id, "user_signing", user_signing_key
)
# the signature stream matches the semantics that we want for
# user-signing key updates: only the user themselves is notified of
# their own user-signing key updates
await self.device_handler.notify_user_signature_update(user_id, [user_id])
# master key and self-signing key updates match the semantics of device
# list updates: all users who share an encrypted room are notified
if len(deviceids):
await self.device_handler.notify_device_update(user_id, deviceids)
return {}
async def upload_signatures_for_device_keys(self, user_id, signatures):
"""Upload device signatures for cross-signing
Args:
user_id (string): the user uploading the signatures
signatures (dict[string, dict[string, dict]]): map of users to
devices to signed keys. This is the submission from the user; an
exception will be raised if it is malformed.
Returns:
dict: response to be sent back to the client. The response will have
a "failures" key, which will be a dict mapping users to devices
to errors for the signatures that failed.
Raises:
SynapseError: if the signatures dict is not valid.
"""
failures = {}
# signatures to be stored. Each item will be a SignatureListItem
signature_list = []
# split between checking signatures for own user and signatures for
# other users, since we verify them with different keys
self_signatures = signatures.get(user_id, {})
other_signatures = {k: v for k, v in signatures.items() if k != user_id}
self_signature_list, self_failures = await self._process_self_signatures(
user_id, self_signatures
)
signature_list.extend(self_signature_list)
failures.update(self_failures)
other_signature_list, other_failures = await self._process_other_signatures(
user_id, other_signatures
)
signature_list.extend(other_signature_list)
failures.update(other_failures)
# store the signature, and send the appropriate notifications for sync
logger.debug("upload signature failures: %r", failures)
await self.store.store_e2e_cross_signing_signatures(user_id, signature_list)
self_device_ids = [item.target_device_id for item in self_signature_list]
if self_device_ids:
await self.device_handler.notify_device_update(user_id, self_device_ids)
signed_users = [item.target_user_id for item in other_signature_list]
if signed_users:
await self.device_handler.notify_user_signature_update(
user_id, signed_users
)
return {"failures": failures}
async def _process_self_signatures(self, user_id, signatures):
"""Process uploaded signatures of the user's own keys.
Signatures of the user's own keys from this API come in two forms:
- signatures of the user's devices by the user's self-signing key,
- signatures of the user's master key by the user's devices.
Args:
user_id (string): the user uploading the keys
signatures (dict[string, dict]): map of devices to signed keys
Returns:
(list[SignatureListItem], dict[string, dict[string, dict]]):
a list of signatures to store, and a map of users to devices to failure
reasons
Raises:
SynapseError: if the input is malformed
"""
signature_list = []
failures = {}
if not signatures:
return signature_list, failures
if not isinstance(signatures, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
try:
# get our self-signing key to verify the signatures
(
_,
self_signing_key_id,
self_signing_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "self_signing")
# get our master key, since we may have received a signature of it.
# We need to fetch it here so that we know what its key ID is, so
# that we can check if a signature that was sent is a signature of
# the master key or of a device
(
master_key,
_,
master_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "master")
# fetch our stored devices. This is used to 1. verify
# signatures on the master key, and 2. to compare with what
# was sent if the device was signed
devices = await self.store.get_e2e_device_keys([(user_id, None)])
if user_id not in devices:
raise NotFoundError("No device keys found")
devices = devices[user_id]
except SynapseError as e:
failure = _exception_to_failure(e)
failures[user_id] = {device: failure for device in signatures.keys()}
return signature_list, failures
for device_id, device in signatures.items():
# make sure submitted data is in the right form
if not isinstance(device, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
try:
if "signatures" not in device or user_id not in device["signatures"]:
# no signature was sent
raise SynapseError(
400, "Invalid signature", Codes.INVALID_SIGNATURE
)
if device_id == master_verify_key.version:
# The signature is of the master key. This needs to be
# handled differently from signatures of normal devices.
master_key_signature_list = self._check_master_key_signature(
user_id, device_id, device, master_key, devices
)
signature_list.extend(master_key_signature_list)
continue
# at this point, we have a device that should be signed
# by the self-signing key
if self_signing_key_id not in device["signatures"][user_id]:
# no signature was sent
raise SynapseError(
400, "Invalid signature", Codes.INVALID_SIGNATURE
)
try:
stored_device = devices[device_id]
except KeyError:
raise NotFoundError("Unknown device")
if self_signing_key_id in stored_device.get("signatures", {}).get(
user_id, {}
):
# we already have a signature on this device, so we
# can skip it, since it should be exactly the same
continue
_check_device_signature(
user_id, self_signing_verify_key, device, stored_device
)
signature = device["signatures"][user_id][self_signing_key_id]
signature_list.append(
SignatureListItem(
self_signing_key_id, user_id, device_id, signature
)
)
except SynapseError as e:
failures.setdefault(user_id, {})[device_id] = _exception_to_failure(e)
return signature_list, failures
def _check_master_key_signature(
self, user_id, master_key_id, signed_master_key, stored_master_key, devices
):
"""Check signatures of a user's master key made by their devices.
Args:
user_id (string): the user whose master key is being checked
master_key_id (string): the ID of the user's master key
signed_master_key (dict): the user's signed master key that was uploaded
stored_master_key (dict): our previously-stored copy of the user's master key
devices (iterable(dict)): the user's devices
Returns:
list[SignatureListItem]: a list of signatures to store
Raises:
SynapseError: if a signature is invalid
"""
# for each device that signed the master key, check the signature.
master_key_signature_list = []
sigs = signed_master_key["signatures"]
for signing_key_id, signature in sigs[user_id].items():
_, signing_device_id = signing_key_id.split(":", 1)
if (
signing_device_id not in devices
or signing_key_id not in devices[signing_device_id]["keys"]
):
# signed by an unknown device, or the
# device does not have the key
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
# get the key and check the signature
pubkey = devices[signing_device_id]["keys"][signing_key_id]
verify_key = decode_verify_key_bytes(signing_key_id, decode_base64(pubkey))
_check_device_signature(
user_id, verify_key, signed_master_key, stored_master_key
)
master_key_signature_list.append(
SignatureListItem(signing_key_id, user_id, master_key_id, signature)
)
return master_key_signature_list
async def _process_other_signatures(self, user_id, signatures):
"""Process uploaded signatures of other users' keys. These will be the
target user's master keys, signed by the uploading user's user-signing
key.
Args:
user_id (string): the user uploading the keys
signatures (dict[string, dict]): map of users to devices to signed keys
Returns:
(list[SignatureListItem], dict[string, dict[string, dict]]):
a list of signatures to store, and a map of users to devices to failure
reasons
Raises:
SynapseError: if the input is malformed
"""
signature_list = []
failures = {}
if not signatures:
return signature_list, failures
try:
# get our user-signing key to verify the signatures
(
user_signing_key,
user_signing_key_id,
user_signing_verify_key,
) = await self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
except SynapseError as e:
failure = _exception_to_failure(e)
for user, devicemap in signatures.items():
failures[user] = {device_id: failure for device_id in devicemap.keys()}
return signature_list, failures
for target_user, devicemap in signatures.items():
# make sure submitted data is in the right form
if not isinstance(devicemap, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
for device in devicemap.values():
if not isinstance(device, dict):
raise SynapseError(400, "Invalid parameter", Codes.INVALID_PARAM)
device_id = None
try:
# get the target user's master key, to make sure it matches
# what was sent
(
master_key,
master_key_id,
_,
) = await self._get_e2e_cross_signing_verify_key(
target_user, "master", user_id
)
# make sure that the target user's master key is the one that
# was signed (and no others)
device_id = master_key_id.split(":", 1)[1]
if device_id not in devicemap:
logger.debug(
"upload signature: could not find signature for device %s",
device_id,
)
# set device to None so that the failure gets
# marked on all the signatures
device_id = None
raise NotFoundError("Unknown device")
key = devicemap[device_id]
other_devices = [k for k in devicemap.keys() if k != device_id]
if other_devices:
# other devices were signed -- mark those as failures
logger.debug("upload signature: too many devices specified")
failure = _exception_to_failure(NotFoundError("Unknown device"))
failures[target_user] = {
device: failure for device in other_devices
}
if user_signing_key_id in master_key.get("signatures", {}).get(
user_id, {}
):
# we already have the signature, so we can skip it
continue
_check_device_signature(
user_id, user_signing_verify_key, key, master_key
)
signature = key["signatures"][user_id][user_signing_key_id]
signature_list.append(
SignatureListItem(
user_signing_key_id, target_user, device_id, signature
)
)
except SynapseError as e:
failure = _exception_to_failure(e)
if device_id is None:
failures[target_user] = {
device_id: failure for device_id in devicemap.keys()
}
else:
failures.setdefault(target_user, {})[device_id] = failure
return signature_list, failures
async def _get_e2e_cross_signing_verify_key(
self, user_id: str, key_type: str, from_user_id: str = None
):
"""Fetch locally or remotely query for a cross-signing public key.
First, attempt to fetch the cross-signing public key from storage.
If that fails, query the keys from the homeserver they belong to
and update our local copy.
Args:
user_id: the user whose key should be fetched
key_type: the type of key to fetch
from_user_id: the user that we are fetching the keys for.
This affects what signatures are fetched.
Returns:
dict, str, VerifyKey: the raw key data, the key ID, and the
signedjson verify key
Raises:
NotFoundError: if the key is not found
SynapseError: if `user_id` is invalid
"""
user = UserID.from_string(user_id)
key = await self.store.get_e2e_cross_signing_key(
user_id, key_type, from_user_id
)
if key:
# We found a copy of this key in our database. Decode and return it
key_id, verify_key = get_verify_key_from_cross_signing_key(key)
return key, key_id, verify_key
# If we couldn't find the key locally, and we're looking for keys of
# another user then attempt to fetch the missing key from the remote
# user's server.
#
# We may run into this in possible edge cases where a user tries to
# cross-sign a remote user, but does not share any rooms with them yet.
# Thus, we would not have their key list yet. We instead fetch the key,
# store it and notify clients of new, associated device IDs.
if self.is_mine(user) or key_type not in ["master", "self_signing"]:
# Note that master and self_signing keys are the only cross-signing keys we
# can request over federation
raise NotFoundError("No %s key found for %s" % (key_type, user_id))
(
key,
key_id,
verify_key,
) = await self._retrieve_cross_signing_keys_for_remote_user(user, key_type)
if key is None:
raise NotFoundError("No %s key found for %s" % (key_type, user_id))
return key, key_id, verify_key
async def _retrieve_cross_signing_keys_for_remote_user(
self, user: UserID, desired_key_type: str,
):
"""Queries cross-signing keys for a remote user and saves them to the database
Only the key specified by `key_type` will be returned, while all retrieved keys
will be saved regardless
Args:
user: The user to query remote keys for
desired_key_type: The type of key to receive. One of "master", "self_signing"
Returns:
Deferred[Tuple[Optional[Dict], Optional[str], Optional[VerifyKey]]]: A tuple
of the retrieved key content, the key's ID and the matching VerifyKey.
If the key cannot be retrieved, all values in the tuple will instead be None.
"""
try:
remote_result = await self.federation.query_user_devices(
user.domain, user.to_string()
)
except Exception as e:
logger.warning(
"Unable to query %s for cross-signing keys of user %s: %s %s",
user.domain,
user.to_string(),
type(e),
e,
)
return None, None, None
# Process each of the retrieved cross-signing keys
desired_key = None
desired_key_id = None
desired_verify_key = None
retrieved_device_ids = []
for key_type in ["master", "self_signing"]:
key_content = remote_result.get(key_type + "_key")
if not key_content:
continue
# Ensure these keys belong to the correct user
if "user_id" not in key_content:
logger.warning(
"Invalid %s key retrieved, missing user_id field: %s",
key_type,
key_content,
)
continue
if user.to_string() != key_content["user_id"]:
logger.warning(
"Found %s key of user %s when querying for keys of user %s",
key_type,
key_content["user_id"],
user.to_string(),
)
continue
# Validate the key contents
try:
# verify_key is a VerifyKey from signedjson, which uses
# .version to denote the portion of the key ID after the
# algorithm and colon, which is the device ID
key_id, verify_key = get_verify_key_from_cross_signing_key(key_content)
except ValueError as e:
logger.warning(
"Invalid %s key retrieved: %s - %s %s",
key_type,
key_content,
type(e),
e,
)
continue
# Note down the device ID attached to this key
retrieved_device_ids.append(verify_key.version)
# If this is the desired key type, save it and its ID/VerifyKey
if key_type == desired_key_type:
desired_key = key_content
desired_verify_key = verify_key
desired_key_id = key_id
# At the same time, store this key in the db for subsequent queries
await self.store.set_e2e_cross_signing_key(
user.to_string(), key_type, key_content
)
# Notify clients that new devices for this user have been discovered
if retrieved_device_ids:
# XXX is this necessary?
await self.device_handler.notify_device_update(
user.to_string(), retrieved_device_ids
)
return desired_key, desired_key_id, desired_verify_key
def _check_cross_signing_key(key, user_id, key_type, signing_key=None):
"""Check a cross-signing key uploaded by a user. Performs some basic sanity
checking, and ensures that it is signed, if a signature is required.
Args:
key (dict): the key data to verify
user_id (str): the user whose key is being checked
key_type (str): the type of key that the key should be
signing_key (VerifyKey): (optional) the signing key that the key should
be signed with. If omitted, signatures will not be checked.
"""
if (
key.get("user_id") != user_id
or key_type not in key.get("usage", [])
or len(key.get("keys", {})) != 1
):
raise SynapseError(400, ("Invalid %s key" % (key_type,)), Codes.INVALID_PARAM)
if signing_key:
try:
verify_signed_json(key, user_id, signing_key)
except SignatureVerifyException:
raise SynapseError(
400, ("Invalid signature on %s key" % key_type), Codes.INVALID_SIGNATURE
)
def _check_device_signature(user_id, verify_key, signed_device, stored_device):
"""Check that a signature on a device or cross-signing key is correct and
matches the copy of the device/key that we have stored. Throws an
exception if an error is detected.
Args:
user_id (str): the user ID whose signature is being checked
verify_key (VerifyKey): the key to verify the device with
signed_device (dict): the uploaded signed device data
stored_device (dict): our previously stored copy of the device
Raises:
SynapseError: if the signature was invalid or the sent device is not the
same as the stored device
"""
# make sure that the device submitted matches what we have stored
stripped_signed_device = {
k: v for k, v in signed_device.items() if k not in ["signatures", "unsigned"]
}
stripped_stored_device = {
k: v for k, v in stored_device.items() if k not in ["signatures", "unsigned"]
}
if stripped_signed_device != stripped_stored_device:
logger.debug(
"upload signatures: key does not match %s vs %s",
signed_device,
stored_device,
)
raise SynapseError(400, "Key does not match")
try:
verify_signed_json(signed_device, user_id, verify_key)
except SignatureVerifyException:
logger.debug("invalid signature on key")
raise SynapseError(400, "Invalid signature", Codes.INVALID_SIGNATURE)
def _exception_to_failure(e):
if isinstance(e, SynapseError):
return {"status": e.code, "errcode": e.errcode, "message": str(e)}
if isinstance(e, CodeMessageException):
return {"status": e.code, "message": str(e)}
if isinstance(e, NotRetryingDestination):
return {"status": 503, "message": "Not ready for retry"}
# include ConnectionRefused and other errors
#
# Note that some Exceptions (notably twisted's ResponseFailed etc) don't
# give a string for e.message, which json then fails to serialize.
return {"status": 503, "message": str(e)}
def _one_time_keys_match(old_key_json, new_key):
old_key = json.loads(old_key_json)
# if either is a string rather than an object, they must match exactly
if not isinstance(old_key, dict) or not isinstance(new_key, dict):
return old_key == new_key
# otherwise, we strip off the 'signatures' if any, because it's legitimate
# for different upload attempts to have different signatures.
old_key.pop("signatures", None)
new_key_copy = dict(new_key)
new_key_copy.pop("signatures", None)
return old_key == new_key_copy
@attr.s
class SignatureListItem:
"""An item in the signature list as used by upload_signatures_for_device_keys.
"""
signing_key_id = attr.ib()
target_user_id = attr.ib()
target_device_id = attr.ib()
signature = attr.ib()
class SigningKeyEduUpdater(object):
"""Handles incoming signing key updates from federation and updates the DB"""
def __init__(self, hs, e2e_keys_handler):
self.store = hs.get_datastore()
self.federation = hs.get_federation_client()
self.clock = hs.get_clock()
self.e2e_keys_handler = e2e_keys_handler
self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
# user_id -> list of updates waiting to be handled.
self._pending_updates = {}
# Recently seen stream ids. We don't bother keeping these in the DB,
# but they're useful to have them about to reduce the number of spurious
# resyncs.
self._seen_updates = ExpiringCache(
cache_name="signing_key_update_edu",
clock=self.clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
iterable=True,
)
async def incoming_signing_key_update(self, origin, edu_content):
"""Called on incoming signing key update from federation. Responsible for
parsing the EDU and adding to pending updates list.
Args:
origin (string): the server that sent the EDU
edu_content (dict): the contents of the EDU
"""
user_id = edu_content.pop("user_id")
master_key = edu_content.pop("master_key", None)
self_signing_key = edu_content.pop("self_signing_key", None)
if get_domain_from_id(user_id) != origin:
logger.warning("Got signing key update edu for %r from %r", user_id, origin)
return
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
# probably won't get any further updates.
return
self._pending_updates.setdefault(user_id, []).append(
(master_key, self_signing_key)
)
await self._handle_signing_key_updates(user_id)
async def _handle_signing_key_updates(self, user_id):
"""Actually handle pending updates.
Args:
user_id (string): the user whose updates we are processing
"""
device_handler = self.e2e_keys_handler.device_handler
device_list_updater = device_handler.device_list_updater
with (await self._remote_edu_linearizer.queue(user_id)):
pending_updates = self._pending_updates.pop(user_id, [])
if not pending_updates:
# This can happen since we batch updates
return
device_ids = []
logger.info("pending updates: %r", pending_updates)
for master_key, self_signing_key in pending_updates:
new_device_ids = await device_list_updater.process_cross_signing_key_update(
user_id, master_key, self_signing_key,
)
device_ids = device_ids + new_device_ids
await device_handler.notify_device_update(user_id, device_ids)
| 39.83694 | 104 | 0.590138 |
f78aea993c17664887e0fc2e716667a7ba1767a4 | 4,712 | py | Python | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | builders/specs/cli/PyCOMPSsCLIResources/pycompss_cli/core/utils.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null |
import json
from glob import glob
from pathlib import Path
import subprocess
import os
def is_debug():
return os.getenv('PYCOMPSS_CLI_DEBUG', 'false').lower() == 'true'
def get_object_method_by_name(obj, method_name, include_in_name=False):
for class_method_name in dir(obj):
if not '__' in class_method_name and callable(getattr(obj, class_method_name)):
if class_method_name.startswith(method_name) or (include_in_name and method_name in class_method_name):
return class_method_name
def table_print(col_names, data):
print_table(data, header=col_names)
def get_current_env_conf(return_path=False):
home_path = str(Path.home())
current_env = glob(home_path + '/.COMPSs/envs/*/current')[0].replace('current', 'env.json')
with open(current_env, 'r') as env:
if return_path:
return json.load(env), current_env
return json.load(env)
def get_env_conf_by_name(env_name):
home_path = str(Path.home())
env_path = home_path + '/.COMPSs/envs/' + env_name + '/env.json'
with open(env_path, 'r') as env:
return json.load(env)
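# Illustrative example (hypothetical host and commands): ssh_run_commands("user@host", ["hostname", "uptime"])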
def ssh_run_commands(login_info, commands, **kwargs):
cmd = ' ; '.join(filter(len, commands))
res = subprocess.run(f"ssh {login_info} '{cmd}'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
return res.stdout.decode(), res.stderr.decode()
def check_exit_code(command):
return subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode
def is_inside_docker():
return ':/docker/' in subprocess.check_output(['cat', '/proc/self/cgroup']).decode('utf-8')
def print_table(items, header=None, wrap=True, wrap_style="wrap", row_line=False, fix_col_width=False):
''' Prints a matrix of data as a human readable table. Matrix
should be a list of lists containing any type of values that can
be converted into text strings.
Two different column adjustment methods are supported through
the *wrap_style* argument:
wrap: it will wrap values to fit max_col_width (by extending cell height)
cut: it will strip values to max_col_width
If the *wrap* argument is set to False, column widths are set to fit all
values in each column.
This code is free software. Updates can be found at
https://gist.github.com/jhcepas/5884168
'''
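    # Example: print_table([["docker", "active"], ["remote", "-"]], header=["env", "status"])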
max_col_width = os.get_terminal_size().columns
if fix_col_width:
c2maxw = dict([(i, max_col_width) for i in range(len(items[0]))])
wrap = True
elif not wrap:
c2maxw = dict([(i, max([len(str(e[i])) for e in items])) for i in range(len(items[0]))])
else:
c2maxw = dict([(i, min(max_col_width, max([len(str(e[i])) for e in items])))
for i in range(len(items[0]))])
if header:
current_item = -1
row = header
if wrap and not fix_col_width:
for col, maxw in c2maxw.items():
c2maxw[col] = max(maxw, len(header[col]))
if wrap:
c2maxw[col] = min(c2maxw[col], max_col_width)
else:
current_item = 0
row = items[current_item]
while row:
is_extra = False
values = []
extra_line = [""]*len(row)
for col, val in enumerate(row):
cwidth = c2maxw[col]
wrap_width = cwidth
val = str(val)
try:
newline_i = val.index("\n")
except ValueError:
pass
else:
wrap_width = min(newline_i+1, wrap_width)
val = val.replace("\n", " ", 1)
if wrap and len(val) > wrap_width:
if wrap_style == "cut":
val = val[:wrap_width-1]+"+"
elif wrap_style == "wrap":
extra_line[col] = val[wrap_width:]
val = val[:wrap_width]
val = val.ljust(cwidth)
values.append(val)
print(' | '.join(values))
if not set(extra_line) - set(['']):
if header and current_item == -1:
print(' | '.join(['='*c2maxw[col] for col in range(len(row)) ]))
current_item += 1
try:
row = items[current_item]
except IndexError:
row = None
else:
row = extra_line
is_extra = True
if row_line and not is_extra and not (header and current_item == 0):
if row:
print(' | '.join(['-'*c2maxw[col] for col in range(len(row)) ]))
else:
print(' | '.join(['='*c2maxw[col] for col in range(len(extra_line)) ])) | 38.622951 | 123 | 0.593379 |
162917fff93631f32887699568e6de091d83a8f1 | 9,219 | py | Python | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/tests/window/moments/test_moments_ewm.py | gabsmoreira/pandas | ee1efb6d923a2c3e5a912efe20a336179614993d | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null |
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.tests.window.common import Base
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_ewma(self):
self._check_ew(name="mean")
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize("adjust", [True, False])
@pytest.mark.parametrize("ignore_na", [True, False])
def test_ewma_cases(self, adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
# GH 7603
s0 = Series([np.nan, 1.0, 101.0])
s1 = Series([1.0, np.nan, 101.0])
s2 = Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan])
s3 = Series([1.0, np.nan, 101.0, 50.0])
com = 2.0
alpha = 1.0 / (1.0 + com)
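        # Reference implementation: with per-observation weights w, the expected EWMA at time t
        # is sum(w[:t+1] * s[:t+1]) / sum(w[:t+1]), forward-filling the result at missing observations.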
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method="ffill")
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1.0 - alpha), 1.0]),
(s0, True, True, [np.nan, (1.0 - alpha), 1.0]),
(s0, False, False, [np.nan, (1.0 - alpha), alpha]),
(s0, False, True, [np.nan, (1.0 - alpha), alpha]),
(s1, True, False, [(1.0 - alpha) ** 2, np.nan, 1.0]),
(s1, True, True, [(1.0 - alpha), np.nan, 1.0]),
(s1, False, False, [(1.0 - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1.0 - alpha), np.nan, alpha]),
(
s2,
True,
False,
[np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(s2, True, True, [np.nan, (1.0 - alpha), np.nan, np.nan, 1.0, np.nan]),
(
s2,
False,
False,
[np.nan, (1.0 - alpha) ** 3, np.nan, np.nan, alpha, np.nan],
),
(s2, False, True, [np.nan, (1.0 - alpha), np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1.0 - alpha) ** 3, np.nan, (1.0 - alpha), 1.0]),
(s3, True, True, [(1.0 - alpha) ** 2, np.nan, (1.0 - alpha), 1.0]),
(
s3,
False,
False,
[
(1.0 - alpha) ** 3,
np.nan,
(1.0 - alpha) * alpha,
alpha * ((1.0 - alpha) ** 2 + alpha),
],
),
(
s3,
False,
True,
[(1.0 - alpha) ** 2, np.nan, (1.0 - alpha) * alpha, alpha],
),
]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(name="var")
def test_ewmvol(self):
self._check_ew(name="vol")
def test_ewma_span_com_args(self):
A = self.series.ewm(com=9.5).mean()
B = self.series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20)
with pytest.raises(ValueError):
self.series.ewm().mean()
def test_ewma_halflife_arg(self):
A = self.series.ewm(com=13.932726172912965).mean()
B = self.series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm()
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(self):
# GH 10789
s = self.series
with pytest.raises(ValueError):
s.ewm()
with pytest.raises(ValueError):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize("method", ["mean", "vol", "var"])
def test_ew_empty_series(self, method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
def _check_ew(self, name=None, preserve_nan=False):
series_result = getattr(self.series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(self.frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(self.series.ewm(com=10), name)()
if preserve_nan:
assert result[self._nan_locs].isna().all()
@pytest.mark.parametrize("min_periods", [0, 1])
@pytest.mark.parametrize("name", ["mean", "var", "vol"])
def test_ew_min_periods(self, min_periods, name):
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == "mean":
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(
Series(dtype=object).ewm(com=50, min_periods=min_periods), name
)()
tm.assert_series_equal(result, Series(dtype="float64"))
# check series of length 1
result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()
if name == "mean":
tm.assert_series_equal(result, Series([1.0]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
| 35.187023 | 86 | 0.530752 |
9cbd9ef9f77776a020982b3a7224ffa529e834c6 | 364 | py | Python | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | ["MIT"] | null | null | null | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | ["MIT"] | null | null | null | auto_derby/services/cleanup.py | gentle-knight-13/auto-derby | 70593fea2c3d803487e6e0d2ce0c40d60bc6304d | ["MIT"] | null | null | null |
# -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
from typing import Callable, Protocol
Callback = Callable[[], None]
class Service(Protocol):
def add(self, cb: Callback) -> None:
...
def run(self) -> None:
...
def __enter__(self) -> Service:
...
def __exit__(self, *_) -> None:
...
| 16.545455 | 40 | 0.568681 |
4460437b7bd779818e8933718c8badf20d6eb3e8 | 1,258 | py | Python | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | ["MIT"] | null | null | null | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | ["MIT"] | null | null | null | app/core/models.py | nesar-ahmed/recipe-api | 98f560d780f92339662be66547f59a54fdcbb613 | ["MIT"] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, \
PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
name = models.CharField(max_length=255, )
email = models.EmailField(max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| 32.25641 | 76 | 0.643879 |
baba3d3c67808e177b631266c5cd535f5d48c1bf | 6,150 | py | Python | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | ["Apache-2.0"] | null | null | null | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | ["Apache-2.0"] | 3 | 2022-01-25T00:58:37.000Z | 2022-01-26T05:44:40.000Z | Scripts/ProportionalAllocation.py | d-wasserman/arc-numerical-tools | a88ed46c48083dfa615895ecf75e7c1c9c650f97 | ["Apache-2.0"] | 2 | 2018-09-14T21:44:34.000Z | 2020-08-15T22:21:05.000Z |
# --------------------------------
# Name: ProportionalAllocation.py
# Purpose: This script is intended to provide a way to use sampling geography that will calculate proportional
# averages or sums based on the percentage of an intersection covered by the sampling geography. The output is
# the sampling geography with fields sampled from the base features.
# Current Owner: David Wasserman
# Last Modified: 4/17/2021
# Copyright: David Wasserman
# ArcGIS Version: ArcGIS Pro
# Python Version: 3.6
# --------------------------------
# Copyright 2021 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import arcpy
import os
import pandas as pd
from arcgis.features import GeoAccessor, GeoSeriesAccessor
import SharedArcNumericalLib as san
# Function Definitions
def proportional_allocation(sampling_features, base_features, out_feature_class,
sum_fields=[], mean_fields=[]):
"""This script is intended to provide a way to use sampling geography that will calculate proportional
averages or sums based on the percentage of an intersection covered by the sampling geography. The output is
the sampling geography with fields sampled from the base features.
Parameters
--------------------
sampling_features - The sampling features are the features you want to associate proportional averages or sums
from the attributes in the base features. The output will look like this input polygon layer with new fields.
    base_features - The base features have the attributes being sampled by the polygon sampling features.
out_feature_class - The output feature class is a copy of the sampling features with new sum & average fields
sum_fields - Fields to proportionally sum (based on the overlapping areas between the sampling and base features)
from the base to the sampling features.
mean_fields - Fields to proportionally average (based on the overlapping areas between the sampling and base features)
from the base to the sampling features.
"""
arcpy.env.overwriteOutput = True
# Start Analysis
temp_intersect = os.path.join("in_memory", "temp_intersect")
san.arc_print("Calculating original areas...")
base_area_col = "Base_Area_SQMI"
inter_area_col = "Inter_Area_SQMI"
sampling_id = "Sampling_ID"
ratio_coverage = "Proportion"
san.add_new_field(base_features, base_area_col, "DOUBLE")
arcpy.CalculateField_management(base_features, base_area_col, "!shape.area@SQUAREMILES!")
san.add_new_field(sampling_features, "Sampling_ID", "LONG")
oid_s = arcpy.Describe(sampling_features).OIDFieldName
arcpy.CalculateField_management(sampling_features, sampling_id, "!{0}!".format(oid_s))
san.arc_print("Conducting an intersection...", True)
arcpy.Intersect_analysis([[sampling_features, 1], [base_features, 1]], temp_intersect)
san.add_new_field(temp_intersect, inter_area_col, "DOUBLE")
arcpy.CalculateField_management(temp_intersect, inter_area_col, "!shape.area@SQUAREMILES!")
san.arc_print("Calculating proportional sums and/or averages...", True)
sum_fields = [i for i in sum_fields if san.field_exist(temp_intersect, i)]
mean_fields = [i for i in mean_fields if san.field_exist(temp_intersect, i)]
agg_fields = list(set(sum_fields + mean_fields))
all_fields = [sampling_id, inter_area_col, base_area_col] + agg_fields
inter_df = san.arcgis_table_to_df(temp_intersect, all_fields)
inter_df[ratio_coverage] = inter_df[inter_area_col].fillna(0) / inter_df[base_area_col].fillna(1)
sum_cols = ["SUM_" + str(i) for i in sum_fields]
for input, sum in zip(sum_fields, sum_cols):
inter_df[sum] = inter_df[input] * inter_df[ratio_coverage] # Weight X Value
inter_groups_sum = inter_df.groupby(sampling_id).sum()
mean_cols = ["MEAN_" + str(i) for i in mean_fields]
for input, mean in zip(mean_fields, mean_cols):
inter_df[mean] = inter_df[input] * inter_df[inter_area_col] # (Weight X Value) / SUM(weights)
inter_groups_avg = inter_df.groupby(sampling_id).sum()
for mean in mean_cols:
inter_groups_avg[mean] = inter_groups_avg[mean]/inter_groups_avg[inter_area_col]
inter_groups = inter_groups_sum.merge(inter_groups_avg[mean_cols], how="left", left_index=True, right_index=True)
san.arc_print("Associating results to sampled SEDF...")
samp_df = pd.DataFrame.spatial.from_featureclass(sampling_features)
samp_df = samp_df.merge(inter_groups, how="left", left_on=sampling_id, right_index=True,
suffixes=("DELETE_X", "DELETE_Y"))
kept_cols = [i for i in samp_df.columns if "DELETE" not in str(i) and str(i) not in agg_fields]
samp_df = samp_df[kept_cols].copy()
san.arc_print("Exporting results...", True)
samp_df.spatial.to_featureclass(out_feature_class)
san.arc_print("Script Completed Successfully.", True)
# End do_analysis function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
# Define input parameters
target_feature_class = arcpy.GetParameterAsText(0)
join_feature_class = arcpy.GetParameterAsText(1)
output_feature_class = arcpy.GetParameterAsText(2)
sum_fields = arcpy.GetParameterAsText(3).split(";")
mean_fields = arcpy.GetParameterAsText(4).split(";")
proportional_allocation(target_feature_class, join_feature_class, output_feature_class, sum_fields, mean_fields)
| 53.947368 | 123 | 0.737724 |
67625e0b320918976bc854666bd7dc3cf04669b8 | 1,784 | py | Python | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | ["MIT"] | 1 | 2019-08-11T18:25:18.000Z | 2019-08-11T18:25:18.000Z | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | ["MIT"] | 33 | 2020-05-12T01:21:05.000Z | 2021-12-07T16:13:42.000Z | hypertools/reduce/reduce.py | jeremymanning/hypertools | 1b39b41aaa634e816d73635e0b9b773f1ed6e709 | ["MIT"] | null | null | null |
# noinspection PyPackageRequirements
import datawrangler as dw
import numpy as np
from ..core.model import apply_model
from ..core import get_default_options
from ..align.common import pad
defaults = get_default_options()
def get_n_components(model, **kwargs):
if 'n_components' in kwargs.keys():
return kwargs['n_components']
if type(model) is str:
if model in ['SparseCoder']:
if 'dictionary' in kwargs.keys():
return kwargs['dictionary'].shape[1]
elif model == 'PPCA':
return None
else:
return defaults[model].copy().pop('n_components', None)
elif hasattr(model, '__name__'):
return get_n_components(getattr(model, '__name__'), **kwargs)
elif type(model) is dict and all([k in ['model', 'args', 'kwargs'] for k in model.keys()]):
return get_n_components(model['model'], **model['kwargs'])
else:
return None
@dw.decorate.apply_stacked
def reduce(data, model='IncrementalPCA', **kwargs):
# noinspection PyTypeChecker
n_components = get_n_components(model, **kwargs)
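    # Branching below: if no target dimensionality is known, or the data has more
    # columns than the target, apply the reduction model; if it already matches,
    # return a copy; otherwise pad the data out to n_components columns.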
if (n_components is None) or (data.shape[1] > n_components):
return apply_model(data, model, search=['sklearn.decomposition', 'sklearn.manifold', 'sklearn.mixture',
'umap', 'ppca'],
**dw.core.update_dict(get_default_options()['reduce'], kwargs))
elif data.shape[1] == n_components:
transformed_data = data.copy()
else:
transformed_data = pad(data, c=n_components)
return_model = kwargs.pop('return_model', False)
if return_model:
return transformed_data, {'model': model, 'args': [], 'kwargs': kwargs}
else:
return transformed_data
| 34.307692 | 111 | 0.633408 |
8c5fd7bc1c9bd293de89770126a8ee09dbc102a4 | 1,042 | py | Python | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | ["MIT"] | 1 | 2020-11-21T20:22:11.000Z | 2020-11-21T20:22:11.000Z | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | ["MIT"] | 43 | 2020-06-29T03:50:10.000Z | 2022-03-15T23:13:14.000Z | testing/tsurf/test1.py | GNS-Science/eq-fault-geom | 2e110c27670b824f5177911085c78ba2ee00d507 | ["MIT"] | 1 | 2021-03-10T22:20:18.000Z | 2021-03-10T22:20:18.000Z |
#!/usr/bin/env python
"""
Very basic test of faultmeshio/tsurf.py.
Reads and writes a Tsurf file.
"""
import sys
sys.path.insert(0, '../../src')
# import pdb
# pdb.set_trace()
from eq_fault_geom import faultmeshio
# Files.
inFile = '../../data/Wellington_Hutt_Valley_1.ts'
outFile1 = 'Wellington_Hutt_Valley_1_test1.ts'
outFile2 = 'Wellington_Hutt_Valley_1_test2.ts'
outFile3 = 'Wellington_Hutt_Valley_1_test3.ts'
# Read and write sample Tsurf file.
tsurf1 = faultmeshio.tsurf(inFile)
tsurf1.write(outFile1)
# Create and write new mesh using Tsurf and properties from original mesh.
x = tsurf1.x
y = tsurf1.y
z = tsurf1.z
triangles = tsurf1.mesh.cells
tsurf2 = faultmeshio.tsurf(x, y, z, triangles, name=tsurf1.name,
solid_color=tsurf1.solid_color, visible=tsurf1.visible, NAME=tsurf1.NAME,
AXIS_NAME=tsurf1.AXIS_NAME, AXIS_UNIT=tsurf1.AXIS_UNIT, ZPOSITIVE=tsurf1.ZPOSITIVE)
# Write the mesh.
tsurf2.write(outFile2)
# Create and write mesh using default properties.
tsurf3 = faultmeshio.tsurf(x, y, z, triangles)
tsurf3.write(outFile3)
| 27.421053 | 83 | 0.769674 |
6368657193f4d05caf7404e89c8aa8310df8978c | 1,525 | py | Python | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | ["MIT"] | 1 | 2020-01-27T18:47:21.000Z | 2020-01-27T18:47:21.000Z | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | ["MIT"] | 6 | 2017-09-21T22:30:05.000Z | 2020-12-07T05:07:47.000Z | tests/test_Accession.py | LinkageIO/Minus80 | 85c83bec68b58e73026de9f306da2541f3310b08 | ["MIT"] | 2 | 2018-10-31T15:23:51.000Z | 2019-05-22T12:08:18.000Z |
from minus80 import Accession
def test_bare_accession():
x = Accession("empty")
assert isinstance(x, Accession)
str(x)
repr(x)
def test_add_relative_path():
x = Accession("empty")
x.add_file("./test.txt")
def test_add_files():
x = Accession("empty")
x.add_files(["./test.txt", "test2.txt", "test3.txt"])
def test_accession_name(simpleAccession):
assert simpleAccession.name == "Sample1"
def test_accession_files(simpleAccession):
assert "file1.txt" in simpleAccession.files
assert "file2.txt" in simpleAccession.files
def test_accession_metadata(simpleAccession):
assert simpleAccession.metadata["type"] == "sample"
def test_accession_getitem(simpleAccession):
assert simpleAccession["type"] == "sample"
def test_accession_setitem(simpleAccession):
simpleAccession["added"] = True
assert simpleAccession["added"] == True
def test_accession_file_check(RNAAccession1):
assert len(RNAAccession1.files) == 4
def test_accession_add_file_skip_check(simpleAccession):
simpleAccession.add_file("ssh://[email protected]/path/to/file.txt")
assert "ssh://[email protected]/path/to/file.txt" in simpleAccession.files
def test_accession_files_are_set(simpleAccession):
simpleAccession.add_file("/path/to/file.txt")
len_files = len(simpleAccession.files)
simpleAccession.add_file("/path/to/file.txt")
assert len(simpleAccession.files) == len_files
def test_load_from_yaml():
Accession.from_yaml("data/test_accession.yaml")
| 25 | 78 | 0.739016 |
7208e20eeb8a0b002aa5bd4736ac752a17ea073c | 1,948 | py | Python | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | ["MIT"] | 6 | 2017-01-13T22:32:55.000Z | 2022-03-27T22:19:49.000Z | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | ["MIT"] | 1 | 2016-09-13T17:59:41.000Z | 2016-09-13T18:05:20.000Z | pyglare/math/geometry.py | keyvank/pyglare | 9e26ae444ff4481f0f50d7344d2a5a881d04fe64 | ["MIT"] | null | null | null |
import math
class Vector:
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
def length(self):
return math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
def normalize(self):
return self / self.length()
def __add__(self,vec):
return Vector(self.x + vec.x,self.y + vec.y,self.z + vec.z)
def __sub__(self,vec):
return Vector(self.x - vec.x,self.y - vec.y,self.z - vec.z)
def __neg__(self):
return Vector(-self.x,-self.y,-self.z)
def __mul__(self,num):
return Vector(self.x * num,self.y * num,self.z * num)
def __truediv__(self,num):
return Vector(self.x / num,self.y / num,self.z / num)
def dot(a,b):
return a.x*b.x + a.y*b.y + a.z*b.z
def cross(a,b):
return Vector(a.y*b.z - a.z*b.y,
a.z*b.x - a.x*b.z,
a.x*b.y - a.y*b.x)
def reflect(self,vec):
mirror=self * Vector.dot(self,vec)/Vector.dot(self,self)
return (mirror*2-vec).normalize()
class Ray:
def __init__(self,position,direction):
self.position = position
self.direction = direction
class Plane:
def __init__(self,normal,intercept):
self.normal = normal
self.intercept = intercept
def intersection(self,ray):
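		# Solves normal·(origin + t*direction) + intercept = 0 for t; a parallel ray
		# (zero denominator) or an intersection behind the origin (t <= 0) returns None.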
div=Vector.dot(ray.direction,self.normal)
if div==0: # Plane and ray are parallel!
return None
t = -(Vector.dot(ray.position,self.normal)+self.intercept)/div
if t>0:
return t
else:
return None
class Sphere:
def __init__(self,position,radius):
self.position = position
self.radius = radius
def intersection(self,ray):
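		# Geometric test: tca is the distance along the ray to the closest approach to
		# the sphere centre, d2 the squared distance from the centre to the ray, and
		# thc half the chord length; the nearer positive root is returned.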
tca=Vector.dot(self.position-ray.position,ray.direction)
if tca<0:
return None
d2=Vector.dot(self.position-ray.position,self.position-ray.position)-tca*tca
if d2 > self.radius ** 2:
return None
thc=math.sqrt(self.radius ** 2 - d2)
ret=min(tca-thc,tca+thc)
if ret<0:
return None
else:
return ret
class Triangle:
def __init__(self,a,b,c):
self.a = a
self.b = b
self.c = c
| 20.505263 | 78 | 0.649384 |
402fad72683c8b97ece1c7c73977a49d01015fcf | 660 | py | Python | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | ["MIT"] | 7 | 2021-07-24T05:37:07.000Z | 2022-03-15T05:17:25.000Z | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | ["MIT"] | 2 | 2021-08-05T14:09:46.000Z | 2021-08-21T14:12:03.000Z | examples/fractals/dragon_curve/test_dragon_curve.py | Electro98/aads | 89607910856600b38349c31665f43fbb33df71c5 | ["MIT"] | 8 | 2021-08-20T17:17:02.000Z | 2022-03-15T05:17:27.000Z |
"""Tests for the dragon_curve.py module"""
import unittest
from dragon_curve import dragon_curve # pylint: disable=E0401
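# Each pair below is (input string, expected result of one rewriting step of the
# dragon-curve L-system), i.e. applying the rules x -> x+yf+ and y -> -fx-y.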
TEST_DRAGON_CURVE = [
    # First iteration
('fx', 'fx+yf+'),
    # Second iteration
('fx+yf+', 'fx+yf++-fx-yf+'),
    # Third iteration
('fx+yf++-fx-yf+', 'fx+yf++-fx-yf++-fx+yf+--fx-yf+')
]
class TestDragonCurve(unittest.TestCase):
"""Тест-кейс функции dragon_curve"""
def test_dragon_curve(self): # pylint: disable=C0116
for data, expected in TEST_DRAGON_CURVE:
with self.subTest():
self.assertEqual(dragon_curve(data), expected)
if __name__ == '__main__':
unittest.main()
| 25.384615 | 62 | 0.633333 |
43cc634a104124142b5282f454f0d0de3a0ff1cb | 7,860 | py | Python | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | ["Xnet", "X11"] | 146 | 2016-05-24T02:55:53.000Z | 2022-03-23T14:54:42.000Z | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | ["Xnet", "X11"] | 1 | 2017-08-17T23:07:39.000Z | 2017-08-18T08:27:19.000Z | Ch13/dtw.py | jason-168/MLCode | 429c17e004fb41ba16c371416c8f73833ab8fc1d | ["Xnet", "X11"] | 94 | 2016-05-06T12:34:33.000Z | 2022-03-30T03:31:04.000Z |
# Code from Chapter 13 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
import numpy as np
class dtree:
""" Decision Tree with weights"""
def __init__(self):
""" Constructor """
def read_data(self,filename):
fid = open(filename,"r")
data = []
d = []
for line in fid.readlines():
d.append(line.strip())
for d1 in d:
data.append(d1.split(","))
fid.close()
self.featureNames = data[0]
self.featureNames = self.featureNames[:-1]
data = data[1:]
self.classes = []
for d in range(len(data)):
self.classes.append(data[d][-1])
data[d] = data[d][:-1]
return data,self.classes,self.featureNames
def classify(self,tree,datapoint):
if type(tree) == type("string"):
# Have reached a leaf
return tree
else:
a = tree.keys()[0]
for i in range(len(self.featureNames)):
if self.featureNames[i]==a:
break
try:
t = tree[a][datapoint[i]]
return self.classify(t,datapoint)
except:
return None
def classifyAll(self,tree,data):
results = []
for i in range(len(data)):
results.append(self.classify(tree,data[i]))
return results
def make_tree(self,data,weights,classes,featureNames,maxlevel=-1,level=0):
nData = len(data)
nFeatures = len(data[0])
try:
self.featureNames
except:
self.featureNames = featureNames
# List the possible classes
newClasses = []
for aclass in classes:
if newClasses.count(aclass)==0:
newClasses.append(aclass)
        # Compute the default class (and total Gini impurity)
frequency = np.zeros(len(newClasses))
totalGini = 0
index = 0
for aclass in newClasses:
frequency[index] = classes.count(aclass)
totalGini += (float(frequency[index])/nData)**2
index += 1
totalGini = 1 - totalGini
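        # totalGini now holds the Gini impurity of the node, 1 - sum_k p_k**2,
        # where p_k is the fraction of datapoints in class k (0 for a pure node).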
default = classes[np.argmax(frequency)]
if nData==0 or nFeatures == 0 or (maxlevel>=0 and level>maxlevel):
# Have reached an empty branch
return default
elif classes.count(classes[0]) == nData:
# Only 1 class remains
return classes[0]
else:
# Choose which feature is best
#print totalGini
gain = np.zeros(nFeatures)
for feature in range(nFeatures):
g = self.calc_info_gain(data,weights,classes,feature)
gain[feature] = totalGini - g
#print "gain", gain
bestFeature = np.argmin(gain)
#print bestFeature
tree = {featureNames[bestFeature]:{}}
# List the values that bestFeature can take
values = []
for datapoint in data:
if values.count(datapoint[bestFeature])==0:
values.append(datapoint[bestFeature])
for value in values:
# Find the datapoints with each feature value
newData = []
newWeights = []
newClasses = []
index = 0
for datapoint in data:
if datapoint[bestFeature]==value:
if bestFeature==0:
newdatapoint = datapoint[1:]
newweight = weights[1:]
newNames = featureNames[1:]
elif bestFeature==nFeatures:
newdatapoint = datapoint[:-1]
newweight = weights[:-1]
newNames = featureNames[:-1]
else:
newdatapoint = datapoint[:bestFeature]
newdatapoint.extend(datapoint[bestFeature+1:])
newweight = weights[:bestFeature]
newweight = np.concatenate((newweight,weights[bestFeature+1:]))
newNames = featureNames[:bestFeature]
newNames.extend(featureNames[bestFeature+1:])
newData.append(newdatapoint)
newWeights = np.concatenate((newWeights,newweight))
newClasses.append(classes[index])
index += 1
# Now recurse to the next level
subtree = self.make_tree(newData,newWeights,newClasses,newNames,maxlevel,level+1)
# And on returning, add the subtree on to the tree
tree[featureNames[bestFeature]][value] = subtree
return tree
def printTree(self,tree,str):
if type(tree) == dict:
print str, tree.keys()[0]
for item in tree.values()[0].keys():
print str, item
self.printTree(tree.values()[0][item], str + "\t")
else:
print str, "\t->\t", tree
def calc_info_gain(self,data,weights,classes,feature,maxlevel=-1,level=0):
gain = 0
nData = len(data)
try:
self.featureNames
except:
self.featureNames = featureNames
# List the values that feature can take
values = []
valueweight = np.array([],dtype=float)
counter = 0
for datapoint in data:
if values.count(datapoint[feature])==0:
values.append(datapoint[feature])
if np.size(valueweight) == 0:
valueweight = np.array([weights[counter]])
else:
valueweight = np.concatenate((valueweight,np.array([weights[counter]])))
else:
ind = values.index(datapoint[feature])
valueweight[ind] += weights[counter]
counter += 1
valueweight /= sum(valueweight)
#print "v",valueweight
featureCounts = np.zeros(len(values))
gini = np.zeros(len(values))
valueIndex = 0
# Find where those values appear in data[feature] and the corresponding class
for value in values:
dataIndex = 0
newClasses = []
for datapoint in data:
if datapoint[feature]==value:
featureCounts[valueIndex]+=1
newClasses.append(classes[dataIndex])
dataIndex += 1
# Get the values in newClasses
classValues = []
for aclass in newClasses:
if classValues.count(aclass)==0:
classValues.append(aclass)
classCounts = np.zeros(len(classValues))
classIndex = 0
for classValue in classValues:
for aclass in newClasses:
if aclass == classValue:
classCounts[classIndex]+=1
classIndex += 1
for classIndex in range(len(classValues)):
gini[valueIndex] += (float(classCounts[classIndex])/sum(classCounts))**2
gain = gain + float(featureCounts[valueIndex])/nData * gini[valueIndex] * valueweight[valueIndex]
valueIndex += 1
return 1-gain
| 35.246637 | 109 | 0.514249 |