Dataset schema (column: type and observed range; ⌀ = nullable):

- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 distinct values
- lang: string, 1 distinct value
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
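Each row below pairs one source file's content with repository metadata (paths, head hexshas, licenses, star/issue/fork counts and event dates) and per-file line statistics. As a minimal sketch of how rows with this schema could be filtered, assuming the split has been exported to a local Parquet file named `data.parquet` (a hypothetical path, not part of this dump):

```python
import pandas as pd

# Hypothetical local export of this split; adjust the path to your own copy.
df = pd.read_parquet("data.parquet")

# Keep Python files from permissively licensed repos with at least one star
# and modest line lengths, using the columns described in the schema above.
permissive = {"Apache-2.0", "MIT", "Unlicense"}
mask = (
    (df["lang"] == "Python")
    & df["max_stars_count"].fillna(0).ge(1)
    & df["max_line_length"].le(120)
    & df["max_stars_repo_licenses"].apply(lambda ls: bool(permissive.intersection(ls)))
)
for _, row in df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head().iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```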
hexsha: c1dc89cefecc64ea21e3ae057486e59c08fdbc37 | size: 3,999 | ext: py | lang: Python
max_stars: repo=alexandraciobica/zulip, path=zerver/webhooks/gogs/view.py, head=f3753504469070bfccc73f22f933c87bee7d1852, licenses=["Apache-2.0"], count=1, events=2019-10-01T14:04:07.000Z to 2019-10-01T14:04:07.000Z
max_issues: repo=991rajat/zulip, path=zerver/webhooks/gogs/view.py, head=648a60baf63f9afade83148bd9ae1fc480510178, licenses=["Apache-2.0"], count=4, events=2020-06-06T00:51:42.000Z to 2022-02-10T21:38:40.000Z
max_forks: repo=991rajat/zulip, path=zerver/webhooks/gogs/view.py, head=648a60baf63f9afade83148bd9ae1fc480510178, licenses=["Apache-2.0"], count=1, events=2020-01-06T15:12:36.000Z to 2020-01-06T15:12:36.000Z
content:
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, \
validate_extract_webhook_http_header, UnexpectedWebhookEventType, \
get_http_headers_from_filename
from zerver.lib.webhooks.git import TOPIC_WITH_BRANCH_TEMPLATE, \
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE, get_create_branch_event_message, \
get_pull_request_event_message, get_push_commits_event_message
from zerver.models import UserProfile
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GOGS_EVENT")
def format_push_event(payload: Dict[str, Any]) -> str:
for commit in payload['commits']:
commit['sha'] = commit['id']
commit['name'] = (commit['author']['username'] or
commit['author']['name'].split()[0])
data = {
'user_name': payload['sender']['username'],
'compare_url': payload['compare_url'],
'branch_name': payload['ref'].replace('refs/heads/', ''),
'commits_data': payload['commits']
}
return get_push_commits_event_message(**data)
def format_new_branch_event(payload: Dict[str, Any]) -> str:
branch_name = payload['ref']
url = '{}/src/{}'.format(payload['repository']['html_url'], branch_name)
data = {
'user_name': payload['sender']['username'],
'url': url,
'branch_name': branch_name
}
return get_create_branch_event_message(**data)
def format_pull_request_event(payload: Dict[str, Any],
include_title: Optional[bool]=False) -> str:
data = {
'user_name': payload['pull_request']['user']['username'],
'action': payload['action'],
'url': payload['pull_request']['html_url'],
'number': payload['pull_request']['number'],
'target_branch': payload['pull_request']['head_branch'],
'base_branch': payload['pull_request']['base_branch'],
'title': payload['pull_request']['title'] if include_title else None
}
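    # If the payload marks the pull request as merged, report the merging user
    # and override the action with 'merged'.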
if payload['pull_request']['merged']:
data['user_name'] = payload['pull_request']['merged_by']['username']
data['action'] = 'merged'
return get_pull_request_event_message(**data)
@api_key_only_webhook_view('Gogs')
@has_request_variables
def api_gogs_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body'),
branches: Optional[str]=REQ(default=None),
user_specified_topic: Optional[str]=REQ("topic", default=None)) -> HttpResponse:
repo = payload['repository']['name']
event = validate_extract_webhook_http_header(request, 'X_GOGS_EVENT', 'Gogs')
if event == 'push':
branch = payload['ref'].replace('refs/heads/', '')
if branches is not None and branches.find(branch) == -1:
return json_success()
body = format_push_event(payload)
topic = TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=repo,
branch=branch
)
elif event == 'create':
body = format_new_branch_event(payload)
topic = TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=repo,
branch=payload['ref']
)
elif event == 'pull_request':
body = format_pull_request_event(
payload,
include_title=user_specified_topic is not None
)
topic = TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=repo,
type='PR',
id=payload['pull_request']['id'],
title=payload['pull_request']['title']
)
else:
raise UnexpectedWebhookEventType('Gogs', event)
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
avg_line_length: 37.373832 | max_line_length: 101 | alphanum_fraction: 0.653163
hexsha: 0ab22662bd3300b595b3c2cf036579e35200f2d0 | size: 544 | ext: py | lang: Python
max_stars: repo=xenein/froide, path=froide/campaign/migrations/0006_auto_20190826_1451.py, head=59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa, licenses=["MIT"], count=198, events=2016-12-03T22:42:55.000Z to 2022-03-25T15:08:36.000Z
max_issues: repo=xenein/froide, path=froide/campaign/migrations/0006_auto_20190826_1451.py, head=59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa, licenses=["MIT"], count=264, events=2016-11-30T18:53:17.000Z to 2022-03-17T11:34:18.000Z
max_forks: repo=xenein/froide, path=froide/campaign/migrations/0006_auto_20190826_1451.py, head=59bd3eeded3c3ed00fbc858fe20bfea99c8dbefa, licenses=["MIT"], count=42, events=2016-12-22T04:08:27.000Z to 2022-02-26T08:30:38.000Z
content:
# Generated by Django 2.1.11 on 2019-08-26 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("campaign", "0005_campaign_group"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="request_hint",
field=models.TextField(blank=True),
),
migrations.AddField(
model_name="campaign",
name="request_match",
field=models.TextField(blank=True),
),
]
avg_line_length: 22.666667 | max_line_length: 48 | alphanum_fraction: 0.577206
hexsha: 098429470f168122a47b880313de231b6b9ead81 | size: 6,760 | ext: py | lang: Python
max_stars: repo=arcsinY/DM_Project2, path=code.py, head=1775f1890a59d92aaed0d5ed08588712e659aeaf, licenses=["Apache-2.0"], count=2, events=2021-06-07T02:22:47.000Z to 2021-09-02T08:06:54.000Z
max_issues: repo=arcsinY/DM_Project2, path=code.py, head=1775f1890a59d92aaed0d5ed08588712e659aeaf, licenses=["Apache-2.0"], count=null, events=null
max_forks: repo=arcsinY/DM_Project2, path=code.py, head=1775f1890a59d92aaed0d5ed08588712e659aeaf, licenses=["Apache-2.0"], count=null, events=null
content:
import pandas as pd
import orangecontrib.associate.fpgrowth as oaf
import json as js
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv('USvideos.csv', sep = ',')
# Map category_id values to category names
with open('US_category_id.json') as f:
json_date = js.load(f)
f.close()
id2cat = {}
for i in range(len(json_date['items'])):
id2cat[json_date['items'][i]['id']] = json_date['items'][i]['snippet']['title']
for i in range(len(data)):
id = data.loc[i, 'category_id']
data.loc[i, 'category_id'] = id2cat[str(id)]
# Discretize views into low/medium/high levels
arr = data['views']
one = arr.quantile(0.25)
three = arr.quantile(0.75)
view_level = []
for i in data['views']:
if int(i) >= three:
view_level.append('high view')
elif int(i) <= one:
view_level.append('low view')
else:
view_level.append('medium view')
# Convert likes and dislikes into a single like/dislike label
like = []
for i in range(len(data)):
if data.loc[i, 'likes'] >= data.loc[i, 'dislikes']:
like.append('like')
else:
like.append('dislike')
# Discretize comment_count into low/medium/high levels
arr = data['comment_count']
one = arr.quantile(0.25)
three = arr.quantile(0.75)
comment_level = []
for i in data['comment_count']:
if i >= three:
comment_level.append('high comment')
elif i <= one:
comment_level.append('low comment')
else:
comment_level.append('medium comment')
data = data.drop(['views', 'likes', 'dislikes', 'comment_count'], axis = 1)
data.insert(0, 'views', view_level)
data.insert(0, 'like', like)
data.insert(0, 'comment_count', comment_level)
data = data.drop(['video_id', 'trending_date', 'publish_time', 'video_error_or_removed', 'description', 'thumbnail_link', 'title', 'comments_disabled', 'ratings_disabled'], axis = 1)
# Convert the data into the transaction format required by the algorithm
id2str = {}  # integer code -> string
str2id = {}  # string -> integer code
id = 0
transaction = []
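# Each row becomes one transaction: a list of integer item codes, which is the
# input format the fpgrowth functions below expect.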
for i in range(len(data)):
one = []
for j in data.columns:
        # Split the tags field into individual tag items
if j == 'tags':
str_arr = data.loc[i, j].split('|')
for s in str_arr:
if s in str2id:
one.append(str2id[s])
else:
id2str[id] = s
str2id[s] = id
one.append(id)
id += 1
else:
if data.loc[i, j] in str2id:
one.append(str2id[data.loc[i, j]])
else:
id2str[id] = data.loc[i, j]
str2id[data.loc[i, j]] = id
one.append(id)
id += 1
transaction.append(one)
# Frequent itemsets
items = list(oaf.frequent_itemsets(transaction))
for i in items:
freq_set = []
abs_sup = i[1]
for j in i[0]:
freq_set.append(id2str[j])
print(freq_set, abs_sup, round(float(abs_sup) / len(data), 2))
# Association rules
rules = list(oaf.association_rules(dict(items), 0.2))
for i in rules:
antecedent = []
consequent = []
for j in i[0]:
antecedent.append(id2str[j])
for j in i[1]:
consequent.append(id2str[j])
print(antecedent, "->", consequent, i[2], round(i[3],2))
# lift
measure = list(oaf.rules_stats(oaf.association_rules(dict(items), 0.2), dict(oaf.frequent_itemsets(transaction, 0.2)), len(data)))
for i in measure:
antecedent = []
consequent = []
for j in i[0]:
antecedent.append(id2str[j])
for j in i[1]:
consequent.append(id2str[j])
print(antecedent, "->", consequent, round(i[6], 2))
# Compute the Kulc measure
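# Kulc(A, B) = (conf(A -> B) + conf(B -> A)) / 2: average the confidence of each
# rule with the confidence of its reverse rule.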
kulc = []
visit = [False for i in range(len(rules))]
for i in range(len(rules)):
if visit[i] == True:
continue
visit[i] = True
for j in range(len(rules)):
if visit[j] == True:
continue
if rules[j][0] == rules[i][1] and rules[j][1] == rules[i][0]:
one = []
antecedent = []
consequent = []
for k in rules[i][0]:
antecedent.append(id2str[k])
for k in rules[i][1]:
consequent.append(id2str[k])
one.append(rules[i][0])
one.append(rules[i][1])
one.append((rules[i][3] + rules[j][3])/2)
kulc.append(one)
print('Kulc(', antecedent, consequent, ') = ', round((rules[i][3] + rules[j][3])/2, 2))
visit[j] = True
# Count the "like" rows and the "low view" rows
like_cnt = 0
low_view_cnt = 0
for i in data['like']:
if i == 'like':
like_cnt += 1
for i in data['views']:
if i == 'low view':
low_view_cnt += 1
print(like_cnt, low_view_cnt)
# Visualization
conf_matrix = []
lift_matrix = []
kulc_matrix = []
rules_column = set()
for i in range(len(measure)):
rules_column.add(measure[i][0])
# Build the confidence matrix
for i in rules_column:
one = []
for j in rules_column:
if i == j:
one.append(1)
else:
flag = False
for k in range(len(rules)):
if rules[k][0] == i and rules[k][1] == j:
one.append(rules[k][3])
flag = True
if flag == False:
one.append(0)
conf_matrix.append(one)
# Build the lift matrix
for i in rules_column:
one = []
for j in rules_column:
if i == j:
one.append(1)
else:
flag = False
for k in range(len(measure)):
if measure[k][0] == i and measure[k][1] == j:
one.append(measure[k][6])
flag = True
if flag == False:
one.append(0)
lift_matrix.append(one)
# Build the Kulc matrix
for i in rules_column:
one = []
for j in rules_column:
if i == j:
one.append(1)
else:
flag = False
for k in range(len(kulc)):
if kulc[k][0] == i and kulc[k][1] == j:
one.append(kulc[k][2])
flag = True
if flag == False:
one.append(0)
kulc_matrix.append(one)
# Build readable column labels for the heatmap (map each frozenset of item ids back to strings)
rules_column = list(rules_column)
rules_column_list = []
for i in rules_column:
    one = ""
    for j in range(len(i)):
        one += id2str[list(i)[j]]
        if j < len(i) - 1:
            one += ", "
    rules_column_list.append(one)
lift_pd = pd.DataFrame(lift_matrix, columns = rules_column_list, index = rules_column_list)
plt.figure(figsize=(11, 9),dpi=100)
sns.heatmap(data = lift_pd, annot = True, fmt = ".2f")
plt.show()
avg_line_length: 28.888889 | max_line_length: 183 | alphanum_fraction: 0.522929
hexsha: 086157de2e4ae5eda2e9d15175dee84010a61cb3 | size: 628 | ext: py | lang: Python
max_stars: repo=chelocool/orbinweb, path=manage.py, head=b52b12f335c076c8cf1f80d462d911f7c48c91f1, licenses=["Apache-2.0"], count=null, events=null
max_issues: repo=chelocool/orbinweb, path=manage.py, head=b52b12f335c076c8cf1f80d462d911f7c48c91f1, licenses=["Apache-2.0"], count=null, events=null
max_forks: repo=chelocool/orbinweb, path=manage.py, head=b52b12f335c076c8cf1f80d462d911f7c48c91f1, licenses=["Apache-2.0"], count=null, events=null
content:
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OrbinWeb.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
avg_line_length: 28.545455 | max_line_length: 73 | alphanum_fraction: 0.683121
hexsha: 495fa8b63cd6af479192db1f7d66a4f405bee29f | size: 3,420 | ext: py | lang: Python
max_stars: repo=tswast/ibis, path=ibis/pandas/execution/temporal.py, head=2f6d47e4c33cefd7ea1d679bb1d9253c2245993b, licenses=["Apache-2.0"], count=null, events=null
max_issues: repo=tswast/ibis, path=ibis/pandas/execution/temporal.py, head=2f6d47e4c33cefd7ea1d679bb1d9253c2245993b, licenses=["Apache-2.0"], count=null, events=null
max_forks: repo=tswast/ibis, path=ibis/pandas/execution/temporal.py, head=2f6d47e4c33cefd7ea1d679bb1d9253c2245993b, licenses=["Apache-2.0"], count=null, events=null
content:
import six
import datetime
import numpy as np
import pandas as pd
import ibis.expr.operations as ops
from ibis.pandas.dispatch import execute_node
from ibis.pandas.core import numeric_types
@execute_node.register(ops.Strftime, pd.Timestamp, six.string_types)
def execute_strftime_timestamp_str(op, data, format_string, **kwargs):
return data.strftime(format_string)
@execute_node.register(ops.Strftime, pd.Series, six.string_types)
def execute_strftime_series_str(op, data, format_string, **kwargs):
return data.dt.strftime(format_string)
@execute_node.register(
(ops.ExtractTimestampField, ops.ExtractTemporalField),
pd.Timestamp
)
def execute_extract_timestamp_field_timestamp(op, data, **kwargs):
field_name = type(op).__name__.lower().replace('extract', '')
return getattr(data, field_name)
@execute_node.register(ops.ExtractMillisecond, pd.Timestamp)
def execute_extract_millisecond_timestamp(op, data, **kwargs):
return int(data.microsecond // 1000.0)
@execute_node.register(
(ops.ExtractTimestampField, ops.ExtractTemporalField),
pd.Series
)
def execute_extract_timestamp_field_series(op, data, **kwargs):
field_name = type(op).__name__.lower().replace('extract', '')
return getattr(data.dt, field_name).astype(np.int32)
@execute_node.register(
ops.BetweenTime,
pd.Series,
(pd.Series, str, datetime.time),
(pd.Series, str, datetime.time),
)
def execute_between_time(op, data, lower, upper, **kwargs):
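    # indexer_between_time gives the integer positions of rows whose time of day
    # falls within [lower, upper]; mark those positions in a boolean mask.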
indexer = pd.DatetimeIndex(data).indexer_between_time(
lower, upper)
result = np.zeros(len(data), dtype=np.bool_)
result[indexer] = True
return result
@execute_node.register(ops.Date, pd.Series)
def execute_timestamp_date(op, data, **kwargs):
return data.dt.floor('d')
@execute_node.register((ops.TimestampTruncate, ops.DateTruncate), pd.Series)
def execute_timestamp_truncate(op, data, **kwargs):
dtype = 'datetime64[{}]'.format(op.unit)
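    # Casting to a unit-specific dtype (e.g. datetime64[D]) truncates each
    # timestamp to that unit; rebuild a Series so the original name is kept.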
array = data.values.astype(dtype)
return pd.Series(array, name=data.name)
@execute_node.register(ops.IntervalFromInteger, pd.Series)
def execute_interval_from_integer_series(op, data, **kwargs):
resolution = '{}s'.format(op.resolution)
def convert_to_offset(n):
return pd.offsets.DateOffset(**{resolution: n})
return data.apply(convert_to_offset)
@execute_node.register(
ops.TimestampAdd,
(datetime.datetime, pd.Series),
(datetime.timedelta, np.timedelta64, pd.Timedelta, pd.Series)
)
@execute_node.register(
ops.IntervalAdd, (pd.Timedelta, pd.Series), (pd.Timedelta, pd.Series)
)
def execute_timestamp_add_delta(op, left, right, **kwargs):
return left + right
@execute_node.register(
(ops.TimestampSub, ops.TimestampDiff),
(datetime.datetime, pd.Series),
(
datetime.datetime,
np.datetime64,
datetime.timedelta,
np.timedelta64,
pd.Timedelta,
pd.Series
),
)
def execute_timestamp_sub_diff(op, left, right, **kwargs):
return left - right
@execute_node.register(
ops.IntervalMultiply,
(pd.Timedelta, pd.Series), numeric_types + (pd.Series,)
)
def execute_interval_multiply(op, left, right, **kwargs):
return left * right
@execute_node.register(
ops.IntervalFloorDivide,
(pd.Timedelta, pd.Series),
numeric_types + (pd.Series,)
)
def execute_interval_floor_divide(op, left, right, **kwargs):
return left // right
avg_line_length: 27.804878 | max_line_length: 76 | alphanum_fraction: 0.726608
hexsha: 9a2d387a2b3ae45e27bd138677e8b7e67c7398cf | size: 2,447 | ext: py | lang: Python
max_stars: repo=ENBISYS/avmPython, path=avm_client/models/wall_insulation.py, head=daa3686ea431e752687c915e2f5b6f65b6c77130, licenses=["Unlicense"], count=null, events=null
max_issues: repo=ENBISYS/avmPython, path=avm_client/models/wall_insulation.py, head=daa3686ea431e752687c915e2f5b6f65b6c77130, licenses=["Unlicense"], count=null, events=null
max_forks: repo=ENBISYS/avmPython, path=avm_client/models/wall_insulation.py, head=daa3686ea431e752687c915e2f5b6f65b6c77130, licenses=["Unlicense"], count=null, events=null
content:
# coding: utf-8
"""
AVM
    This is the API for AVM (automated valuation machine)  # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: info@enbisys.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class WallInsulation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
NO = "no"
PARTIAL = "partial"
INSULATED = "insulated"
ADDITIONAL = "additional"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""WallInsulation - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WallInsulation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 26.031915 | max_line_length: 74 | alphanum_fraction: 0.549244
hexsha: deb3f7752649b74277824ee2117cfa1c06a52797 | size: 3,475 | ext: py | lang: Python
max_stars: repo=tonythomas01/quarantine_backend, path=management/serializer.py, head=dea1c8a4838954fca531ae9c1c7763fc1d1a4fd8, licenses=["MIT"], count=3, events=2020-05-20T14:07:57.000Z to 2020-12-18T22:01:59.000Z
max_issues: repo=tonythomas01/quarantine_backend, path=management/serializer.py, head=dea1c8a4838954fca531ae9c1c7763fc1d1a4fd8, licenses=["MIT"], count=22, events=2020-04-04T11:23:22.000Z to 2021-12-13T20:37:18.000Z
max_forks: repo=Quarantine-Help/quarantined_backend, path=management/serializer.py, head=3cab95d0716bb914f8005075674aa7bbdccfdae4, licenses=["MIT"], count=2, events=2020-04-04T15:02:57.000Z to 2020-05-02T18:10:41.000Z
content:
from rest_framework import fields, serializers
from rest_framework.serializers import ModelSerializer
from authentication.serializer import (
UserSerializer,
ParticipantSerializer,
ParticipantAnonymizedSerializer,
)
from crisis.models.crisis_request import Request
from crisis.models.request_assignment import RequestAssignment
from management.models import Ability
class AbilitySerializer(ModelSerializer):
class Meta:
model = Ability
fields = "__all__"
class RequestAssignmentSerializer(ModelSerializer):
class Meta:
model = RequestAssignment
fields = ("status", "id", "created_at", "did_complete", "assignee_id")
class RequestSerializer(ModelSerializer):
assignee = ParticipantSerializer(allow_null=True, required=False, read_only=True)
assignmentHistory = RequestAssignmentSerializer(
source="related_assignment",
allow_null=True,
required=False,
read_only=True,
many=True,
)
bountyCurrency = fields.CharField(source="owner.currency", read_only=True)
status = fields.CharField(required=False, allow_null=True)
def validate(self, attrs):
attrs = super().validate(attrs)
        # We do not allow people to set the attribute to Transit through the API view.
if not self.instance:
return attrs
if self.instance.status in Request.FINISHED_STATUSES:
raise serializers.ValidationError("You cannot change this request anymore.")
if self.instance.status == "T":
if attrs["status"] not in Request.FINISHED_STATUSES:
raise serializers.ValidationError(
"You can only cancel or mark the request as finished"
)
if attrs["description"] != self.instance.description:
raise serializers.ValidationError(
"You will have to cancel this request first."
)
if not attrs["deadline"] >= self.instance.deadline:
raise serializers.ValidationError(
"You cannot shorten the deadline now. Please cancel"
)
if attrs.get("status", None) == "T":
# This should happen through assignment logic
raise serializers.ValidationError("Cannot update the status to T")
return attrs
class Meta:
model = Request
fields = (
"id",
"type",
"deadline",
"description",
"assignee",
"status",
"assignmentHistory",
"bountyCurrency",
"bounty_amount_offered_to_volunteer",
"created_at",
)
class RequestAnonymizedSerializer(RequestSerializer):
assignee = ParticipantAnonymizedSerializer(
allow_null=True, required=False, read_only=True
)
class AssigneeRequestUpdateSerializer(ModelSerializer):
class Meta:
model = Request
fields = ("id", "status")
def validate(self, attrs):
attrs = super().validate(attrs)
if "status" not in attrs:
raise serializers.ValidationError("Status is mandatory")
allowed_status = [Request.STATUS_FINISHED, Request.STATUS_TRANSIT]
if attrs["status"] not in allowed_status:
raise serializers.ValidationError(
"Only the following status are allowed %s" % str(allowed_status)
)
return attrs
avg_line_length: 32.783019 | max_line_length: 88 | alphanum_fraction: 0.637698
hexsha: 28545b5de3dc843010b0c429623358c6cc38e6cf | size: 41,479 | ext: py | lang: Python
max_stars: repo=johnj/salt, path=tests/unit/test_loader.py, head=b23656fa5ee24047c43ac702d6796a700570f749, licenses=["Apache-2.0"], count=5, events=2017-02-07T05:39:29.000Z to 2020-06-13T02:07:33.000Z
max_issues: repo=johnj/salt, path=tests/unit/test_loader.py, head=b23656fa5ee24047c43ac702d6796a700570f749, licenses=["Apache-2.0"], count=86, events=2017-01-27T11:54:46.000Z to 2020-05-20T06:25:26.000Z
max_forks: repo=johnj/salt, path=tests/unit/test_loader.py, head=b23656fa5ee24047c43ac702d6796a700570f749, licenses=["Apache-2.0"], count=11, events=2017-01-26T19:36:29.000Z to 2021-12-11T07:54:16.000Z
content:
# -*- coding: utf-8 -*-
'''
unit.loader
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test Salt's loader
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import collections
import compileall
import copy
import imp
import inspect
import logging
import os
import shutil
import sys
import tempfile
import textwrap
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import TestCase
from tests.support.mock import patch
from tests.support.paths import TMP
# Import Salt libs
import salt.config
import salt.loader
import salt.utils.files
import salt.utils.stringutils
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
log = logging.getLogger(__name__)
def remove_bytecode(module_path):
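    # Delete both the legacy in-place .pyc and, when imp.get_tag is available,
    # the PEP 3147 __pycache__ variant of the module's bytecode.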
paths = [module_path + 'c']
if hasattr(imp, 'get_tag'):
modname, ext = os.path.splitext(module_path.split(os.sep)[-1])
paths.append(
os.path.join(os.path.dirname(module_path),
'__pycache__',
'{}.{}.pyc'.format(modname, imp.get_tag())))
for path in paths:
if os.path.exists(path):
os.unlink(path)
loader_template = '''
import os
from salt.utils.decorators import depends
@depends('os')
def loaded():
return True
@depends('non_existantmodulename')
def not_loaded():
return True
'''
class LazyLoaderTest(TestCase):
'''
Test the loader
'''
module_name = 'lazyloadertest'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
# Setup the module
self.module_dir = tempfile.mkdtemp(dir=TMP)
self.module_file = os.path.join(self.module_dir,
'{0}.py'.format(self.module_name))
with salt.utils.files.fopen(self.module_file, 'w') as fh:
fh.write(salt.utils.stringutils.to_str(loader_template))
fh.flush()
os.fsync(fh.fileno())
# Invoke the loader
self.loader = salt.loader.LazyLoader([self.module_dir], copy.deepcopy(self.opts), tag='module')
def tearDown(self):
shutil.rmtree(self.module_dir)
if os.path.isdir(self.module_dir):
shutil.rmtree(self.module_dir)
del self.module_dir
del self.module_file
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_depends(self):
'''
Test that the depends decorator works properly
'''
# Make sure depends correctly allowed a function to load. If this
# results in a KeyError, the decorator is broken.
self.assertTrue(
inspect.isfunction(
self.loader[self.module_name + '.loaded']
)
)
# Make sure depends correctly kept a function from loading
self.assertTrue(self.module_name + '.not_loaded' not in self.loader)
class LazyLoaderVirtualEnabledTest(TestCase):
'''
Test the base loader of salt.
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['disable_modules'] = ['pillar']
cls.opts['grains'] = salt.loader.grains(cls.opts)
def setUp(self):
self.loader = salt.loader.LazyLoader(
salt.loader._module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module')
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_basic(self):
'''
Ensure that it only loads stuff when needed
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
# make sure we only loaded "test" functions
for key, val in six.iteritems(self.loader._dict):
self.assertEqual(key.split('.', 1)[0], 'test')
# make sure the depends thing worked (double check of the depends testing,
        # since the loader does the calling magically)
self.assertFalse('test.missing_func' in self.loader._dict)
def test_badkey(self):
with self.assertRaises(KeyError):
self.loader[None] # pylint: disable=W0104
with self.assertRaises(KeyError):
self.loader[1] # pylint: disable=W0104
def test_disable(self):
self.assertNotIn('pillar.items', self.loader)
def test_len_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks for len() we have
to load all
'''
self.assertEqual(self.loader._dict, {})
len(self.loader) # force a load all
self.assertNotEqual(self.loader._dict, {})
def test_iter_load(self):
'''
Since LazyLoader is a MutableMapping, if someone asks to iterate we have
to load all
'''
self.assertEqual(self.loader._dict, {})
# force a load all
for key, func in six.iteritems(self.loader):
break
self.assertNotEqual(self.loader._dict, {})
def test_context(self):
'''
Make sure context is shared across modules
'''
# make sure it starts empty
self.assertEqual(self.loader._dict, {})
# get something, and make sure its a func
func = self.loader['test.ping']
with patch.dict(func.__globals__['__context__'], {'foo': 'bar'}):
self.assertEqual(self.loader['test.echo'].__globals__['__context__']['foo'], 'bar')
self.assertEqual(self.loader['grains.get'].__globals__['__context__']['foo'], 'bar')
def test_globals(self):
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__grains__'], self.opts.get('grains', {}))
self.assertEqual(func_globals['__pillar__'], self.opts.get('pillar', {}))
# the opts passed into modules is at least a subset of the whole opts
for key, val in six.iteritems(func_globals['__opts__']):
if key in salt.config.DEFAULT_MASTER_OPTS and key not in salt.config.DEFAULT_MINION_OPTS:
# We loaded the minion opts, but somewhere in the code, the master options got pulled in
# Let's just not check for equality since the option won't even exist in the loaded
# minion options
continue
if key not in salt.config.DEFAULT_MASTER_OPTS and key not in salt.config.DEFAULT_MINION_OPTS:
# This isn't even a default configuration setting, lets carry on
continue
self.assertEqual(self.opts[key], val)
def test_pack(self):
self.loader.pack['__foo__'] = 'bar'
func_globals = self.loader['test.ping'].__globals__
self.assertEqual(func_globals['__foo__'], 'bar')
def test_virtual(self):
self.assertNotIn('test_virtual.ping', self.loader)
class LazyLoaderVirtualDisabledTest(TestCase):
'''
Test the loader of salt without __virtual__
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
def setUp(self):
self.loader = salt.loader.LazyLoader(
salt.loader._module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module',
virtual_enable=False)
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_virtual(self):
self.assertTrue(inspect.isfunction(self.loader['test_virtual.ping']))
class LazyLoaderWhitelistTest(TestCase):
'''
Test the loader of salt with a whitelist
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
def setUp(self):
self.loader = salt.loader.LazyLoader(
salt.loader._module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module',
whitelist=['test', 'pillar'])
def tearDown(self):
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def test_whitelist(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.assertTrue(inspect.isfunction(self.loader['pillar.get']))
self.assertNotIn('grains.get', self.loader)
class LazyLoaderGrainsBlacklistTest(TestCase):
'''
Test the loader of grains with a blacklist
'''
def setUp(self):
self.opts = salt.config.minion_config(None)
def tearDown(self):
del self.opts
def test_whitelist(self):
opts = copy.deepcopy(self.opts)
opts['grains_blacklist'] = [
'master',
'os*',
'ipv[46]'
]
grains = salt.loader.grains(opts)
self.assertNotIn('master', grains)
self.assertNotIn('os', set([g[:2] for g in list(grains)]))
self.assertNotIn('ipv4', grains)
self.assertNotIn('ipv6', grains)
class LazyLoaderSingleItem(TestCase):
'''
Test loading a single item via the _load() function
'''
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
def setUp(self):
self.loader = salt.loader.LazyLoader(
salt.loader._module_dirs(copy.deepcopy(self.opts), 'modules', 'module'),
copy.deepcopy(self.opts),
tag='module')
def tearDown(self):
del self.loader
def test_single_item_no_dot(self):
'''
Checks that a KeyError is raised when the function key does not contain a '.'
'''
key = 'testing_no_dot'
expected = "The key '{0}' should contain a '.'".format(key)
with self.assertRaises(KeyError) as err:
inspect.isfunction(self.loader['testing_no_dot'])
result = err.exception.args[0]
assert result == expected, result
module_template = '''
__load__ = ['test', 'test_alias']
__func_alias__ = dict(test_alias='working_alias')
from salt.utils.decorators import depends
def test():
return {count}
def test_alias():
return True
def test2():
return True
@depends('non_existantmodulename')
def test3():
return True
@depends('non_existantmodulename', fallback_function=test)
def test4():
return True
'''
class LazyLoaderReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertest'
module_key = 'loadertest.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
self.count = 0
opts = copy.deepcopy(self.opts)
dirs = salt.loader._module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = salt.loader.utils(opts)
self.proxy = salt.loader.proxy(opts)
self.minion_mods = salt.loader.minion_mods(opts)
self.loader = salt.loader.LazyLoader(
dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
def tearDown(self):
shutil.rmtree(self.tmp_dir)
for attrname in ('tmp_dir', 'utils', 'proxy', 'loader', 'minion_mods', 'utils'):
try:
delattr(self, attrname)
except AttributeError:
continue
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
self.count += 1
with salt.utils.files.fopen(self.module_path, 'wb') as fh:
fh.write(
salt.utils.stringutils.to_bytes(
module_template.format(count=self.count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
def rm_module(self):
os.unlink(self.module_path)
remove_bytecode(self.module_path)
@property
def module_path(self):
return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name))
def test_alias(self):
'''
Make sure that you can access alias-d modules
'''
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertNotIn('{0}.test_alias'.format(self.module_name), self.loader)
self.assertTrue(inspect.isfunction(self.loader['{0}.working_alias'.format(self.module_name)]))
def test_clear(self):
self.assertTrue(inspect.isfunction(self.loader['test.ping']))
self.update_module() # write out out custom module
self.loader.clear() # clear the loader dict
# force a load of our module
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
# make sure we only loaded our custom module
# which means that we did correctly refresh the file mapping
for k, v in six.iteritems(self.loader._dict):
self.assertTrue(k.startswith(self.module_name))
def test_load(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.assertTrue(inspect.isfunction(self.loader[self.module_key]))
def test__load__(self):
'''
If a module specifies __load__ we should only load/expose those modules
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '2', self.loader)
def test__load__and_depends(self):
'''
If a module specifies __load__ we should only load/expose those modules
'''
self.update_module()
# ensure it doesn't exist
self.assertNotIn(self.module_key + '3', self.loader)
self.assertNotIn(self.module_key + '4', self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# make sure it updates correctly
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), self.count)
self.rm_module()
        # make sure that even if we remove the module, it's still loaded until a clear
self.assertEqual(self.loader[self.module_key](), self.count)
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
virtual_aliases = ('loadertest2', 'loadertest3')
virtual_alias_module_template = '''
__virtual_aliases__ = {0}
def test():
return True
'''.format(virtual_aliases)
class LazyLoaderVirtualAliasTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertest'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
opts = copy.deepcopy(self.opts)
dirs = salt.loader._module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = salt.loader.utils(opts)
self.proxy = salt.loader.proxy(opts)
self.minion_mods = salt.loader.minion_mods(opts)
self.loader = salt.loader.LazyLoader(
dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
def tearDown(self):
del self.tmp_dir
del self.utils
del self.proxy
del self.minion_mods
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
with salt.utils.files.fopen(self.module_path, 'wb') as fh:
fh.write(salt.utils.stringutils.to_bytes(virtual_alias_module_template))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
@property
def module_path(self):
return os.path.join(self.tmp_dir, '{0}.py'.format(self.module_name))
def test_virtual_alias(self):
'''
Test the __virtual_alias__ feature
'''
self.update_module()
mod_names = [self.module_name] + list(virtual_aliases)
for mod_name in mod_names:
func_name = '.'.join((mod_name, 'test'))
log.debug('Running %s (dict attribute)', func_name)
self.assertTrue(self.loader[func_name]())
log.debug('Running %s (loader attribute)', func_name)
self.assertTrue(getattr(self.loader, mod_name).test())
submodule_template = '''
from __future__ import absolute_import
import {0}.lib
def test():
return ({count}, {0}.lib.test())
'''
submodule_lib_template = '''
def test():
return {count}
'''
class LazyLoaderSubmodReloadingTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertestsubmod'
module_key = 'loadertestsubmod.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
os.makedirs(self.module_dir)
self.count = 0
self.lib_count = 0
opts = copy.deepcopy(self.opts)
dirs = salt.loader._module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = salt.loader.utils(opts)
self.proxy = salt.loader.proxy(opts)
self.minion_mods = salt.loader.minion_mods(opts)
self.loader = salt.loader.LazyLoader(
dirs,
opts,
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.utils
del self.proxy
del self.minion_mods
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_module(self):
self.count += 1
with salt.utils.files.fopen(self.module_path, 'wb') as fh:
fh.write(
salt.utils.stringutils.to_bytes(
submodule_template.format(self.module_name, count=self.count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.module_path)
def rm_module(self):
os.unlink(self.module_path)
remove_bytecode(self.module_path)
def update_lib(self):
self.lib_count += 1
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
with salt.utils.files.fopen(self.lib_path, 'wb') as fh:
fh.write(
salt.utils.stringutils.to_bytes(
submodule_lib_template.format(count=self.lib_count)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(self.lib_path)
def rm_lib(self):
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
os.unlink(self.lib_path)
remove_bytecode(self.lib_path)
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
@property
def module_path(self):
return os.path.join(self.module_dir, '__init__.py')
@property
def lib_path(self):
return os.path.join(self.module_dir, 'lib.py')
def test_basic(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
self.update_module()
self.update_lib()
self.loader.clear()
self.assertIn(self.module_key, self.loader)
def test_reload(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
for x in range(1, 3):
self.update_lib()
self.update_module()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the module
for x in range(1, 3):
self.update_module()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# update just the lib
for x in range(1, 3):
self.update_lib()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader._dict)
self.assertIn(self.module_key, self.loader)
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.rm_module()
        # make sure that even if we remove the module, it's still loaded until a clear
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
def test_reload_missing_lib(self):
# ensure it doesn't exist
self.assertNotIn(self.module_key, self.loader)
# update both the module and the lib
self.update_module()
self.update_lib()
self.loader.clear()
self.assertEqual(self.loader[self.module_key](), (self.count, self.lib_count))
# remove the lib, this means we should fail to load the module next time
self.rm_lib()
self.loader.clear()
self.assertNotIn(self.module_key, self.loader)
mod_template = '''
def test():
return ({val})
'''
class LazyLoaderModulePackageTest(TestCase):
'''
Test the loader of salt with changing modules
'''
module_name = 'loadertestmodpkg'
module_key = 'loadertestmodpkg.test'
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
dirs = salt.loader._module_dirs(copy.deepcopy(self.opts), 'modules', 'module')
dirs.append(self.tmp_dir)
self.loader = salt.loader.LazyLoader(
dirs,
copy.deepcopy(self.opts),
tag='module')
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.loader
@classmethod
def tearDownClass(cls):
del cls.opts
def update_pyfile(self, pyfile, contents):
dirname = os.path.dirname(pyfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
with salt.utils.files.fopen(pyfile, 'wb') as fh:
fh.write(salt.utils.stringutils.to_bytes(contents))
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(pyfile)
def rm_pyfile(self, pyfile):
os.unlink(pyfile)
remove_bytecode(pyfile)
def update_module(self, relative_path, contents):
self.update_pyfile(os.path.join(self.tmp_dir, relative_path), contents)
def rm_module(self, relative_path):
self.rm_pyfile(os.path.join(self.tmp_dir, relative_path))
def test_module(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo.py', mod_template.format(val=1))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 1)
def test_package(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo/__init__.py', mod_template.format(val=2))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 2)
def test_module_package_collision(self):
# ensure it doesn't exist
self.assertNotIn('foo', self.loader)
self.assertNotIn('foo.test', self.loader)
self.update_module('foo.py', mod_template.format(val=3))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 3)
self.update_module('foo/__init__.py', mod_template.format(val=4))
self.loader.clear()
self.assertIn('foo.test', self.loader)
self.assertEqual(self.loader['foo.test'](), 4)
deep_init_base = '''
from __future__ import absolute_import
import {0}.top_lib
import {0}.top_lib.mid_lib
import {0}.top_lib.mid_lib.bot_lib
def top():
return {0}.top_lib.test()
def mid():
return {0}.top_lib.mid_lib.test()
def bot():
return {0}.top_lib.mid_lib.bot_lib.test()
'''
class LazyLoaderDeepSubmodReloadingTest(TestCase):
module_name = 'loadertestsubmoddeep'
libs = ('top_lib', 'mid_lib', 'bot_lib')
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
if not os.path.isdir(TMP):
os.makedirs(TMP)
def setUp(self):
self.tmp_dir = tempfile.mkdtemp(dir=TMP)
os.makedirs(self.module_dir)
self.lib_count = collections.defaultdict(int) # mapping of path -> count
# bootstrap libs
with salt.utils.files.fopen(os.path.join(self.module_dir, '__init__.py'), 'w') as fh:
# No .decode() needed here as deep_init_base is defined as str and
# not bytes.
fh.write(
salt.utils.stringutils.to_str(
deep_init_base.format(self.module_name)
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
self.lib_paths = {}
dir_path = self.module_dir
for lib_name in self.libs:
dir_path = os.path.join(dir_path, lib_name)
self.lib_paths[lib_name] = dir_path
os.makedirs(dir_path)
self.update_lib(lib_name)
opts = copy.deepcopy(self.opts)
dirs = salt.loader._module_dirs(opts, 'modules', 'module')
dirs.append(self.tmp_dir)
self.utils = salt.loader.utils(opts)
self.proxy = salt.loader.proxy(opts)
self.minion_mods = salt.loader.minion_mods(opts)
self.loader = salt.loader.LazyLoader(
dirs,
copy.deepcopy(opts),
tag='module',
pack={'__utils__': self.utils,
'__proxy__': self.proxy,
'__salt__': self.minion_mods})
self.assertIn('{0}.top'.format(self.module_name), self.loader)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
del self.tmp_dir
del self.lib_paths
del self.utils
del self.proxy
del self.minion_mods
del self.loader
del self.lib_count
@classmethod
def tearDownClass(cls):
del cls.opts
@property
def module_dir(self):
return os.path.join(self.tmp_dir, self.module_name)
def update_lib(self, lib_name):
for modname in list(sys.modules):
if modname.startswith(self.module_name):
del sys.modules[modname]
path = os.path.join(self.lib_paths[lib_name], '__init__.py')
self.lib_count[lib_name] += 1
with salt.utils.files.fopen(path, 'wb') as fh:
fh.write(
salt.utils.stringutils.to_bytes(
submodule_lib_template.format(count=self.lib_count[lib_name])
)
)
fh.flush()
os.fsync(fh.fileno()) # flush to disk
# pyc files don't like it when we change the original quickly
# since the header bytes only contain the timestamp (granularity of seconds)
# TODO: don't write them? Is *much* slower on re-load (~3x)
# https://docs.python.org/2/library/sys.html#sys.dont_write_bytecode
remove_bytecode(path)
def test_basic(self):
self.assertIn('{0}.top'.format(self.module_name), self.loader)
def _verify_libs(self):
for lib in self.libs:
self.assertEqual(self.loader['{0}.{1}'.format(self.module_name, lib.replace('_lib', ''))](),
self.lib_count[lib])
def test_reload(self):
'''
Make sure that we can reload all libraries of arbitrary depth
'''
self._verify_libs()
# update them all
for lib in self.libs:
for x in range(5):
self.update_lib(lib)
self.loader.clear()
self._verify_libs()
class LoaderGlobalsTest(ModuleCase):
'''
Test all of the globals that the loader is responsible for adding to modules
This shouldn't be done here, but should rather be done per module type (in the cases where they are used)
so they can check ALL globals that they have (or should have) access to.
This is intended as a shorter term way of testing these so we don't break the loader
'''
def _verify_globals(self, mod_dict):
'''
Verify that the globals listed in the doc string (from the test) are in these modules
'''
# find the globals
global_vars = []
for val in six.itervalues(mod_dict):
# only find salty globals
if val.__module__.startswith('salt.loaded'):
if hasattr(val, '__globals__'):
if hasattr(val, '__wrapped__') or '__wrapped__' in val.__globals__:
global_vars.append(sys.modules[val.__module__].__dict__)
else:
global_vars.append(val.__globals__)
# if we couldn't find any, then we have no modules -- so something is broken
self.assertNotEqual(global_vars, [], msg='No modules were loaded.')
# get the names of the globals you should have
func_name = inspect.stack()[1][3]
names = next(six.itervalues(salt.utils.yaml.safe_load(getattr(self, func_name).__doc__)))
# Now, test each module!
for item in global_vars:
for name in names:
self.assertIn(name, list(item.keys()))
def test_auth(self):
'''
Test that auth mods have:
- __pillar__
- __grains__
- __salt__
- __context__
'''
self._verify_globals(salt.loader.auth(self.master_opts))
def test_runners(self):
'''
Test that runners have:
- __pillar__
- __salt__
- __opts__
- __grains__
- __context__
'''
self._verify_globals(salt.loader.runner(self.master_opts))
def test_returners(self):
'''
Test that returners have:
- __salt__
- __opts__
- __pillar__
- __grains__
- __context__
'''
self._verify_globals(salt.loader.returners(self.master_opts, {}))
def test_pillars(self):
'''
Test that pillars have:
- __salt__
- __opts__
- __pillar__
- __grains__
- __context__
'''
self._verify_globals(salt.loader.pillars(self.master_opts, {}))
def test_tops(self):
'''
Test that tops have: []
'''
self._verify_globals(salt.loader.tops(self.master_opts))
def test_outputters(self):
'''
Test that outputters have:
- __opts__
- __pillar__
- __grains__
- __context__
'''
self._verify_globals(salt.loader.outputters(self.master_opts))
def test_serializers(self):
'''
Test that serializers have: []
'''
self._verify_globals(salt.loader.serializers(self.master_opts))
def test_states(self):
'''
Test that states have:
- __pillar__
- __salt__
- __opts__
- __grains__
- __context__
'''
self._verify_globals(salt.loader.states(self.master_opts, {}, {}, {}))
def test_renderers(self):
'''
Test that renderers have:
- __salt__ # Execution functions (i.e. __salt__['test.echo']('foo'))
- __grains__ # Grains (i.e. __grains__['os'])
- __pillar__ # Pillar data (i.e. __pillar__['foo'])
- __opts__ # Minion configuration options
- __context__ # Context dict shared amongst all modules of the same type
'''
self._verify_globals(salt.loader.render(self.master_opts, {}))
class RawModTest(TestCase):
'''
Test the interface of raw_mod
'''
def setUp(self):
self.opts = salt.config.minion_config(None)
def tearDown(self):
del self.opts
def test_basic(self):
testmod = salt.loader.raw_mod(self.opts, 'test', None)
for k, v in six.iteritems(testmod):
self.assertEqual(k.split('.')[0], 'test')
def test_bad_name(self):
testmod = salt.loader.raw_mod(self.opts, 'module_we_do_not_have', None)
self.assertEqual(testmod, {})
class NetworkUtilsTestCase(ModuleCase):
def test_is_private(self):
mod = salt.loader.raw_mod(self.minion_opts, 'network', None)
self.assertTrue(mod['network.is_private']('10.0.0.1'), True)
def test_is_loopback(self):
mod = salt.loader.raw_mod(self.minion_opts, 'network', None)
self.assertTrue(mod['network.is_loopback']('127.0.0.1'), True)
class LazyLoaderOptimizationOrderTest(TestCase):
'''
Test the optimization order priority in the loader (PY3)
'''
module_name = 'lazyloadertest'
module_content = textwrap.dedent('''\
# -*- coding: utf-8 -*-
from __future__ import absolute_import
def test():
return True
''')
@classmethod
def setUpClass(cls):
cls.opts = salt.config.minion_config(None)
cls.opts['grains'] = salt.loader.grains(cls.opts)
def setUp(self):
# Setup the module
self.module_dir = tempfile.mkdtemp(dir=TMP)
self.module_file = os.path.join(self.module_dir,
'{0}.py'.format(self.module_name))
def _get_loader(self, order=None):
opts = copy.deepcopy(self.opts)
if order is not None:
opts['optimization_order'] = order
# Return a loader
return salt.loader.LazyLoader([self.module_dir], opts, tag='module')
def _get_module_filename(self):
# The act of referencing the loader entry forces the module to be
# loaded by the LazyDict.
mod_fullname = self.loader[next(iter(self.loader))].__module__
return sys.modules[mod_fullname].__file__
def _expected(self, optimize=0):
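        # e.g. 'lazyloadertest.cpython-36.opt-1.pyc' for CPython 3.6 with
        # optimize=1, or plain 'lazyloadertest.pyc' on Python 2.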
if six.PY3:
return 'lazyloadertest.cpython-{0}{1}{2}.pyc'.format(
sys.version_info[0],
sys.version_info[1],
'' if not optimize else '.opt-{0}'.format(optimize)
)
else:
return 'lazyloadertest.pyc'
def _write_module_file(self):
with salt.utils.files.fopen(self.module_file, 'w') as fh:
fh.write(self.module_content)
fh.flush()
os.fsync(fh.fileno())
def _byte_compile(self):
if salt.loader.USE_IMPORTLIB:
# Skip this check as "optimize" is unique to PY3's compileall
# module, and this will be a false error when Pylint is run on
# Python 2.
# pylint: disable=unexpected-keyword-arg
compileall.compile_file(self.module_file, quiet=1, optimize=0)
compileall.compile_file(self.module_file, quiet=1, optimize=1)
compileall.compile_file(self.module_file, quiet=1, optimize=2)
# pylint: enable=unexpected-keyword-arg
else:
compileall.compile_file(self.module_file, quiet=1)
def _test_optimization_order(self, order):
self._write_module_file()
self._byte_compile()
# Clean up the original file so that we can be assured we're only
# loading the byte-compiled files(s).
os.remove(self.module_file)
self.loader = self._get_loader(order)
filename = self._get_module_filename()
basename = os.path.basename(filename)
assert basename == self._expected(order[0]), basename
if not salt.loader.USE_IMPORTLIB:
# We are only testing multiple optimization levels on Python 3.5+
return
# Remove the file and make a new loader. We should now load the
# byte-compiled file with an optimization level matching the 2nd
# element of the order list.
os.remove(filename)
self.loader = self._get_loader(order)
filename = self._get_module_filename()
basename = os.path.basename(filename)
assert basename == self._expected(order[1]), basename
# Remove the file and make a new loader. We should now load the
# byte-compiled file with an optimization level matching the 3rd
# element of the order list.
os.remove(filename)
self.loader = self._get_loader(order)
filename = self._get_module_filename()
basename = os.path.basename(filename)
assert basename == self._expected(order[2]), basename
def test_optimization_order(self):
'''
Test the optimization_order config param
'''
self._test_optimization_order([0, 1, 2])
self._test_optimization_order([0, 2, 1])
if salt.loader.USE_IMPORTLIB:
# optimization_order only supported on Python 3.5+, earlier
# releases only support unoptimized .pyc files.
self._test_optimization_order([1, 2, 0])
self._test_optimization_order([1, 0, 2])
self._test_optimization_order([2, 0, 1])
self._test_optimization_order([2, 1, 0])
def test_load_source_file(self):
'''
Make sure that .py files are preferred over .pyc files
'''
self._write_module_file()
self._byte_compile()
self.loader = self._get_loader()
filename = self._get_module_filename()
basename = os.path.basename(filename)
expected = 'lazyloadertest.py' if six.PY3 else 'lazyloadertest.pyc'
assert basename == expected, basename
avg_line_length: 32.66063 | max_line_length: 109 | alphanum_fraction: 0.611587
hexsha: 564368bc3177eaef7dacff296611ee7e3a02cd2e | size: 6,136 | ext: py | lang: Python
max_stars: repo=zzpwahaha/Chimera-Control-Trim, path=Chimera/python3_scripts/test_dds_ramp.py, head=df1bbf6bea0b87b8c7c9a99dce213fdc249118f2, licenses=["MIT"], count=null, events=null
max_issues: repo=zzpwahaha/Chimera-Control-Trim, path=Chimera/python3_scripts/test_dds_ramp.py, head=df1bbf6bea0b87b8c7c9a99dce213fdc249118f2, licenses=["MIT"], count=null, events=null
max_forks: repo=zzpwahaha/Chimera-Control-Trim, path=Chimera/python3_scripts/test_dds_ramp.py, head=df1bbf6bea0b87b8c7c9a99dce213fdc249118f2, licenses=["MIT"], count=null, events=null
content:
from axis_fifo import AXIS_FIFO
from devices import fifo_devices
from axi_gpio import AXI_GPIO
from devices import gpio_devices
from dac81416 import DAC81416
from ad9959 import AD9959
import struct
import math
from dds_lock_pll import writeToDDS
from getSeqGPIOWords import getSeqGPIOWords
class GPIO_seq_point:
def __init__(self, address, time, outputA, outputB):
self.address = address
self.time = time
self.outputA = outputA
self.outputB = outputB
class DDS_atw_seq_point:
def __init__(self, address, time, start, steps, incr, chan):
self.address = address
self.time = time
self.start = start
self.steps = steps
self.incr = incr
self.chan = chan
class DDS_ftw_seq_point:
def __init__(self, address, time, start, steps, incr, chan):
self.address = address
self.time = time
self.start = start
self.steps = steps
self.incr = incr
self.chan = chan
class DDS_ramp_tester:
def __init__(self, device, device_atw_seq, device_ftw_seq, main_seq):
#self.dds = AD9959(device) # initialize DDS -- not needed for now
self.gpio2 = AXI_GPIO(gpio_devices['axi_gpio_2'])
self.fifo_dds_atw_seq = AXIS_FIFO(device_atw_seq)
self.fifo_dds_ftw_seq = AXIS_FIFO(device_ftw_seq)
self.fifo_main_seq = AXIS_FIFO(main_seq)
def write_atw_point(self, point):
#01XXAAAA TTTTTTTT DDDDDDDD DDDDDDDD
#phase acc shifts by 12 bit => 4096
#unused <= gpio_in(63 downto 58);
#acc_start <= gpio_in(57 downto 48);
#acc_steps <= gpio_in(47 downto 32);
#unused <= gpio_in(31 downto 26);
#acc_incr <= gpio_in(25 downto 4);
#unused <= gpio_in( 3 downto 2);
#acc_chan <= gpio_in( 1 downto 0);
        self.fifo_dds_atw_seq.write_axis_fifo(b"\x01\x00" + struct.pack('>H', point.address))
self.fifo_dds_atw_seq.write_axis_fifo(struct.pack('>I', point.time))
self.fifo_dds_atw_seq.write_axis_fifo(struct.pack('>I', point.start*256*256 + point.steps))
self.fifo_dds_atw_seq.write_axis_fifo(struct.pack('>I', point.incr*16+point.chan))
def write_ftw_point(self, point):
#01XXAAAA TTTTTTTT DDDDDDDD DDDDDDDD DDDDDDDD
#phase acc shifts by 12 bit => 4096
#acc_start <= gpio_in(95 downto 64);
#acc_steps <= gpio_in(63 downto 48);
#acc_incr <= gpio_in(47 downto 4);
#acc_chan <= to_integer(unsigned(gpio_in( 3 downto 0)));
incr_hi = int(math.floor(point.incr*1.0/2**28))
incr_lo = point.incr - incr_hi * 2**28
        self.fifo_dds_ftw_seq.write_axis_fifo(b"\x01\x00" + struct.pack('>H', point.address))
self.fifo_dds_ftw_seq.write_axis_fifo(struct.pack('>I', point.time))
self.fifo_dds_ftw_seq.write_axis_fifo(struct.pack('>I', point.start))
self.fifo_dds_ftw_seq.write_axis_fifo(struct.pack('>I', point.steps*256*256 + incr_hi))
self.fifo_dds_ftw_seq.write_axis_fifo(struct.pack('>I', incr_lo*16+point.chan))
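    # Worked example (added sketch): for a hypothetical point with
    # incr = 2**30 + 5, the 44-bit frequency increment is split as
    #   incr_hi = floor(incr / 2**28) = 4
    #   incr_lo = incr - incr_hi * 2**28 = 5
    # so the third data word carries steps*65536 + incr_hi and the fourth
    # carries incr_lo*16 + chan, matching the bit layout noted above.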
#def reset_DDS(self): #--not needed
# self.dds = AD9959(device) # initialize DDS
def mod_enable(self):
self.gpio2.set_bit(0, channel=1)
def mod_disable(self):
self.gpio2.clear_bit(0, channel=1)
def mod_report(self):
        print(self.gpio2.read_axi_gpio(channel=1))
def dds_seq_write_atw_points(self):
points=[]
#these ramps should complete in just under 64 ms
points.append(DDS_atw_seq_point(address=0,time=10000,start=1023,steps=0,incr=0,chan=1)) #25% to 75%
# points.append(DDS_atw_seq_point(address=1,time=1000,start=256,steps=1,incr=0,chan=3)) #25% to 75%
points.append(DDS_atw_seq_point(address=1,time=0,start=0,steps= 0,incr= 0,chan=0))
for point in points:
self.write_atw_point(point)
def dds_seq_write_ftw_points(self):
points=[]
#these ramps should complete in just under 64 ms
points.append(DDS_ftw_seq_point(address=0,time=0,start=472446000,steps=0,incr=0,chan=0))
#points.append(DDS_ftw_seq_point(address=1,time= 20000, start=4000000,steps=0,incr=0,chan=0))
# points.append(DDS_ftw_seq_point(address=1,time=1,start=1000000,steps=1,incr=0,chan=3))
points.append(DDS_ftw_seq_point(address=1,time= 0,start= 0,steps= 0,incr= 0,chan=0))
for point in points:
self.write_ftw_point(point)
def dio_seq_write_points(self):
points=[]
points.append(GPIO_seq_point(address=0,time=1,outputA=0x00000001,outputB=0x00000001))
points.append(GPIO_seq_point(address=1,time=20000,outputA=0x00000000,outputB=0x00000000))
points.append(GPIO_seq_point(address=2,time=40000,outputA=0x00000001,outputB=0x00000001))
points.append(GPIO_seq_point(address=3,time=6400000,outputA=0x00000000,outputB=0x00000000))
points.append(GPIO_seq_point(address=4,time=0,outputA=0x00000000,outputB=0x00000000))
for point in points:
print("add: ", point.address)
print("time: ", point.time)
print("outputA: ", point.outputA)
print("outputB: ", point.outputB)
# with open("/dev/axis_fifo_0x0000000080004000", "wb") as character:
for point in points:
# writeToSeqGPIO(character, point)
seqWords = getSeqGPIOWords(point)
for word in seqWords:
# print word
self.fifo_main_seq.write_axis_fifo(word[0], MSB_first=False)
def program(tester):
#tester.dds_seq_write_atw_points()
tester.dds_seq_write_ftw_points()
tester.dio_seq_write_points()
# ~ print('Next, we need to enable modulation')
# ~ print(' tester.mod_enable()')
# ~ print('Now, we can use the software trigger')
# ~ print(' trigger()')
# ~ print('All AXI peripherals can be reset, note this does not disable modulation')
# ~ print(' reset()')
# ~ print('Finally, don\'t forget to disable modulation again')
# ~ print(' tester.mod_disable()')
if __name__ == "__main__":
from soft_trigger import trigger
from reset_all import reset
import sys
import dds_lock_pll
tester = DDS_ramp_tester(fifo_devices['AD9959_1'], fifo_devices['AD9959_1_seq_atw'], fifo_devices['AD9959_1_seq_ftw'], fifo_devices['GPIO_seq'])
tester.mod_disable()
reset()
dds_lock_pll.dds_lock_pll()
program(tester)
tester.mod_enable()
trigger()
| 36.963855
| 146
| 0.704205
|
fad23d784eb4b4f941e0451e5bbe922f12c23094
| 30,800
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_service_endpoint_policies_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_service_endpoint_policies_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4
|
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/operations/_service_endpoint_policies_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations(object):
"""ServiceEndpointPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicy"]
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ServiceEndpointPolicy"]
"""Updates service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyListResult"]
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ServiceEndpointPolicyListResult"]
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_12_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
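# Usage sketch (added; not part of the generated client). The entry points
# below assume the standard azure-identity and azure-mgmt-network packages:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.service_endpoint_policies.begin_create_or_update(
#       "my-resource-group", "my-policy", {"location": "westus"})
#   policy = poller.result()  # blocks until the long-running operation completes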
| 50
| 214
| 0.670097
|
c4c1e2de7cbb47de7bb5f3c568afdd543bfd21d3
| 1,359
|
py
|
Python
|
rlbench/action_modes/action_mode.py
|
guhur/RLBench
|
e267f9eb036390109b24d9ace0b032407ac7f29d
|
[
"BSD-3-Clause"
] | null | null | null |
rlbench/action_modes/action_mode.py
|
guhur/RLBench
|
e267f9eb036390109b24d9ace0b032407ac7f29d
|
[
"BSD-3-Clause"
] | null | null | null |
rlbench/action_modes/action_mode.py
|
guhur/RLBench
|
e267f9eb036390109b24d9ace0b032407ac7f29d
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import abstractmethod
import numpy as np
from rlbench.action_modes.arm_action_modes import ArmActionMode
from rlbench.action_modes.gripper_action_modes import GripperActionMode
from rlbench.backend.scene import Scene
class ActionMode(object):
def __init__(self,
arm_action_mode: 'ArmActionMode',
gripper_action_mode: 'GripperActionMode'):
self.arm_action_mode = arm_action_mode
self.gripper_action_mode = gripper_action_mode
@abstractmethod
def action(self, scene: Scene, action: np.ndarray):
pass
@abstractmethod
def action_shape(self, scene: Scene):
pass
class MoveArmThenGripper(ActionMode):
"""The arm action is first applied, followed by the gripper action. """
def action(self, scene: Scene, action: np.ndarray):
arm_act_size = np.prod(self.arm_action_mode.action_shape(scene))
arm_action = np.array(action[:arm_act_size])
ee_action = np.array(action[arm_act_size:])
# DEBUG
observations = self.arm_action_mode.action(scene, arm_action)
self.gripper_action_mode.action(scene, ee_action)
return observations
def action_shape(self, scene: Scene):
return np.prod(self.arm_action_mode.action_shape(scene)) + np.prod(
self.gripper_action_mode.action_shape(scene))
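# Usage sketch (added; JointVelocity and Discrete are the usual RLBench action
# modes and are assumptions here, not defined in this module):
#
#   from rlbench.action_modes.arm_action_modes import JointVelocity
#   from rlbench.action_modes.gripper_action_modes import Discrete
#   action_mode = MoveArmThenGripper(arm_action_mode=JointVelocity(),
#                                    gripper_action_mode=Discrete())
#   # A flat action is then np.concatenate([arm_part, gripper_part]), whose
#   # total length equals action_mode.action_shape(scene).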
| 32.357143
| 75
| 0.710817
|
d347723fa1401388e12e1942276240d7da3e31b9
| 210
|
py
|
Python
|
Q04/Q04.2.py
|
edisalvo/patterns
|
a56a49289c3a7de87713afc8923f44a229037dd1
|
[
"MIT"
] | 8
|
2021-01-14T14:55:40.000Z
|
2021-03-01T13:55:56.000Z
|
Q04/Q04.2.py
|
edisalvo/patterns
|
a56a49289c3a7de87713afc8923f44a229037dd1
|
[
"MIT"
] | 17
|
2021-01-12T15:42:16.000Z
|
2021-03-04T06:44:55.000Z
|
Q04/Q04.2.py
|
edisalvo/patterns
|
a56a49289c3a7de87713afc8923f44a229037dd1
|
[
"MIT"
] | 22
|
2021-01-13T00:00:02.000Z
|
2021-03-22T18:54:11.000Z
|
def pattern(n):
    for i in range(n):
        j = n
        k = 0
        while j > i + 1:
            print(" ", end='')
            j -= 1
        while k < i + 1:
            print("*", end='')
            k += 1
        print()
print("give input for pattern")
n = int(input())
pattern(n)
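# Example (added note): for n = 4 the program prints a right-aligned triangle:
#    *
#   **
#  ***
# ****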
| 10
| 31
| 0.519048
|
c1eb220859aa5be4255e4c60d0eba6d4db5a30d0
| 120
|
py
|
Python
|
agent/lib/supervisor/__init__.py
|
OptimalBPM/optimalbpm
|
4c65cd43a36e3318a49de7cd598069ea0623dc69
|
[
"RSA-MD"
] | 2
|
2016-05-02T14:16:07.000Z
|
2020-06-25T19:24:29.000Z
|
agent/lib/supervisor/__init__.py
|
OptimalBPM/optimalbpm
|
4c65cd43a36e3318a49de7cd598069ea0623dc69
|
[
"RSA-MD"
] | null | null | null |
agent/lib/supervisor/__init__.py
|
OptimalBPM/optimalbpm
|
4c65cd43a36e3318a49de7cd598069ea0623dc69
|
[
"RSA-MD"
] | 1
|
2020-06-25T19:24:31.000Z
|
2020-06-25T19:24:31.000Z
|
"""
This package contains functionality that manages the job queue and the workers
"""
__author__ = 'Nicklas Börjesson'
| 24
| 78
| 0.775
|
5646b1aa082a0ae18c79651f5ae017bb9edc1152
| 4,040
|
py
|
Python
|
datalad/interface/tests/test_docs.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | null | null | null |
datalad/interface/tests/test_docs.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 6
|
2015-11-20T21:41:13.000Z
|
2018-06-12T14:27:32.000Z
|
datalad/interface/tests/test_docs.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 1
|
2017-03-28T14:44:16.000Z
|
2017-03-28T14:44:16.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for interface doc wranglers.
"""
__docformat__ = 'restructuredtext'
from datalad.interface.base import dedent_docstring
from datalad.interface.base import alter_interface_docs_for_api
from datalad.interface.base import alter_interface_docs_for_cmdline
from datalad.tests.utils import assert_true, assert_false, assert_in, \
assert_not_in, eq_
demo_doc = """\
Bla bla summary
Generic intro blurb. Ping pong ping pong ping pong ping pong. Ping pong ping
pong ping pong ping pong. Ping pong ping pong ping pong ping pong. Ping pong
ping pong ping pong ping pong. Ping pong ping pong ping pong ping pong. Ping
pong ping pong ping pong ping pong.
|| CMDLINE >>
|| REFLOW >>
Something for the cmdline only
Multiline!
<< REFLOW ||
<< CMDLINE ||
|| PYTHON >>
|| REFLOW >>
Some Python-only bits
Multiline!
<< REFLOW ||
<< PYTHON ||
And an example for in-line markup: [PY: just for Python PY] and
the other one [CMD: just for the command line CMD]. End of demo.
Generic appendix. Ding dong ding dong ding dong. Ding dong ding dong ding
dong. Ding dong ding dong ding dong. Ding dong ding dong ding dong. Ding
dong ding dong ding dong.
"""
demo_paramdoc = """\
Parameters
----------
dataset : Dataset or None, optional
something [PY: python only PY] inbetween [CMD: cmdline only CMD] appended [PY: more python PY]
dataset is given, an attempt is made to identify the dataset based
Dataset (e.g. a path), or value must be `None`. [Default: None]
"""
demo_argdoc = """\
specify the dataset to perform the install operation
on. If no dataset is given, an attempt is made to
identify the dataset based on the current working
directory and/or the `path` given. Constraints: Value
must be a Dataset or a valid identifier of a Dataset
(e.g. a path), or value must be `None`. [Default:
None]
"""
def test_dedent():
assert_false(dedent_docstring("one liner").endswith("\n"))
def test_alter_interface_docs_for_api():
alt = alter_interface_docs_for_api(demo_doc)
alt_l = alt.split('\n')
# dedented
assert_false(alt_l[0].startswith(' '))
assert_false(alt_l[-1].startswith(' '))
assert_not_in('CMD', alt)
assert_not_in('PY', alt)
assert_not_in('REFLOW', alt)
assert_in("Some Python-only bits Multiline!", alt)
altpd = alter_interface_docs_for_api(demo_paramdoc)
assert_in('python', altpd)
assert_in('inbetween', altpd)
assert_in('appended', altpd)
assert_not_in('cmdline', altpd)
def test_alter_interface_docs_for_cmdline():
alt = alter_interface_docs_for_cmdline(demo_doc)
alt_l = alt.split('\n')
# dedented
assert_false(alt_l[0].startswith(' '))
assert_false(alt_l[-1].startswith(' '))
assert_not_in('PY', alt)
assert_not_in('CMD', alt)
assert_not_in('REFLOW', alt)
assert_in("Something for the cmdline only Multiline!", alt)
# args
altarg = alter_interface_docs_for_cmdline(demo_argdoc)
# RST role markup
eq_(alter_interface_docs_for_cmdline(':murks:`me and my buddies`'),
'me and my buddies')
# spread across lines
eq_(alter_interface_docs_for_cmdline(':term:`Barbara\nStreisand`'),
'Barbara\nStreisand')
# multiple on one line
eq_(alter_interface_docs_for_cmdline(
':term:`one` bla bla :term:`two` bla'),
'one bla bla two bla')
altpd = alter_interface_docs_for_cmdline(demo_paramdoc)
assert_not_in('python', altpd)
assert_in('inbetween', altpd)
assert_in('appended', altpd)
assert_in('cmdline', altpd)
| 32.063492
| 100
| 0.659653
|
c85dd3ec6991d3f04dd19e64440042b2d4ea4667
| 7,631
|
py
|
Python
|
flax/plotters/madmax.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 174
|
2021-06-16T17:49:22.000Z
|
2022-03-17T03:03:17.000Z
|
flax/plotters/madmax.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 49
|
2021-06-17T14:10:53.000Z
|
2022-01-31T11:04:21.000Z
|
flax/plotters/madmax.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 80
|
2021-06-17T14:23:31.000Z
|
2022-02-24T05:52:47.000Z
|
import asyncio
import traceback
import os
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Optional
from flax.plotting.create_plots import resolve_plot_keys
from flax.plotters.plotters_util import run_plotter, run_command
log = logging.getLogger(__name__)
MADMAX_PLOTTER_DIR = "madmax-plotter"
def is_madmax_supported() -> bool:
return sys.platform.startswith("linux") or sys.platform in ["darwin", "win32", "cygwin"]
def get_madmax_install_path(plotters_root_path: Path) -> Path:
return plotters_root_path / MADMAX_PLOTTER_DIR
def get_madmax_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "madmax"
def get_madmax_executable_path_for_ksize(plotters_root_path: Path, ksize: int = 32) -> Path:
madmax_dir: Path = get_madmax_package_path()
madmax_exec: str = "flax_plot"
if ksize > 32:
madmax_exec += "_k34" # Use the flax_plot_k34 executable for k-sizes > 32
if sys.platform in ["win32", "cygwin"]:
madmax_exec += ".exe"
if not madmax_dir.exists():
madmax_dir = get_madmax_install_path(plotters_root_path) / "build"
return madmax_dir / madmax_exec
def get_madmax_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
info: Dict[str, Any] = {"display_name": "madMAx Plotter"}
installed: bool = False
supported: bool = is_madmax_supported()
    if get_madmax_executable_path_for_ksize(plotters_root_path).exists():
        version: Optional[str] = None
        try:
proc = run_command(
[os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path)), "--version"],
"Failed to call madmax with --version option",
capture_output=True,
text=True,
)
version = proc.stdout.strip()
except Exception as e:
print(f"Failed to determine madmax version: {e}")
if version is not None:
installed = True
info["version"] = version
else:
installed = False
info["installed"] = installed
if installed is False:
info["can_install"] = supported
return info
def install_madmax(plotters_root_path: Path):
if is_madmax_supported():
print("Installing dependencies.")
if sys.platform.startswith("linux"):
run_command(
[
"sudo",
"apt",
"install",
"-y",
"libsodium-dev",
"cmake",
"g++",
"git",
"build-essential",
],
"Could not install dependencies",
)
if sys.platform.startswith("darwin"):
run_command(
[
"brew",
"install",
"libsodium",
"cmake",
"git",
"autoconf",
"automake",
"libtool",
"wget",
],
"Could not install dependencies",
)
run_command(["git", "--version"], "Error checking Git version.")
print("Cloning git repository.")
run_command(
[
"git",
"clone",
"https://github.com/Chia-Network/chia-plotter-madmax.git",
MADMAX_PLOTTER_DIR,
],
"Could not clone madmax git repository",
cwd=os.fspath(plotters_root_path),
)
print("Installing git submodules.")
madmax_path: str = os.fspath(get_madmax_install_path(plotters_root_path))
run_command(
[
"git",
"submodule",
"update",
"--init",
"--recursive",
],
"Could not initialize git submodules",
cwd=madmax_path,
)
print("Running install script.")
run_command(["./make_devel.sh"], "Error while running install script", cwd=madmax_path)
else:
raise RuntimeError("Platform not supported yet for madmax plotter.")
progress = {
"[P1] Table 1 took": 0.01,
"[P1] Table 2 took": 0.06,
"[P1] Table 3 took": 0.12,
"[P1] Table 4 took": 0.2,
"[P1] Table 5 took": 0.28,
"[P1] Table 6 took": 0.36,
"[P1] Table 7 took": 0.42,
"[P2] Table 7 rewrite took": 0.43,
"[P2] Table 6 rewrite took": 0.48,
"[P2] Table 5 rewrite took": 0.51,
"[P2] Table 4 rewrite took": 0.55,
"[P2] Table 3 rewrite took": 0.58,
"[P2] Table 2 rewrite took": 0.61,
"[P3-2] Table 2 took": 0.66,
"[P3-2] Table 3 took": 0.73,
"[P3-2] Table 4 took": 0.79,
"[P3-2] Table 5 took": 0.85,
"[P3-2] Table 6 took": 0.92,
"[P3-2] Table 7 took": 0.98,
}
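# Example (added note): run_plotter is given this mapping, so a madMAx log
# line containing "[P2] Table 5 rewrite took" corresponds to a reported
# progress of 0.51, i.e. roughly 51% complete.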
def dir_with_trailing_slash(dir: str) -> str:
return dir if dir[-1] == os.path.sep else dir + os.path.sep
def plot_madmax(args, flax_root_path: Path, plotters_root_path: Path):
if sys.platform not in ["win32", "cygwin"]:
import resource
# madMAx has a ulimit -n requirement > 296:
# "Cannot open at least 296 files, please raise maximum open file limit in OS."
resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
if not os.path.exists(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)):
print("Installing madmax plotter.")
try:
install_madmax(plotters_root_path)
except Exception as e:
print(f"Exception while installing madmax plotter: {e}")
return
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
None if args.farmerkey == b"" else args.farmerkey.hex(),
None,
None if args.pool_key == b"" else args.pool_key.hex(),
None if args.contract == "" else args.contract,
flax_root_path,
log,
args.connect_to_daemon,
)
)
call_args = []
call_args.append(os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)))
call_args.append("-f")
call_args.append(bytes(plot_keys.farmer_public_key).hex())
if plot_keys.pool_public_key is not None:
call_args.append("-p")
call_args.append(bytes(plot_keys.pool_public_key).hex())
call_args.append("-t")
# s if s[-1] == os.path.sep else s + os.path.sep
call_args.append(dir_with_trailing_slash(args.tmpdir))
call_args.append("-2")
call_args.append(dir_with_trailing_slash(args.tmpdir2))
call_args.append("-d")
call_args.append(dir_with_trailing_slash(args.finaldir))
if plot_keys.pool_contract_address is not None:
call_args.append("-c")
call_args.append(plot_keys.pool_contract_address)
call_args.append("-n")
call_args.append(str(args.count))
call_args.append("-r")
call_args.append(str(args.threads))
call_args.append("-u")
call_args.append(str(args.buckets))
call_args.append("-v")
call_args.append(str(args.buckets3))
call_args.append("-w")
call_args.append(str(int(args.waitforcopy)))
call_args.append("-K")
call_args.append(str(args.rmulti2))
if args.size != 32:
call_args.append("-k")
call_args.append(str(args.size))
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_plotter(call_args, progress))
except Exception as e:
print(f"Exception while plotting: {type(e)} {e}")
print(f"Traceback: {traceback.format_exc()}")
| 33.178261
| 100
| 0.58721
|
d12b94978214bc265e37e6a9639e7d187a16534f
| 343
|
py
|
Python
|
security-bijective-functions.py
|
shreyakupadhyay/Hacks-py
|
d779e6eeca0a42dab68e599c1e48143338662a28
|
[
"MIT"
] | 11
|
2015-07-12T09:06:58.000Z
|
2020-09-16T06:35:39.000Z
|
security-bijective-functions.py
|
shreyakupadhyay/Hacks-py
|
d779e6eeca0a42dab68e599c1e48143338662a28
|
[
"MIT"
] | null | null | null |
security-bijective-functions.py
|
shreyakupadhyay/Hacks-py
|
d779e6eeca0a42dab68e599c1e48143338662a28
|
[
"MIT"
] | 6
|
2017-06-07T01:16:39.000Z
|
2020-04-30T21:42:21.000Z
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
a=int(raw_input())
arr = [int(i) for i in raw_input().strip().split()]
array=sorted(arr)
c=1
for i in range(0,len(arr)):
if(array[i]==i+1):
continue
else:
c=0
break
if(c==0):
print "NO"
if(c==1):
print "YES"
| 16.333333
| 69
| 0.539359
|
2e969d11dcc683a40bf712de2801712b773bf1c1
| 1,566
|
py
|
Python
|
scripts/metrics.py
|
manas-avi/detection-2016-nipsws
|
b25669dbf1c5d3d1a79638f928c989aca1c32622
|
[
"MIT"
] | null | null | null |
scripts/metrics.py
|
manas-avi/detection-2016-nipsws
|
b25669dbf1c5d3d1a79638f928c989aca1c32622
|
[
"MIT"
] | null | null | null |
scripts/metrics.py
|
manas-avi/detection-2016-nipsws
|
b25669dbf1c5d3d1a79638f928c989aca1c32622
|
[
"MIT"
] | 2
|
2018-12-02T08:39:24.000Z
|
2018-12-08T15:55:54.000Z
|
import numpy as np
import cv2
import pdb
import matplotlib.pyplot as plt
def dimg(img):
plt.imshow(img)
plt.show()
def calculate_iou(img_mask, gt_mask):
gt_mask *= 1.0
img_mask *= 1.0
img_and = cv2.bitwise_and(img_mask, gt_mask)
img_or = cv2.bitwise_or(img_mask, gt_mask)
j = np.count_nonzero(img_and)
i = np.count_nonzero(img_or)
iou = float(float(j)/float(i))
return iou
def calculate_overlapping(img_mask, gt_mask):
img_mask = cv2.resize(img_mask , (32,32))
gt_mask = cv2.resize(gt_mask , (32,32))
_,img_mask = cv2.threshold(img_mask,127,255,cv2.THRESH_BINARY)
_,gt_mask = cv2.threshold(gt_mask,127,255,cv2.THRESH_BINARY)
img_and = cv2.bitwise_and(img_mask*1.0, gt_mask*1.0)
j = np.count_nonzero(img_and)
i = np.count_nonzero(gt_mask)
overlap = float(float(j)/float(i))
return overlap
def calculate_datsets_belonging(img, wgan):
    # In the WGAN formulation, real samples that belong to the dataset are
    # labelled -1, so the critic score is used directly as the reward and
    # rescaled to lie between 0 and 1, with 1 meaning the image fits the
    # object's dataset distribution perfectly.
img = cv2.resize(img , (128,128))
_,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
img = np.expand_dims(img, axis=0)
img = np.expand_dims(img, axis=-1)
valid = wgan.critic.predict(img)[0][0]
# chaning the range from 1:-1 to 0:1
valid = (-valid + 1) / 2
return valid
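# Minimal sanity check (added sketch, not part of the original script): two
# 4x4 masks that overlap in one full row (4 pixels) give
# IoU = 4 / (8 + 8 - 4) = 1/3.
if __name__ == "__main__":
    a = np.zeros((4, 4), dtype=np.float32)
    b = np.zeros((4, 4), dtype=np.float32)
    a[:2, :] = 1.0   # top two rows
    b[1:3, :] = 1.0  # middle two rows
    print(calculate_iou(a, b))  # ~0.333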
| 27.473684
| 66
| 0.676884
|
12414801a524ec97eaa59cedddb7659e2c56ad33
| 8,343
|
py
|
Python
|
tests/components/media_player/test_async_helpers.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
tests/components/media_player/test_async_helpers.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
tests/components/media_player/test_async_helpers.py
|
erogleva/core
|
994ae09f69afe772150a698953c0d7386a745de2
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""The tests for the Async Media player helper functions."""
import asyncio
import unittest
import homeassistant.components.media_player as mp
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from tests.common import get_test_home_assistant
class AsyncMediaPlayer(mp.MediaPlayerEntity):
"""Async media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
async def async_media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
async def async_media_pause(self):
"""Send pause command."""
self._state = STATE_PAUSED
async def async_turn_on(self):
"""Turn the media player on."""
self._state = STATE_ON
async def async_turn_off(self):
"""Turn the media player off."""
self._state = STATE_OFF
class SyncMediaPlayer(mp.MediaPlayerEntity):
"""Sync media player test class."""
def __init__(self, hass):
"""Initialize the test media player."""
self.hass = hass
self._volume = 0
self._state = STATE_OFF
@property
def state(self):
"""State of the player."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def supported_features(self):
"""Flag media player features that are supported."""
return (
mp.const.SUPPORT_VOLUME_SET
| mp.const.SUPPORT_VOLUME_STEP
| mp.const.SUPPORT_PLAY
| mp.const.SUPPORT_PAUSE
| mp.const.SUPPORT_TURN_OFF
| mp.const.SUPPORT_TURN_ON
)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._volume = volume
def volume_up(self):
"""Turn volume up for media player."""
if self.volume_level < 1:
self.set_volume_level(min(1, self.volume_level + 0.2))
def volume_down(self):
"""Turn volume down for media player."""
if self.volume_level > 0:
self.set_volume_level(max(0, self.volume_level - 0.2))
def media_play_pause(self):
"""Play or pause the media player."""
if self._state == STATE_PLAYING:
self._state = STATE_PAUSED
else:
self._state = STATE_PLAYING
def toggle(self):
"""Toggle the power on the media player."""
if self._state in [STATE_OFF, STATE_IDLE]:
self._state = STATE_ON
else:
self._state = STATE_OFF
async def async_media_play_pause(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
await super().async_media_play_pause()
async def async_toggle(self):
"""Create a coroutine to wrap the future returned by ABC.
This allows the run_coroutine_threadsafe helper to be used.
"""
await super().async_toggle()
class TestAsyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = AsyncMediaPlayer(self.hass)
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.6
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
asyncio.run_coroutine_threadsafe(
self.player.async_set_volume_level(0.5), self.hass.loop
).result()
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.4
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
class TestSyncMediaPlayer(unittest.TestCase):
"""Test the media_player module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.player = SyncMediaPlayer(self.hass)
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Shut down test instance."""
self.hass.stop()
def test_volume_up(self):
"""Test the volume_up helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_up(), self.hass.loop
).result()
assert self.player.volume_level == 0.7
def test_volume_down(self):
"""Test the volume_down helper function."""
assert self.player.volume_level == 0
self.player.set_volume_level(0.5)
assert self.player.volume_level == 0.5
asyncio.run_coroutine_threadsafe(
self.player.async_volume_down(), self.hass.loop
).result()
assert self.player.volume_level == 0.3
def test_media_play_pause(self):
"""Test the media_play_pause helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PLAYING
asyncio.run_coroutine_threadsafe(
self.player.async_media_play_pause(), self.hass.loop
).result()
assert self.player.state == STATE_PAUSED
def test_toggle(self):
"""Test the toggle helper function."""
assert self.player.state == STATE_OFF
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_ON
asyncio.run_coroutine_threadsafe(
self.player.async_toggle(), self.hass.loop
).result()
assert self.player.state == STATE_OFF
| 32.088462
| 67
| 0.627472
|
3122030b8107f629f26470fa4bcbd229fbb77f9c
| 9,019
|
py
|
Python
|
userFunc/myFunction.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | 5
|
2020-04-08T14:14:05.000Z
|
2021-06-29T03:42:01.000Z
|
userFunc/myFunction.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | null | null | null |
userFunc/myFunction.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | 7
|
2020-04-15T15:07:39.000Z
|
2022-03-23T05:44:02.000Z
|
# -*- coding: utf-8 -*-
import bz2
import pickle
try:
    # On Python 3.x, cPickle was renamed to _pickle
    import _pickle as cPickle
except ImportError:
    import cPickle
import os
import numpy as np
import pandas as pd
import QUANTAXIS as qa
from QUANTAXIS.QAUtil.QACache import QA_util_cache as qacache
def setdiff_sorted(array1, array2, assume_unique=False):
"""find elements in one list that are not in the other
list_1 = ["a", "b", "c", "d", "e"]
list_2 = ["a", "f", "c", "m"]
main_list = setdiff_sorted(list_2,list_1)
main_list = setdiff_sorted(list_2,list_1, assume_unique=True)
"""
ans = np.setdiff1d(array1, array2, assume_unique).tolist()
if assume_unique:
return sorted(ans)
return ans
def getCodeList(isTesting=True, count=5000):
"""
    :param isTesting: whether to use the small built-in test data set
    :param count: maximum number of codes to return
    """
    if isTesting:
        # First-limit-up stocks from 2018-08, used in testing to cut debugging time
codelist = ['000023', '000068', '000407', '000561', '000590', '000593', '000608', '000610', '000626',
'000638',
'000657', '000659', '000663', '000669', '000677', '000705', '000759', '000766', '000780',
'000792',
'000815', '000852', '000885', '000909', '000913', '000921', '000928', '000931', '002006',
'002012',
'002034']
else:
codelist = qa.QA_fetch_stock_list_adv().code.tolist()
return codelist[:count]
def read_zxg(filename='zxg.txt', length=6):
"""从文件filename读取自选股列表
返回每行前length(默认:6)个字符(自动去除行首、行尾空格);
:param filename: 自选股文件名(默认:zxg.txt)
"""
filename = getRealFilename(filename)
resultList = alist = []
if os.path.isfile(filename):
with open(filename, 'r', encoding='UTF-8') as zxg:
alist = zxg.readlines()
for a in alist:
resultList.append(a.strip()[0:length])
return resultList
def xls2zxg(xlsfile, zxgFile):
"""xls转换成文本
"""
xlsfile = getRealFilename(xlsfile)
try:
df = pd.read_excel(xlsfile)
except Exception as e:
df = pd.read_csv(xlsfile, sep="\t", encoding="gbk", dtype={'证券代码': str})
df.to_csv(zxgFile, index=False, sep=" ", header=None)
def savexls(dataframe: pd.DataFrame, xlsfile: str, startrow=1, sheetName='Sheet1', index=True):
"""自动调整保存的列宽
Args:
dataframe:
xlsfile:
startrow:
sheetName:
index: bool
Returns:
"""
writer = pd.ExcelWriter(xlsfile, engine='xlsxwriter')
# Write excel to file using pandas to_excel
dataframe.to_excel(writer, startrow=startrow, sheet_name=sheetName, index=index)
# Indicate workbook and worksheet for formatting
workbook = writer.book
worksheet = writer.sheets[sheetName]
# Iterate through each column and set the width == the max length in that column. A padding length of 2 is also added.
for i, col in enumerate(dataframe.columns):
# find length of column i
column_len = dataframe[col].astype(str).str.len().max()
# Setting the length if the column header is larger
# than the max column value length
column_len = max(column_len, len(col)) + 2
# set the column length
worksheet.set_column(i, i, column_len)
writer.save()
def xls2Code(xlsfile):
"""提取xlsfile文件中的股票代码
"""
zxgfile = "/tmp/{}.txt".format(xlsfile)
xls2zxg(xlsfile, zxgfile)
return read_zxg(zxgfile, length=6)
def codeInETF(codes=[], filterStartWith=['159', '510', '512', '515', '513', '161', '162']):
"""股票代码过滤
"""
return [item for item in codes if item.startswith(tuple(filterStartWith))]
def etfAmountGreater(code, startDate, endDate=None, amount=1000):
"""成交额大于等于amount(万元)"""
df = qa.QA_fetch_index_day_adv(code, startDate, endDate)
return df[df['amount'] >= amount * 10000]
def codeInfo(codes):
"""返回指数或etf对应的股票信息"""
index = qa.QA_fetch_index_list_adv()
etf = qa.QA_fetch_etf_list()
return pd.concat([index[index['code'].isin(codes)], etf[etf['code'].isin(codes)]], axis=0)
def getRealFilename(filename):
"""返回第一个filename存在的文件名
"""
try:
# 当前目录
dir_path = os.path.dirname(os.path.realpath(__file__))
except:
dir_path = os.path.dirname(os.path.realpath("./"))
if not filename.find(os.sep) > -1:
if os.path.exists(filename):
# 当前目录优先
pass
else:
# 如果文件名(fname)没有目录,则加上当前目录
filename = os.path.join(dir_path, filename)
return filename
# def read_zxg_not_in_file(filename='zxg.txt', length=6):
def full_pickle(title, data):
"""Saves the "data" with the "title" and adds the .pickle
"""
pikd = open(title, 'wb')
pickle.dump(data, pikd)
pikd.close()
def loosen_pickle(file):
"""loads and returns a pickled objects
"""
pikd = open(file, 'rb')
data = pickle.load(pikd)
pikd.close()
return data
def compressed_pickle(title, data):
"""Pickle a file and then compress it into a file with extension
"""
with bz2.BZ2File(title + '.pbz2', 'w') as f:
cPickle.dump(data, f)
def decompress_pickle(file):
"""Load any compressed pickle file
:param file: 文件名
"""
if not os.path.exists(file):
file = file + '.pbz2'
data = bz2.BZ2File(file, 'rb')
data = cPickle.load(data)
return data
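# Illustrative sketch: a round trip through the bz2-compressed pickle helpers above.
# The file name 'example_cache' is hypothetical.
def _compressed_pickle_roundtrip_example():
    payload = {'codes': ['000023', '000068'], 'note': 'demo'}
    compressed_pickle('example_cache', payload)    # writes example_cache.pbz2
    restored = decompress_pickle('example_cache')  # the .pbz2 extension is added automatically
    assert restored == payload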
def somefunc(cls):
    """Class decorator implementing a simple singleton: the first instance created
    is cached and returned on every subsequent call."""
    instances = {}
def _wrapper(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return _wrapper
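# Illustrative sketch of the singleton decorator above; `_ExampleConfig` is a
# hypothetical class used only for the demo.
@somefunc
class _ExampleConfig:
    def __init__(self):
        self.loaded = True
def _singleton_example():
    # Both calls return the very same cached instance.
    return _ExampleConfig() is _ExampleConfig()  # -> True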
def CMI(data: pd.DataFrame, n=100):
"""如何把市场划分为趋势行情和震荡行情,也就成了这个策略的关键,恒温器策略引入了市场波动指数(Choppy Market Index),简称CMI
它是一个用来判断市场走势类型的技术指标。通过计算当前收盘价与N周期前收盘价的差值与这段时间内价格波动的范围的比值,来判断目前的价格走势是趋势还是震荡。
CMI的计算公式为:
CMI=(abs(Close-ref(close,(n-1)))*100/(HHV(high,n)-LLV(low,n))
其中,abs是绝对值,n是周期数。
策略逻辑
一般来说CMI的值在0~100区间,值越大,趋势越强。当CMI的值小于20时,策略认为市场处于震荡模式;当CMI的值大于等于20时,策略认为市场处于趋势模式。
整个策略逻辑,可以简化的写成下面这样:
如果CMI < 20,执行震荡策略;
如果CMI ≥ 20,执行趋势策略;
"""
close = data.close
dict = {"CMI": np.abs((close - qa.REF(close, n - 1))) * 100 / (qa.HHV(data.high, n) - qa.LLV(data.low, n))}
return pd.DataFrame(dict)
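# Illustrative sketch: classifying each bar as trending or range-bound with the
# CMI threshold described in the docstring above; the helper name is hypothetical.
def _cmi_regime_example(data: pd.DataFrame, n=100, threshold=20):
    cmi = CMI(data, n)['CMI']
    # True where the market counts as trending (CMI >= threshold), False where choppy.
    return cmi >= threshold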
def RSV(data: pd.DataFrame, n=14):
"""RSV=(收盘价-最低价)/(最高价-最低价)
RSV有何规律呢?很明显,在股价上涨趋势中,往往收盘价接近最高价,此时RSV值接近于1
"""
dict = {"RSV": (data.close - data.low) / (data.high - data.low)}
return pd.DataFrame(dict)
def ifupMA(data, n=[20]):
"""收盘站上n均线上
"""
if isinstance(n, int):
# 传进来的参数为整数类型时
dict = {'MA{}'.format(n): (data.close - qa.MA(data.close, n)).dropna() > 0}
elif isinstance(n, list):
dict = {'MA{}'.format(i): data.close - qa.MA(data.close, i) for i in n}
return pd.DataFrame(dict).dropna() > 0
def fourWeek(data, m=20, n=20):
"""四周规则
只要价格涨过前四个日历周内的最高价,则平回空头头寸,开立多头头寸。
只要价格跌过前四个周内(照日历算满)的最低价,则平回多头头寸,建立空头头寸。
"""
def flag(x, preFlag):
if x['close'] > x['hclose']:
preFlag[0] = 1
elif x['close'] < x['lclose']:
preFlag[0] = -1
return preFlag[0]
def highLow(data, m=20, n=20):
        ''' Compute the rolling highest and lowest close over the given windows.
        :param data: DataFrame
        :param m: look-back period for the highest close; default: 20
        :param n: look-back period for the lowest close; default: 20
        :return:
        '''
high = qa.HHV(data['close'], m - 1)
low = qa.LLV(data['close'], n - 1)
return pd.DataFrame({'hclose': high.shift(1), 'lclose': low.shift(1), 'close': data.close})
df = highLow(data, m, n)
preFlag = [0]
    df['flag'] = df.apply(lambda x: flag(x, preFlag), axis=1)
return pd.DataFrame({'flag': df['flag']})
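# Illustrative sketch: extracting the bars where the four-week rule above flips its
# long/short flag; the helper name is hypothetical.
def _four_week_flips_example(data: pd.DataFrame, m=20, n=20):
    flags = fourWeek(data, m, n)['flag']
    # A change in the flag marks a bar where the rule reverses the position.
    return flags[flags.diff().fillna(0) != 0]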
def TBSIndicator(data, m=20, n=20, maday=50):
"""陶博士中期信号
"""
def flag(x, preFlag):
if x['flag'] > 0 and x['MA{}'.format(maday)]:
# 中期入场信号
preFlag[0] = 1
# elif x['flag'] < 0 or x['MA{}'.format(maday)]:
elif x['flag'] < 0 and x['MA{}'.format(maday)]:
# 中期出场信号 跌破20日收盘最低价或者ma50
preFlag[0] = -1
return preFlag[0]
fw = fourWeek(data, m, n)
upma = ifupMA(data, maday)
preFlag = [0]
result = fw.join(upma).apply(lambda x: flag(x, preFlag), axis=1)
return pd.DataFrame({'flag': result})
def TBSMonthIndicator(data, m=10, n=20):
"""陶博士月线牛熊判断
10月交叉20月均线
"""
def flag(x, preFlag):
        if x['jc']:
            # golden cross
            preFlag[0] = 1
        elif x['sc']:
            # death cross
            preFlag[0] = -1
return preFlag[0]
close = data['close']
ma1 = qa.MA(close, m)
ma2 = qa.MA(close, n)
cross1 = qa.CROSS_STATUS(ma1, ma2)
cross2 = qa.CROSS_STATUS(ma2, ma1)
preFlag = [0]
    # jc = golden cross, sc = death cross
result = pd.DataFrame({'jc': cross1, 'sc': cross2}, index=ma1.index).apply(lambda x: flag(x, preFlag), axis=1)
return pd.DataFrame({'flag': result})
| 27.922601
| 122
| 0.598625
|
29bb92bd9913967d580149ba14cbcf1269bb8414
| 9,007
|
py
|
Python
|
document_worker/templates/steps.py
|
ds-wizard/document-worker
|
09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b
|
[
"Apache-2.0"
] | 2
|
2021-04-23T19:28:10.000Z
|
2021-04-27T05:30:07.000Z
|
document_worker/templates/steps.py
|
ds-wizard/document-worker
|
09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b
|
[
"Apache-2.0"
] | 9
|
2021-04-07T16:48:34.000Z
|
2022-02-21T15:48:31.000Z
|
document_worker/templates/steps.py
|
ds-wizard/document-worker
|
09b1f1c29b47d8bd504aea86d0b5cff1a3bbb01b
|
[
"Apache-2.0"
] | 6
|
2020-03-09T07:44:01.000Z
|
2020-12-02T18:26:37.000Z
|
import jinja2 # type: ignore
import jinja2.exceptions # type: ignore
import json
from typing import Optional
from document_worker.consts import DEFAULT_ENCODING
from document_worker.context import Context
from document_worker.conversions import Pandoc, WkHtmlToPdf, RdfLibConvert
from document_worker.documents import DocumentFile, FileFormat, FileFormats
class FormatStepException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class Step:
def __init__(self, template, options: dict):
self.template = template
self.options = options
    def execute_first(self, context: dict) -> Optional[DocumentFile]:
        return self.raise_exc('Called execute_first on Step class')
def execute_follow(self, document: DocumentFile) -> Optional[DocumentFile]:
return self.raise_exc('Called execute_follow on Step class')
def raise_exc(self, message: str):
raise FormatStepException(message)
class JSONStep(Step):
NAME = 'json'
OUTPUT_FORMAT = FileFormats.JSON
def execute_first(self, context: dict) -> DocumentFile:
return DocumentFile(
self.OUTPUT_FORMAT,
json.dumps(context, indent=2, sort_keys=True).encode(DEFAULT_ENCODING),
DEFAULT_ENCODING
)
def execute_follow(self, document: DocumentFile) -> Optional[DocumentFile]:
return self.raise_exc(f'Step "{self.NAME}" cannot process other files')
class Jinja2Step(Step):
NAME = 'jinja'
DEFAULT_FORMAT = FileFormats.HTML
OPTION_ROOT_FILE = 'template'
OPTION_CONTENT_TYPE = 'content-type'
OPTION_EXTENSION = 'extension'
def _jinja_exception_msg(self, e: jinja2.exceptions.TemplateSyntaxError):
lines = [
'Failed loading Jinja2 template due to syntax error:',
f'- {e.message}',
f'- Filename: {e.name}',
f'- Line number: {e.lineno}',
]
return '\n'.join(lines)
def __init__(self, template, options: dict):
super().__init__(template, options)
self.root_file = self.options[self.OPTION_ROOT_FILE]
self.content_type = self.options.get(self.OPTION_CONTENT_TYPE, self.DEFAULT_FORMAT.content_type)
self.extension = self.options.get(self.OPTION_EXTENSION, self.DEFAULT_FORMAT.file_extension)
self.output_format = FileFormat(self.extension, self.content_type, self.extension)
try:
self.j2_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(searchpath=template.template_dir),
extensions=['jinja2.ext.do'],
)
self._add_j2_enhancements()
self.j2_root_template = self.j2_env.get_template(self.root_file)
except jinja2.exceptions.TemplateSyntaxError as e:
self.raise_exc(self._jinja_exception_msg(e))
except Exception as e:
self.raise_exc(f'Failed loading Jinja2 template: {e}')
def _add_j2_enhancements(self):
from document_worker.templates.filters import filters
from document_worker.templates.tests import tests
self.j2_env.filters.update(filters)
self.j2_env.tests.update(tests)
def execute_first(self, context: dict) -> DocumentFile:
def asset_fetcher(file_name):
return self.template.fetch_asset(file_name)
def asset_path(file_name):
return self.template.asset_path(file_name)
content = b''
try:
content = self.j2_root_template.render(
ctx=context,
assets=asset_fetcher,
asset_path=asset_path,
).encode(DEFAULT_ENCODING)
except jinja2.exceptions.TemplateRuntimeError as e:
self.raise_exc(f'Failed rendering Jinja2 template due to'
f' {type(e).__name__}\n'
f'- {str(e)}')
return DocumentFile(self.output_format, content, DEFAULT_ENCODING)
def execute_follow(self, document: DocumentFile) -> Optional[DocumentFile]:
return self.raise_exc(f'Step "{self.NAME}" cannot process other files')
class WkHtmlToPdfStep(Step):
NAME = 'wkhtmltopdf'
INPUT_FORMAT = FileFormats.HTML
OUTPUT_FORMAT = FileFormats.PDF
def __init__(self, template, options: dict):
super().__init__(template, options)
self.wkhtmltopdf = WkHtmlToPdf(config=Context.get().app.cfg)
def execute_first(self, context: dict) -> Optional[DocumentFile]:
return self.raise_exc(f'Step "{self.NAME}" cannot be first')
def execute_follow(self, document: DocumentFile) -> DocumentFile:
if document.file_format != FileFormats.HTML:
self.raise_exc(f'WkHtmlToPdf does not support {document.file_format.name} format as input')
data = self.wkhtmltopdf(
source_format=self.INPUT_FORMAT,
target_format=self.OUTPUT_FORMAT,
data=document.content,
metadata=self.options,
workdir=str(self.template.template_dir),
)
return DocumentFile(self.OUTPUT_FORMAT, data)
class PandocStep(Step):
NAME = 'pandoc'
INPUT_FORMATS = frozenset([
FileFormats.DOCX,
FileFormats.EPUB,
FileFormats.HTML,
FileFormats.LaTeX,
FileFormats.Markdown,
FileFormats.ODT,
FileFormats.RST,
])
OUTPUT_FORMATS = frozenset([
FileFormats.ADoc,
FileFormats.DocBook4,
FileFormats.DocBook5,
FileFormats.DOCX,
FileFormats.EPUB,
FileFormats.HTML,
FileFormats.LaTeX,
FileFormats.Markdown,
FileFormats.ODT,
FileFormats.RST,
FileFormats.RTF,
])
OPTION_FROM = 'from'
OPTION_TO = 'to'
def __init__(self, template, options: dict):
super().__init__(template, options)
self.pandoc = Pandoc(config=Context.get().app.cfg)
self.input_format = FileFormats.get(options[self.OPTION_FROM])
self.output_format = FileFormats.get(options[self.OPTION_TO])
if self.input_format not in self.INPUT_FORMATS:
self.raise_exc(f'Unknown input format "{self.input_format.name}"')
if self.output_format not in self.OUTPUT_FORMATS:
self.raise_exc(f'Unknown output format "{self.output_format.name}"')
def execute_first(self, context: dict) -> Optional[DocumentFile]:
return self.raise_exc(f'Step "{self.NAME}" cannot be first')
def execute_follow(self, document: DocumentFile) -> DocumentFile:
if document.file_format != self.input_format:
self.raise_exc(f'Unexpected input {document.file_format.name} as input for pandoc')
data = self.pandoc(
source_format=self.input_format,
target_format=self.output_format,
data=document.content,
metadata=self.options,
workdir=str(self.template.template_dir),
)
return DocumentFile(self.output_format, data)
class RdfLibConvertStep(Step):
NAME = 'rdflib-convert'
INPUT_FORMATS = [
FileFormats.RDF_XML,
FileFormats.N3,
FileFormats.NTRIPLES,
FileFormats.TURTLE,
FileFormats.TRIG,
FileFormats.JSONLD,
]
OUTPUT_FORMATS = INPUT_FORMATS
OPTION_FROM = 'from'
OPTION_TO = 'to'
def __init__(self, template, options: dict):
super().__init__(template, options)
self.rdflib_convert = RdfLibConvert(config=Context.get().app.cfg)
self.input_format = FileFormats.get(options[self.OPTION_FROM])
self.output_format = FileFormats.get(options[self.OPTION_TO])
if self.input_format not in self.INPUT_FORMATS:
self.raise_exc(f'Unknown input format "{self.input_format.name}"')
if self.output_format not in self.OUTPUT_FORMATS:
self.raise_exc(f'Unknown output format "{self.output_format.name}"')
def execute_first(self, context: dict) -> Optional[DocumentFile]:
return self.raise_exc(f'Step "{self.NAME}" cannot be first')
def execute_follow(self, document: DocumentFile) -> DocumentFile:
if document.file_format != self.input_format:
self.raise_exc(f'Unexpected input {document.file_format.name} '
f'as input for rdflib-convert '
f'(expecting {self.input_format.name})')
data = self.rdflib_convert(
self.input_format, self.output_format, document.content, self.options
)
return DocumentFile(self.output_format, data)
STEPS = {
JSONStep.NAME: JSONStep,
Jinja2Step.NAME: Jinja2Step,
WkHtmlToPdfStep.NAME: WkHtmlToPdfStep,
PandocStep.NAME: PandocStep,
RdfLibConvertStep.NAME: RdfLibConvertStep,
}
def create_step(template, name: str, options: dict) -> Step:
if name not in STEPS:
raise KeyError(f'Unknown step name "{name}"')
step = STEPS[name](template, options)
return step
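# Illustrative sketch of how the registry above is typically chained: the first step
# renders the context, each following step transforms the previous step's output file.
# `step_configs` is assumed to be an iterable of (name, options) pairs.
def _run_steps_example(template, step_configs, context: dict):
    document = None
    for name, options in step_configs:
        step = create_step(template, name, options)
        if document is None:
            document = step.execute_first(context)
        else:
            document = step.execute_follow(document)
    return document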
| 35.46063
| 104
| 0.661819
|
fb8a38d061f84ba12c33ad6178614316f8236b39
| 5,056
|
py
|
Python
|
c_01_intro_to_CV_and_Python/NumPy_tutorial.py
|
keissar3/AI_is_Math
|
ab82d37f23d9c87cc4dcdfbcbc04a127d8ca3408
|
[
"MIT"
] | null | null | null |
c_01_intro_to_CV_and_Python/NumPy_tutorial.py
|
keissar3/AI_is_Math
|
ab82d37f23d9c87cc4dcdfbcbc04a127d8ca3408
|
[
"MIT"
] | null | null | null |
c_01_intro_to_CV_and_Python/NumPy_tutorial.py
|
keissar3/AI_is_Math
|
ab82d37f23d9c87cc4dcdfbcbc04a127d8ca3408
|
[
"MIT"
] | null | null | null |
# %% [markdown]
# # Python Workshop: NumPy
# [](https://colab.research.google.com/github/YoniChechik/AI_is_Math/blob/master/c_01_intro_to_CV_and_Python/NumPy_tutorial.ipynb)
#
# <hr>
#
# Based on:
#
# This [git](https://github.com/zhiyzuo/python-tutorial) of Zhiya Zuo
#
# <hr>
#
# NumPy is the fundamental package for scientific computing with Python. It contains among other things:
#
# - Powerful N-dimensional array object.
# - Useful linear algebra, Fourier transform, and random number capabilities.
# - And much more
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/1/1a/NumPy_logo.svg" alt="numpy logo" width="200"/>
#
# ## NumPy installation
#
# in the cmd run:
#
# ```bash
# pip install numpy
# ```
# ## Arrays
# %%
# After you install numpy, load it
import numpy as np # you can use np instead of numpy to call the functions in numpy package
# %%
x = np.array([1, 2, 3]) # create a numpy array object
print(type(x))
# %% [markdown]
# We can call `shape` function designed for `numpy.ndarray` class to check the dimension
# %%
x.shape # comparable to the 'len()' function used to get a list's size
# %% [markdown]
# Unlike `list`, we have to use one single data type for all elements in an array
# %%
y = np.array([1, 'yes']) # automatic type conversion from int to str
y
# %% [markdown]
# ### Multidimensional arrays
# %%
arr = np.array([[1, 2, 3, 8]])
arr.shape
# %%
arr
# %%
arr = np.array([[1, 2, 3, 8], [3, 2, 3, 2], [4, 5, 0, 8]])
arr.shape
# %%
arr
# %% [markdown]
# ### Special arrays
# There are many special array initialization methods to call:
# %%
np.zeros([3, 5], dtype=int) # dtype can define the type of the array
# %%
np.ones([3, 5])
# %%
np.eye(3)
# %% [markdown]
# ## Operations
# The rules are very similar to R/Matlab: operations are generally element-wise
# %%
arr
# %%
arr - 5
# %%
arr * 6 # element-wise multiplication
# %%
arr * arr # element-wise multiplication of two matrices
# %%
np.exp(arr)
# %% [markdown]
# More examples:
# %%
arr_2 = np.array([[1], [3], [2], [0]])
arr_2
# %%
arr_2_T = arr_2.T # transpose
arr_2_T
# %%
arr @ arr_2 # matrix multiplication
# %%
arr
# %%
arr.max()
# %%
arr.cumsum()
# %% [markdown]
# **Note:** element-by-element operations are done row-by-row, unlike in Matlab (column-by-column).
# There are many class methods to calculate some statistics of the array itself along a given axis:
# - `axis=1` means row-wise
# - `axis=0` means column-wise
# %%
arr.cumsum(axis=1)
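# %% [markdown]
# For comparison, the same cumulative sum can be taken column-wise:
# %%
arr.cumsum(axis=0)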
# %% [markdown]
# ### Note about 1d arrays
# A 1d array is **not a column vector** and **not quite a row vector** either, and hence should be treated carefully when used in vector/matrix manipulation
# %%
a = np.array([1, 2, 3])
a, a.shape
# %%
c = np.array([[1, 2, 3]])
c, c.shape # notice the shape diff
# %%
# can be multiplied like a row vector
b = np.array([[1, 2], [3, 4], [5, 6]])
b
# %%
a @ b
# %%
# can't be transposed! (.T leaves a 1d array unchanged)
a.T, a.T.shape
# %% [markdown]
# A trick to turn a 1d array into a 2d row vector:
# %%
a_2d = a.reshape((1, -1)) # '-1' means to put all the rest of the elements in such a way that the reshape could fit
print(a_2d)
print(a_2d.T)
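# %% [markdown]
# The same idea gives a 2d column vector directly:
# %%
a.reshape((-1, 1))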
# %% [markdown]
# ## Indexing and slicing
# The most important part is how to index and slice a `np.array`. It is actually very similar to `list`, except that we may now have more index elements, because most real-life datasets have more than one dimension
# ### 1 dimensional case
# %%
a1 = np.array([1, 2, 8, 100])
a1
# %%
a1[0]
# %%
a1[-2]
# %%
a1[[0, 1, 3]]
# %%
a1[1:4]
# %% [markdown]
# We can also use boolean values to index
# - `True` means we want this element
# %%
a1 > 3
# %% [markdown]
# ### Masking
# Replacing values of an array with other values according to a boolean mask
# %%
# selecting elements with the boolean mask
a1[a1 > 3]
# %%
# this is a use of the above mask
a1[a1 > 3] = 100
a1
# %% [markdown]
# ### 2 dimensional case
# %%
arr
# %% [markdown]
# Using only one number to index will lead to a subset of the original multidimensional array: also an array
# %%
arr[0]
# %%
type(arr[0])
# %% [markdown]
# Since we have 2 dimensions now, there are 2 indices we can use for indexing the 2 dimensions respectively
# %%
arr[0, 0]
# %% [markdown]
# We can use `:` to indicate everything along that axis
# %%
arr[1]
# %%
arr[1, :]
# %%
arr[:, 1] # watch out! we get a 1d array again, not the column vector one might expect
# %%
# 2D masking
arr[arr > 3] = 55
# %% [markdown]
# ### 3 dimensional case
# As a final example, we look at a 3d array:
# %%
np.random.seed(1234)
arr_3 = np.random.randint(low=0, high=100, size=24)
arr_3
# %% [markdown]
# We can use `reshape` to manipulate the shape of an array
# %%
arr_3 = arr_3.reshape(3, 4, 2)
arr_3
# %% [markdown]
# **Note**: Is the printed array not what you thought it would be? Did reshape mix up the shape? No!
# see [this for answers](https://stackoverflow.com/a/22982371/4879610)
# %%
arr_3[0]
# %%
arr_3[:, 3, 1]
# %%
arr_3[2, 3, 1]
| 18.385455
| 232
| 0.645174
|
a713fb8f5ad2f478cad25cd8c2d1c9b1d50e5b47
| 274
|
py
|
Python
|
authors/apps/articles/exceptions.py
|
andela/ah-backend-spaces-
|
58e031a96a6b9555f1a4133cf8cb688c236d3f3b
|
[
"BSD-3-Clause"
] | 2
|
2018-08-17T15:47:36.000Z
|
2018-09-13T13:58:34.000Z
|
authors/apps/articles/exceptions.py
|
andela/ah-backend-spaces-
|
58e031a96a6b9555f1a4133cf8cb688c236d3f3b
|
[
"BSD-3-Clause"
] | 35
|
2018-07-24T11:42:53.000Z
|
2021-06-10T20:34:41.000Z
|
authors/apps/articles/exceptions.py
|
andela/ah-backend-spaces-
|
58e031a96a6b9555f1a4133cf8cb688c236d3f3b
|
[
"BSD-3-Clause"
] | 3
|
2018-07-17T13:05:35.000Z
|
2018-09-06T16:03:52.000Z
|
from rest_framework.exceptions import APIException
class ArticlesNotExist(APIException):
status_code = 400
default_detail = 'you have no articles'
class NoResultsMatch(APIException):
status_code = 400
default_detail = 'results matching search not found'
| 22.833333
| 56
| 0.773723
|
9732b60de28d488edad208ea773e608070c5100a
| 409
|
py
|
Python
|
tests/test_helpers.py
|
BenTopping/baracoda
|
873504c678d925a8174b7d76b26f540c149c9e17
|
[
"MIT"
] | null | null | null |
tests/test_helpers.py
|
BenTopping/baracoda
|
873504c678d925a8174b7d76b26f540c149c9e17
|
[
"MIT"
] | 1
|
2021-03-04T15:01:24.000Z
|
2021-03-04T15:01:24.000Z
|
tests/test_helpers.py
|
BenTopping/baracoda
|
873504c678d925a8174b7d76b26f540c149c9e17
|
[
"MIT"
] | null | null | null |
from baracoda.helpers import get_prefix_item
def test_correct_prefix_item_is_returned(app, prefixes):
with app.app_context():
prefix_item = get_prefix_item("LEED")
assert prefix_item == {"prefix": "LEED", "sequence_name": "heron", "convert": True}
def test_none_is_returned_for_invalid_prefix(app):
with app.app_context():
prefix_item = get_prefix_item("MOON")
assert prefix_item is None
| 37.181818
| 87
| 0.760391
|
caa22032a4d0211121c6d945a0b050b76960e7d3
| 1,590
|
py
|
Python
|
backend/cw_backend/courses/helpers.py
|
Zuzanita/pyladies-courseware
|
0161a26ae318a919025fe6a2205c5948d151be37
|
[
"MIT"
] | null | null | null |
backend/cw_backend/courses/helpers.py
|
Zuzanita/pyladies-courseware
|
0161a26ae318a919025fe6a2205c5948d151be37
|
[
"MIT"
] | null | null | null |
backend/cw_backend/courses/helpers.py
|
Zuzanita/pyladies-courseware
|
0161a26ae318a919025fe6a2205c5948d151be37
|
[
"MIT"
] | null | null | null |
from datetime import date
import re
class DataProperty:
'''
Example:
class C:
def __init__(self):
self.data = {'foo': 'bar'}
foo = DataProperty('foo')
# Now you can access C().data['foo'] via C().foo
C().foo == 'bar'
C().foo = 'baz'
'''
def __init__(self, key):
self.key = key
def __get__(self, instance, owner):
return instance.data[self.key]
def __set__(self, instance, value):
instance.data[self.key] = value
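# Minimal runnable sketch of the descriptor above; the `_Example` class is hypothetical.
class _Example:
    foo = DataProperty('foo')
    def __init__(self):
        self.data = {'foo': 'bar'}
def _data_property_demo():
    obj = _Example()
    assert obj.foo == 'bar'   # reads obj.data['foo']
    obj.foo = 'baz'           # writes obj.data['foo']
    return obj.data['foo']    # -> 'baz'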
def to_html(raw):
if isinstance(raw, str):
return raw
elif raw.get('markdown'):
return markdown_to_html(raw['markdown'])
else:
raise Exception(f'Unknown type (to_html): {smart_repr(raw)}')
def parse_date(s):
if not s:
return None
if not isinstance(s, str):
raise TypeError(f'parse_date argument must be str: {smart_repr(s)}')
m = re.match(r'^([0-9]{1,2})\. *([0-9]{1,2})\. *([0-9]{4})$', s)
if m:
day, month, year = m.groups()
return date(int(year), int(month), int(day))
m = re.match(r'^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})$', s)
if m:
year, month, day = m.groups()
return date(int(year), int(month), int(day))
raise Exception(f'Invalid date format: {s!r}')
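# Illustrative checks of the two date formats accepted by parse_date above;
# the helper name is hypothetical.
def _parse_date_examples():
    assert parse_date('3. 4. 2021') == date(2021, 4, 3)   # "D. M. YYYY"
    assert parse_date('2021-04-03') == date(2021, 4, 3)   # "YYYY-MM-DD"
    assert parse_date('') is None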
def markdown_to_html(src):
from markdown import markdown
return markdown(
src,
extensions=[
'markdown.extensions.fenced_code',
'markdown.extensions.tables',
'mdx_linkify',
],
output_format='html5')
| 24.84375
| 76
| 0.543396
|
fac5f2a684a0983d8ba26b4d90120d9e0879cf1d
| 3,112
|
py
|
Python
|
metrics/extract_tfms.py
|
edhowler/qmMath
|
aee6e6eecf667cb97cc825ba728d72cd3417ffc4
|
[
"MIT"
] | 1
|
2017-09-06T02:49:13.000Z
|
2017-09-06T02:49:13.000Z
|
metrics/extract_tfms.py
|
sophiebits/KaTeX
|
1da8c8938bfb02ae049b92483fc317f68394e5fe
|
[
"MIT"
] | null | null | null |
metrics/extract_tfms.py
|
sophiebits/KaTeX
|
1da8c8938bfb02ae049b92483fc317f68394e5fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import collections
import json
import parse_tfm
import subprocess
import sys
def find_font_path(font_name):
try:
font_path = subprocess.check_output(['kpsewhich', font_name])
except OSError:
raise RuntimeError("Couldn't find kpsewhich program, make sure you" +
" have TeX installed")
except subprocess.CalledProcessError:
raise RuntimeError("Couldn't find font metrics: '%s'" % font_name)
return font_path.strip()
def main():
mapping = json.load(sys.stdin)
fonts = [
'cmbsy10.tfm',
'cmbx10.tfm',
'cmex10.tfm',
'cmmi10.tfm',
'cmmib10.tfm',
'cmr10.tfm',
'cmsy10.tfm',
'cmti10.tfm',
'msam10.tfm',
'msbm10.tfm',
'eufm10.tfm',
'cmtt10.tfm',
'rsfs10.tfm',
'cmss10.tfm',
]
# Extracted by running `\font\a=<font>` and then `\showthe\skewchar\a` in
# TeX, where `<font>` is the name of the font listed here. The skewchar
# will be printed out in the output. If it outputs `-1`, that means there
# is no skewchar, so we use `None` here.
font_skewchar = {
'cmbsy10': None,
'cmbx10': None,
'cmex10': None,
'cmmi10': 127,
'cmmib10': None,
'cmr10': None,
'cmsy10': 48,
'cmti10': None,
'msam10': None,
'msbm10': None,
'eufm10': None,
'cmtt10': None,
'rsfs10': None,
'cmss10': None,
}
font_name_to_tfm = {}
for font_name in fonts:
font_basename = font_name.split('.')[0]
font_path = find_font_path(font_name)
font_name_to_tfm[font_basename] = parse_tfm.read_tfm_file(font_path)
families = collections.defaultdict(dict)
for family, chars in mapping.iteritems():
for char, char_data in chars.iteritems():
char_num = int(char)
font = char_data['font']
tex_char_num = int(char_data['char'])
yshift = float(char_data['yshift'])
if family == "Script-Regular":
tfm_char = font_name_to_tfm[font].get_char_metrics(tex_char_num,
fix_rsfs=True)
else:
tfm_char = font_name_to_tfm[font].get_char_metrics(tex_char_num)
height = round(tfm_char.height + yshift / 1000.0, 5)
depth = round(tfm_char.depth - yshift / 1000.0, 5)
italic = round(tfm_char.italic_correction, 5)
skewkern = 0.0
if (font_skewchar[font] and
font_skewchar[font] in tfm_char.kern_table):
skewkern = round(
tfm_char.kern_table[font_skewchar[font]], 5)
families[family][char_num] = {
'height': height,
'depth': depth,
'italic': italic,
'skew': skewkern,
}
sys.stdout.write(
json.dumps(families, separators=(',', ':'), sort_keys=True))
if __name__ == '__main__':
main()
| 29.084112
| 81
| 0.546915
|
3bfd761fb1ef937fe388f79a0eab4c9dc0408ae0
| 10,371
|
py
|
Python
|
test/functional/rpc_listtransactions.py
|
XcelRProject/XcelR
|
991f0ca8b60078d5b8fe95c122fff0e95275009b
|
[
"MIT"
] | null | null | null |
test/functional/rpc_listtransactions.py
|
XcelRProject/XcelR
|
991f0ca8b60078d5b8fe95c122fff0e95275009b
|
[
"MIT"
] | null | null | null |
test/functional/rpc_listtransactions.py
|
XcelRProject/XcelR
|
991f0ca8b60078d5b8fe95c122fff0e95275009b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.enable_mocktime()
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
pubkey = self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
# XcelR has RBF disabled
# self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| 51.08867
| 113
| 0.600617
|
53a647a8e06abc47f22b7aa448176be17e775353
| 3,664
|
py
|
Python
|
flatpak_indexer/models.py
|
owtaylor/flatpak-indexer
|
6d4227677d2ff1a60735ecbaa10b241336f4b55b
|
[
"MIT"
] | 6
|
2021-03-29T09:01:51.000Z
|
2021-11-29T15:28:13.000Z
|
flatpak_indexer/models.py
|
owtaylor/flatpak-indexer
|
6d4227677d2ff1a60735ecbaa10b241336f4b55b
|
[
"MIT"
] | 4
|
2020-09-14T15:55:11.000Z
|
2022-01-06T15:56:26.000Z
|
flatpak_indexer/models.py
|
owtaylor/flatpak-indexer
|
6d4227677d2ff1a60735ecbaa10b241336f4b55b
|
[
"MIT"
] | 1
|
2021-08-30T10:17:16.000Z
|
2021-08-30T10:17:16.000Z
|
from datetime import datetime
from functools import cached_property
from typing import Any, Dict, List, Optional
from .json_model import BaseModel, field
from .utils import parse_pull_spec
class TagHistoryItemModel(BaseModel):
architecture: str
date: datetime
digest: str
class TagHistoryModel(BaseModel):
name: str
items: List[TagHistoryItemModel]
class ImageModel(BaseModel):
digest: str
media_type: str
os: str = field(json_name="OS")
architecture: str
labels: Dict[str, str]
annotations: Dict[str, str]
tags: List[str]
diff_ids: List[str]
# This is the place where the image was uploaded when built, which may differ
# from the public location of the image.
pull_spec: Optional[str]
@property
def nvr(self):
name = self.labels.get("com.redhat.component")
if name:
version = self.labels["version"]
release = self.labels["release"]
return f"{name}-{version}-{release}"
else:
return None
class ListModel(BaseModel):
digest: str
media_type: str
images: List[ImageModel]
tags: List[str]
class RepositoryModel(BaseModel):
name: str
images: Dict[str, ImageModel] = field(index="digest")
lists: Dict[str, ListModel] = field(index="digest")
tag_histories: Dict[str, TagHistoryModel] = field(index="name")
class RegistryModel(BaseModel):
repositories: Dict[str, RepositoryModel] = field(index="name")
def add_image(self, name: str, image: ImageModel):
if name not in self.repositories:
self.repositories[name] = RepositoryModel(name=name)
self.repositories[name].images[image.digest] = image
class KojiBuildModel(BaseModel):
build_id: str
nvr: str
source: Optional[str]
completion_time: datetime
user_name: str
class ImageBuildModel(KojiBuildModel):
images: List[ImageModel]
@classmethod
def class_from_json(cls, data: Dict[str, Any]):
if 'ModuleBuilds' in data:
return FlatpakBuildModel
else:
return ImageBuildModel
@cached_property
def repository(self):
_, repository, _ = parse_pull_spec(self.images[0].pull_spec)
return repository
class FlatpakBuildModel(ImageBuildModel):
module_builds: List[str]
package_builds: List[str]
class ModuleBuildModel(KojiBuildModel):
modulemd: str
package_builds: List[str]
class PackageBuildModel(KojiBuildModel):
pass
class TardiffImageModel(BaseModel):
registry: str
repository: str
ref: str
class TardiffSpecModel(BaseModel):
from_image: TardiffImageModel
from_diff_id: str
to_image: TardiffImageModel
to_diff_id: str
class TardiffResultModel(BaseModel):
status: str
digest: str
size: int
message: str
from_size: Optional[int]
to_size: Optional[int]
max_mem_kib: Optional[float] = field(json_name="MaxMemKiB")
elapsed_time_s: Optional[float]
user_time_s: Optional[float]
system_time_s: Optional[float]
class ModuleImageContentsModel(BaseModel):
image_nvr: str
module_nvr: str
package_builds: List[str]
class ModuleStreamContentsModel(BaseModel):
images: Dict[str, ModuleImageContentsModel] = field(index="image_nvr")
def add_package_build(self, image_nvr: str, module_nvr: str, package_nvr: str):
if image_nvr not in self.images:
self.images[image_nvr] = ModuleImageContentsModel(image_nvr=image_nvr,
module_nvr=module_nvr)
self.images[image_nvr].package_builds.append(package_nvr)
| 24.264901
| 84
| 0.682587
|
28878225da61a435d0a7d49dbd5449df09f7a68f
| 10,418
|
py
|
Python
|
anaconda_project/test/test_project_lock_file.py
|
vertingo/Anaconda_Videos_Tutos
|
f30f2a0549a7b81c17f4d5d249edc59eb3c05458
|
[
"BSD-3-Clause"
] | null | null | null |
anaconda_project/test/test_project_lock_file.py
|
vertingo/Anaconda_Videos_Tutos
|
f30f2a0549a7b81c17f4d5d249edc59eb3c05458
|
[
"BSD-3-Clause"
] | null | null | null |
anaconda_project/test/test_project_lock_file.py
|
vertingo/Anaconda_Videos_Tutos
|
f30f2a0549a7b81c17f4d5d249edc59eb3c05458
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
import codecs
import os
from anaconda_project.internal.test.tmpfile_utils import with_directory_contents
from anaconda_project.project_lock_file import (ProjectLockFile, DEFAULT_PROJECT_LOCK_FILENAME,
possible_project_lock_file_names)
from anaconda_project.conda_manager import CondaLockSet
expected_default_file = """# This is an Anaconda project lock file.
# The lock file locks down exact versions of all your dependencies.
#
# In most cases, this file is automatically maintained by the `anaconda-project` command or GUI tools.
# It's best to keep this file in revision control (such as git or svn).
# The file is in YAML format, please see http://www.yaml.org/start.html for more.
#
#
# Set to false to ignore locked versions.
locking_enabled: false
#
# A key goes in here for each env spec.
#
env_specs: {}
"""
def _get_locking_enabled(lock_file, env_spec_name):
"""Library-internal method."""
enabled = lock_file.get_value(['env_specs', env_spec_name, 'locked'], None)
if enabled is not None:
return enabled
enabled = lock_file.get_value(['locking_enabled'])
if enabled is not None:
return enabled
return True
def _get_lock_set(lock_file, env_spec_name):
"""Library-internal method."""
# TODO no validation here, we'll do that by moving this
# into project.py soon
enabled = _get_locking_enabled(lock_file, env_spec_name)
packages = lock_file.get_value(['env_specs', env_spec_name, 'packages'], {})
platforms = lock_file.get_value(['env_specs', env_spec_name, 'platforms'], [])
env_spec_hash = lock_file.get_value(['env_specs', env_spec_name, 'env_spec_hash'], None)
lock_set = CondaLockSet(packages, platforms, enabled=enabled)
lock_set.env_spec_hash = env_spec_hash
return lock_set
def test_create_missing_lock_file_only_when_not_default():
def create_file(dirname):
filename = os.path.join(dirname, DEFAULT_PROJECT_LOCK_FILENAME)
assert not os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
assert lock_file is not None
assert not os.path.exists(filename)
assert _get_lock_set(lock_file, 'foo').disabled
assert not _get_locking_enabled(lock_file, 'foo')
lock_file.save()
        # should not have saved an unmodified (default) file
assert not os.path.exists(filename)
# make a change, which should cause us to save
lock_file.set_value(['something'], 42)
lock_file.save()
assert os.path.exists(filename)
with codecs.open(filename, 'r', 'utf-8') as file:
contents = file.read()
expected = expected_default_file + "something: 42\n"
assert expected == contents
with_directory_contents(dict(), create_file)
def _use_existing_lock_file(relative_name):
def check_file(dirname):
filename = os.path.join(dirname, relative_name)
assert os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
assert lock_file.get_value(['env_specs', 'foo']) is not None
assert lock_file.get_value(['locking_enabled']) is True
with_directory_contents(
{relative_name: """
locking_enabled: true
env_specs:
foo:
locked: true
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- foo=1.0=1
bar:
locked: false
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- bar=2.0=2
"""}, check_file)
def test_use_existing_lock_file_default_name():
_use_existing_lock_file(DEFAULT_PROJECT_LOCK_FILENAME)
def test_use_existing_lock_file_all_names():
for name in possible_project_lock_file_names:
_use_existing_lock_file(name)
def test_get_lock_set():
def check_file(dirname):
filename = os.path.join(dirname, DEFAULT_PROJECT_LOCK_FILENAME)
assert os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert foo_lock_set.enabled
assert ('foo=1.0=1', ) == foo_lock_set.package_specs_for_current_platform
bar_lock_set = _get_lock_set(lock_file, 'bar')
assert bar_lock_set.disabled
with_directory_contents(
{DEFAULT_PROJECT_LOCK_FILENAME: """
locking_enabled: true
env_specs:
foo:
locked: true
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- foo=1.0=1
bar:
locked: false
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- bar=2.0=2
"""}, check_file)
def test_disable_single_spec_locking():
def check_file(dirname):
filename = os.path.join(dirname, DEFAULT_PROJECT_LOCK_FILENAME)
assert os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert ('foo=1.0=1', ) == foo_lock_set.package_specs_for_current_platform
lock_file._disable_locking('foo')
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert foo_lock_set.disabled
assert _get_locking_enabled(lock_file, 'foo') is False
with_directory_contents(
{DEFAULT_PROJECT_LOCK_FILENAME: """
locking_enabled: true
env_specs:
foo:
locked: true
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- foo=1.0=1
bar:
locked: false
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- bar=2.0=2
"""}, check_file)
def test_set_lock_set():
def check_file(dirname):
filename = os.path.join(dirname, DEFAULT_PROJECT_LOCK_FILENAME)
assert os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
# we have the global enabled flag off; individual
# env spec settings override that; foo has no setting.
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert lock_file.get_value(['env_specs', 'foo', 'locked'], None) is None
assert foo_lock_set.disabled
bar_lock_set = _get_lock_set(lock_file, 'bar')
assert bar_lock_set.disabled
all_names = ['foo', 'bar']
lock_set = CondaLockSet({'all': ['something=3.0=0']},
platforms=['linux-32', 'linux-64', 'osx-64', 'win-32', 'win-64'])
lock_set.env_spec_hash = "hash-hash-hash"
lock_file._set_lock_set('bar', lock_set, all_names=all_names)
# "foo" should have been DISABLED since we had to
# enable the global flag in order to enable "bar"
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert lock_file.get_value(['env_specs', 'foo', 'locked']) is False
assert foo_lock_set.disabled
assert foo_lock_set.env_spec_hash is None
bar_lock_set = _get_lock_set(lock_file, 'bar')
assert bar_lock_set.enabled
assert ('something=3.0=0', ) == bar_lock_set.package_specs_for_current_platform
assert "hash-hash-hash" == bar_lock_set.env_spec_hash
# and now we should enable "foo" when we set it to something
lock_file._set_lock_set('foo', lock_set, all_names=all_names)
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert foo_lock_set.enabled
assert ('something=3.0=0', ) == foo_lock_set.package_specs_for_current_platform
# be sure we can save
lock_file.save()
reloaded = ProjectLockFile.load_for_directory(dirname)
assert ('something=3.0=0', ) == _get_lock_set(reloaded, 'bar').package_specs_for_current_platform
assert ('something=3.0=0', ) == _get_lock_set(reloaded, 'foo').package_specs_for_current_platform
# Check _set_lock_set_hash
lock_file._set_lock_set_hash('bar', 'hash2.0')
lock_file.save()
reloaded = ProjectLockFile.load_for_directory(dirname)
bar_lock_set = _get_lock_set(reloaded, 'bar')
assert bar_lock_set.env_spec_hash == 'hash2.0'
with_directory_contents(
{DEFAULT_PROJECT_LOCK_FILENAME: """
locking_enabled: false
env_specs:
foo:
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- foo=1.0=1
bar:
locked: false
platforms: [linux-32,linux-64,osx-64,win-32,win-64]
packages:
all:
- bar=2.0=2
"""}, check_file)
def test_set_lock_set_has_to_create_env_specs_to_disable():
def check_file(dirname):
filename = os.path.join(dirname, DEFAULT_PROJECT_LOCK_FILENAME)
assert os.path.exists(filename)
lock_file = ProjectLockFile.load_for_directory(dirname)
all_names = ['foo', 'bar']
lock_set = CondaLockSet({'all': ['something=3.0=0']},
platforms=['linux-32', 'linux-64', 'osx-64', 'win-32', 'win-64'])
# so the point of this test is that we need to create env_specs
# dict and the 'foo' entry as a side effect of setting 'bar',
# in order to mark 'foo' disabled.
lock_file._set_lock_set('bar', lock_set, all_names=all_names)
# "foo" should have been DISABLED since we had to
# enable the global flag in order to enable "bar"
foo_lock_set = _get_lock_set(lock_file, 'foo')
assert lock_file.get_value(['env_specs', 'foo', 'locked']) is False
assert foo_lock_set.disabled
bar_lock_set = _get_lock_set(lock_file, 'bar')
assert bar_lock_set.enabled
assert ('something=3.0=0', ) == bar_lock_set.package_specs_for_current_platform
# be sure we can save
lock_file.save()
reloaded = ProjectLockFile.load_for_directory(dirname)
assert ('something=3.0=0', ) == _get_lock_set(reloaded, 'bar').package_specs_for_current_platform
assert _get_lock_set(reloaded, 'foo').disabled
with_directory_contents({DEFAULT_PROJECT_LOCK_FILENAME: """
locking_enabled: false
"""}, check_file)
| 34.611296
| 105
| 0.668362
|
e71ab575a47c483b76b6246e03cdc69757741f3c
| 924
|
py
|
Python
|
apps/platforms/linux/terminal.py
|
joshpearce/knausj_talon
|
44c49806c6e53b2e5fe90fc24fd06a1fc5125883
|
[
"MIT"
] | 1
|
2020-11-13T18:02:12.000Z
|
2020-11-13T18:02:12.000Z
|
apps/platforms/linux/terminal.py
|
joshpearce/knausj_talon
|
44c49806c6e53b2e5fe90fc24fd06a1fc5125883
|
[
"MIT"
] | null | null | null |
apps/platforms/linux/terminal.py
|
joshpearce/knausj_talon
|
44c49806c6e53b2e5fe90fc24fd06a1fc5125883
|
[
"MIT"
] | null | null | null |
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
os: linux
tag: terminal
"""
@ctx.action_class('edit')
class EditActions:
#todo: generic tab commands
#tag(): tabs
def page_down():
actions.key('shift-pagedown')
def page_up():
actions.key('shift-pageup')
def paste():
actions.key('ctrl-shift-v')
def copy():
actions.key('ctrl-shift-c')
def find(text: str=None):
actions.key('ctrl-shift-f')
def word_left():
actions.key('ctrl-w left')
def word_right():
actions.key('ctrl-w right')
@ctx.action_class('app')
class AppActions:
def tab_open():
actions.key('ctrl-shift-t')
def tab_close():
actions.key('ctrl-shift-w')
def tab_next():
actions.key('ctrl-pagedown')
def tab_previous():
actions.key('ctrl-pageup')
def window_open():
actions.key('ctrl-shift-n')
| 23.692308
| 37
| 0.598485
|
f3801e8ac51568f506874febf9c27aee04d6df03
| 337
|
py
|
Python
|
src/meetings/templatetags/letter_tag.py
|
ofirr/OpenCommunity
|
7786ac2996530af8f545f4398c071793c73634c8
|
[
"BSD-3-Clause"
] | 1
|
2015-05-12T17:59:35.000Z
|
2015-05-12T17:59:35.000Z
|
src/meetings/templatetags/letter_tag.py
|
Niros/OpenCommunity
|
4c91136db6243a1cd65b55ecf5a44c2bce24a45a
|
[
"BSD-3-Clause"
] | null | null | null |
src/meetings/templatetags/letter_tag.py
|
Niros/OpenCommunity
|
4c91136db6243a1cd65b55ecf5a44c2bce24a45a
|
[
"BSD-3-Clause"
] | null | null | null |
from django import template
from django.utils.translation import ugettext_lazy as _
LETTERS = [_("a"), _("b"), _("c"), _("d"), _("e"),
_("f"), _("g"), _("h"), _("i"), _("j")]
register = template.Library()
@register.filter
def to_char(value):
if value > len(LETTERS):
return value
return LETTERS[value - 1]
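# Illustrative usage (hypothetical template snippet): after `{% load letter_tag %}`,
# an agenda position can be rendered as a letter, e.g. `{{ forloop.counter|to_char }}`
# produces "a" for the first item.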
| 22.466667
| 55
| 0.587537
|
10106ffc4e4db622eaab2a471d59e8165c72f8c7
| 3,258
|
py
|
Python
|
tests/test_R14.py
|
drunsinn/pyXSteam
|
8d1f178fa59ee99b269fd1d3b88d5cf4b8ad74af
|
[
"BSD-3-Clause"
] | 27
|
2015-03-14T09:24:41.000Z
|
2022-03-19T16:39:48.000Z
|
tests/test_R14.py
|
drunsinn/pyXSteam
|
8d1f178fa59ee99b269fd1d3b88d5cf4b8ad74af
|
[
"BSD-3-Clause"
] | 6
|
2017-10-11T15:13:02.000Z
|
2021-03-24T08:05:51.000Z
|
tests/test_R14.py
|
drunsinn/pyXSteam
|
8d1f178fa59ee99b269fd1d3b88d5cf4b8ad74af
|
[
"BSD-3-Clause"
] | 13
|
2017-10-11T08:07:06.000Z
|
2021-07-25T01:28:46.000Z
|
# -*- coding: utf-8 -*-
import unittest
from pyXSteam import IAPWS_R14
class R14_FunctionTester(unittest.TestCase):
def setUp(self):
self.maxError = 1E-6
self.maxError_ice_III = 0.003
self.maxError_ice_V = 0.003
self.maxError_ice_VI = 0.003
self.maxError_ice_VII = 0.007
self.maxError_ice_Ih = 0.002
def tearDown(self):
pass
def test_R14_pmelt_T_function_Ih_1(self):
error = IAPWS_R14.pmelt_T_iceIh(251.165) - 208.566
self.assertLess(error, self.maxError_ice_Ih, 'pmelt_T_iceIh not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_Ih})
def test_R14_pmelt_T_function_Ih_2(self):
error = IAPWS_R14.pmelt_T_iceIh(254) - 268.685
self.assertLess(error, self.maxError_ice_Ih, 'pmelt_T_iceIh not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_Ih})
def test_R14_pmelt_T_function_III_1(self):
error = IAPWS_R14.pmelt_T_iceIII(251.165) - 208.566
self.assertLess(error, self.maxError_ice_III, 'pmelt_T_iceIII not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_III})
def test_R14_pmelt_T_function_III_2(self):
error = IAPWS_R14.pmelt_T_iceIII(254.0) - 268.685
self.assertLess(error, self.maxError_ice_III, 'pmelt_T_iceIII not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_III})
def test_R14_pmelt_T_function_V_1(self):
error = IAPWS_R14.pmelt_T_iceV(256.164) - 350.1
self.assertLess(error, self.maxError_ice_V, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_V})
def test_R14_pmelt_T_function_V_2(self):
error = IAPWS_R14.pmelt_T_iceV(265) - 479.640
self.assertLess(error, self.maxError_ice_V, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_V})
def test_R14_pmelt_T_function_VI_1(self):
error = IAPWS_R14.pmelt_T_iceVI(273.31) - 632.4
self.assertLess(error, self.maxError_ice_VI, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_VI})
def test_R14_pmelt_T_function_VI_2(self):
error = IAPWS_R14.pmelt_T_iceVI(320) - 1356.76
self.assertLess(error, self.maxError_ice_VI, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_VI})
def test_R14_pmelt_T_function_VII_1(self):
error = IAPWS_R14.pmelt_T_iceVII(355.0) - 2216
self.assertLess(error, self.maxError_ice_VII, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_VII})
def test_R14_pmelt_T_function_VII_2(self):
error = IAPWS_R14.pmelt_T_iceVII(550) - 6308.71
self.assertLess(error, self.maxError_ice_VII, 'pmelt_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError_ice_VII})
def test_R14_psubl_T_function(self):
error = IAPWS_R14.psubl_T(230.0) - 8.94735E-6
self.assertLess(error, self.maxError, 'psubl_t not passed Error %(error)e allowed: %(max)e' % {'error': error, 'max': self.maxError})
| 51.714286
| 164
| 0.694291
|
4b0ef688d72f3ff129c8e6de1afe6222c0b1fa26
| 14,885
|
py
|
Python
|
tests/execution/test_union_interface.py
|
corydolphin/graphql-core
|
cc303671559822d9c3991e04fee170dce0f23553
|
[
"MIT"
] | 1
|
2021-07-27T20:47:34.000Z
|
2021-07-27T20:47:34.000Z
|
tests/execution/test_union_interface.py
|
vpetrovykh/graphql-core
|
7af97e22afb27861fc1b7d7ca0292095f8427ecb
|
[
"MIT"
] | null | null | null |
tests/execution/test_union_interface.py
|
vpetrovykh/graphql-core
|
7af97e22afb27861fc1b7d7ca0292095f8427ecb
|
[
"MIT"
] | null | null | null |
from typing import Optional, Union, List
from graphql.execution import execute_sync
from graphql.language import parse
from graphql.type import (
GraphQLBoolean,
GraphQLField,
GraphQLInterfaceType,
GraphQLList,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
class Dog:
name: str
barks: bool
mother: Optional["Dog"]
father: Optional["Dog"]
progeny: List["Dog"]
def __init__(self, name: str, barks: bool):
self.name = name
self.barks = barks
self.mother = None
self.father = None
self.progeny = []
class Cat:
name: str
meows: bool
mother: Optional["Cat"]
father: Optional["Cat"]
progeny: List["Cat"]
def __init__(self, name: str, meows: bool):
self.name = name
self.meows = meows
self.mother = None
self.father = None
self.progeny = []
class Person:
name: str
pets: Optional[List[Union[Dog, Cat]]]
friends: Optional[List[Union[Dog, Cat, "Person"]]]
def __init__(
self,
name: str,
pets: Optional[List[Union[Dog, Cat]]] = None,
friends: Optional[List[Union[Dog, Cat, "Person"]]] = None,
):
self.name = name
self.pets = pets
self.friends = friends
NamedType = GraphQLInterfaceType("Named", {"name": GraphQLField(GraphQLString)})
LifeType = GraphQLInterfaceType(
"Life", lambda: {"progeny": GraphQLField(GraphQLList(LifeType))} # type: ignore
)
MammalType = GraphQLInterfaceType(
"Mammal",
lambda: {
"progeny": GraphQLField(GraphQLList(MammalType)), # type: ignore
"mother": GraphQLField(MammalType), # type: ignore
"father": GraphQLField(MammalType), # type: ignore
},
interfaces=[LifeType],
)
DogType = GraphQLObjectType(
"Dog",
lambda: {
"name": GraphQLField(GraphQLString),
"barks": GraphQLField(GraphQLBoolean),
"progeny": GraphQLField(GraphQLList(DogType)), # type: ignore
"mother": GraphQLField(DogType), # type: ignore
"father": GraphQLField(DogType), # type: ignore
},
interfaces=[MammalType, LifeType, NamedType],
is_type_of=lambda value, info: isinstance(value, Dog),
)
CatType = GraphQLObjectType(
"Cat",
lambda: {
"name": GraphQLField(GraphQLString),
"meows": GraphQLField(GraphQLBoolean),
"progeny": GraphQLField(GraphQLList(CatType)), # type: ignore
"mother": GraphQLField(CatType), # type: ignore
"father": GraphQLField(CatType), # type: ignore
},
interfaces=[MammalType, LifeType, NamedType],
is_type_of=lambda value, info: isinstance(value, Cat),
)
def resolve_pet_type(value, _info, _type):
if isinstance(value, Dog):
return DogType.name
if isinstance(value, Cat):
return CatType.name
# Not reachable. All possible types have been considered.
raise TypeError("Unexpected pet type")
PetType = GraphQLUnionType("Pet", [DogType, CatType], resolve_type=resolve_pet_type)
PersonType = GraphQLObjectType(
"Person",
lambda: {
"name": GraphQLField(GraphQLString),
"pets": GraphQLField(GraphQLList(PetType)),
"friends": GraphQLField(GraphQLList(NamedType)),
"progeny": GraphQLField(GraphQLList(PersonType)), # type: ignore
"mother": GraphQLField(PersonType), # type: ignore
"father": GraphQLField(PersonType), # type: ignore
},
interfaces=[NamedType, MammalType, LifeType],
is_type_of=lambda value, _info: isinstance(value, Person),
)
schema = GraphQLSchema(PersonType, types=[PetType])
garfield = Cat("Garfield", False)
garfield.mother = Cat("Garfield's Mom", False)
garfield.mother.progeny = [garfield]
odie = Dog("Odie", True)
odie.mother = Dog("Odie's Mom", True)
odie.mother.progeny = [odie]
liz = Person("Liz", [], [])
john = Person("John", [garfield, odie], [liz, odie])
def describe_execute_union_and_intersection_types():
def can_introspect_on_union_and_intersection_types():
document = parse(
"""
{
Named: __type(name: "Named") {
kind
name
fields { name }
interfaces { name }
possibleTypes { name }
enumValues { name }
inputFields { name }
}
Mammal: __type(name: "Mammal") {
kind
name
fields { name }
interfaces { name }
possibleTypes { name }
enumValues { name }
inputFields { name }
}
Pet: __type(name: "Pet") {
kind
name
fields { name }
interfaces { name }
possibleTypes { name }
enumValues { name }
inputFields { name }
}
}
"""
)
assert execute_sync(schema=schema, document=document) == (
{
"Named": {
"kind": "INTERFACE",
"name": "Named",
"fields": [{"name": "name"}],
"interfaces": [],
"possibleTypes": [
{"name": "Dog"},
{"name": "Cat"},
{"name": "Person"},
],
"enumValues": None,
"inputFields": None,
},
"Mammal": {
"kind": "INTERFACE",
"name": "Mammal",
"fields": [
{"name": "progeny"},
{"name": "mother"},
{"name": "father"},
],
"interfaces": [{"name": "Life"}],
"possibleTypes": [
{"name": "Dog"},
{"name": "Cat"},
{"name": "Person"},
],
"enumValues": None,
"inputFields": None,
},
"Pet": {
"kind": "UNION",
"name": "Pet",
"fields": None,
"interfaces": None,
"possibleTypes": [{"name": "Dog"}, {"name": "Cat"}],
"enumValues": None,
"inputFields": None,
},
},
None,
)
def executes_using_union_types():
# NOTE: This is an *invalid* query, but it should be *executable*.
document = parse(
"""
{
__typename
name
pets {
__typename
name
barks
meows
}
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"pets": [
{"__typename": "Cat", "name": "Garfield", "meows": False},
{"__typename": "Dog", "name": "Odie", "barks": True},
],
},
None,
)
def executes_union_types_with_inline_fragment():
# This is the valid version of the query in the above test.
document = parse(
"""
{
__typename
name
pets {
__typename
... on Dog {
name
barks
}
... on Cat {
name
meows
}
}
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"pets": [
{"__typename": "Cat", "name": "Garfield", "meows": False},
{"__typename": "Dog", "name": "Odie", "barks": True},
],
},
None,
)
def executes_using_interface_types():
        # NOTE: This is an *invalid* query, but it should be *executable*.
document = parse(
"""
{
__typename
name
friends {
__typename
name
barks
meows
}
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"friends": [
{"__typename": "Person", "name": "Liz"},
{"__typename": "Dog", "name": "Odie", "barks": True},
],
},
None,
)
def executes_interface_types_with_inline_fragment():
# This is the valid version of the query in the above test.
document = parse(
"""
{
__typename
name
friends {
__typename
name
... on Dog {
barks
}
... on Cat {
meows
}
... on Mammal {
mother {
__typename
... on Dog {
name
barks
}
... on Cat {
name
meows
}
}
}
}
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"friends": [
{"__typename": "Person", "name": "Liz", "mother": None},
{
"__typename": "Dog",
"name": "Odie",
"barks": True,
"mother": {
"__typename": "Dog",
"name": "Odie's Mom",
"barks": True,
},
},
],
},
None,
)
def executes_interface_types_with_named_fragments():
document = parse(
"""
{
__typename
name
friends {
__typename
name
...DogBarks
...CatMeows
}
}
fragment DogBarks on Dog {
barks
}
fragment CatMeows on Cat {
meows
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"friends": [
{"__typename": "Person", "name": "Liz"},
{"__typename": "Dog", "name": "Odie", "barks": True},
],
},
None,
)
def allows_fragment_conditions_to_be_abstract_types():
document = parse(
"""
{
__typename
name
pets {
...PetFields,
...on Mammal {
mother {
...ProgenyFields
}
}
}
friends { ...FriendFields }
}
fragment PetFields on Pet {
__typename
... on Dog {
name
barks
}
... on Cat {
name
meows
}
}
fragment FriendFields on Named {
__typename
name
... on Dog {
barks
}
... on Cat {
meows
}
}
fragment ProgenyFields on Life {
progeny {
__typename
}
}
"""
)
assert execute_sync(schema=schema, document=document, root_value=john) == (
{
"__typename": "Person",
"name": "John",
"pets": [
{
"__typename": "Cat",
"name": "Garfield",
"meows": False,
"mother": {"progeny": [{"__typename": "Cat"}]},
},
{
"__typename": "Dog",
"name": "Odie",
"barks": True,
"mother": {"progeny": [{"__typename": "Dog"}]},
},
],
"friends": [
{"__typename": "Person", "name": "Liz"},
{"__typename": "Dog", "name": "Odie", "barks": True},
],
},
None,
)
# noinspection PyPep8Naming
def gets_execution_info_in_resolver():
encountered = {}
def resolve_type(_source, info, _type):
encountered["context"] = info.context
encountered["schema"] = info.schema
encountered["root_value"] = info.root_value
return PersonType2.name
NamedType2 = GraphQLInterfaceType(
"Named", {"name": GraphQLField(GraphQLString)}, resolve_type=resolve_type
)
PersonType2 = GraphQLObjectType(
"Person",
{
"name": GraphQLField(GraphQLString),
"friends": GraphQLField(GraphQLList(NamedType2)),
},
interfaces=[NamedType2],
)
schema2 = GraphQLSchema(PersonType2)
document = parse("{ name, friends { name } }")
root_value = Person("John", [], [liz])
context_value = {"authToken": "123abc"}
assert execute_sync(
schema=schema2,
document=document,
root_value=root_value,
context_value=context_value,
) == (
{"name": "John", "friends": [{"name": "Liz"}]},
None,
)
assert encountered == {
"schema": schema2,
"root_value": root_value,
"context": context_value,
}
| 28.032015
| 85
| 0.420961
|
8067862deaa2622ae7ba96e5b83fa72517582d2b
| 1,061
|
py
|
Python
|
libpaste/management/commands/cleanup_snippets.py
|
rbarrois/xelpaste
|
54ca4a60169245fb01b3b2ddae9473a706ea6ec1
|
[
"MIT"
] | null | null | null |
libpaste/management/commands/cleanup_snippets.py
|
rbarrois/xelpaste
|
54ca4a60169245fb01b3b2ddae9473a706ea6ec1
|
[
"MIT"
] | null | null | null |
libpaste/management/commands/cleanup_snippets.py
|
rbarrois/xelpaste
|
54ca4a60169245fb01b3b2ddae9473a706ea6ec1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
from django.utils import timezone
from django.core.management.base import BaseCommand
from ...models import Snippet
class Command(BaseCommand):
help = "Purges snippets that are expired"
def add_arguments(self, parser):
parser.add_argument('--dry-run', '-n',
action='store_true',
dest='dry_run',
help="Don't do anything",
)
def handle(self, *args, **options):
deleteable_snippets = Snippet.objects.filter(
expires__isnull=False,
expire_type=Snippet.EXPIRE_TIME,
expires__lte=timezone.now()
)
self.stdout.write(u"%s snippets gets deleted:\n" % deleteable_snippets.count())
for d in deleteable_snippets:
self.stdout.write(u"- %s (%s)\n" % (d.secret_id, d.expires))
if options.get('dry_run'):
self.stdout.write(u'Dry run - Not actually deleting snippets!\n')
else:
deleteable_snippets.delete()
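# Hedged usage note (assumption: the standard Django management-command invocation;
# any scheduling, e.g. via cron, is only a suggestion and not part of this module):
#   python manage.py cleanup_snippets --dry-run   # only list what would be deleted
#   python manage.py cleanup_snippets             # actually delete expired snippets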
| 30.314286
| 87
| 0.625825
|
542b1e7ead17adb7e295a15a58f45f995c7ab440
| 7,858
|
py
|
Python
|
plaso/parsers/mac_appfirewall.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/mac_appfirewall.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
plaso/parsers/mac_appfirewall.py
|
jeppetrost/plaso
|
b48008c6ea79950eeeef3a05b3a859086c8704b6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This file contains a appfirewall.log (MacOS Firewall) parser."""
from __future__ import unicode_literals
import pyparsing
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.lib import timelib
from plaso.parsers import logger
from plaso.parsers import manager
from plaso.parsers import text_parser
class MacAppFirewallLogEventData(events.EventData):
"""MacOS Firewall log event data.
Attributes:
action (str): action.
    agent (str): agent that saved the log.
    computer_name (str): name of the computer.
    process_name (str): name of the entity that tried to do the action.
    status (str): status of the saved action.
"""
DATA_TYPE = 'mac:appfirewall:line'
def __init__(self):
"""Initializes event data."""
super(MacAppFirewallLogEventData, self).__init__(data_type=self.DATA_TYPE)
self.action = None
self.agent = None
self.computer_name = None
self.process_name = None
self.status = None
class MacAppFirewallParser(text_parser.PyparsingSingleLineTextParser):
"""Parse text based on appfirewall.log file."""
NAME = 'mac_appfirewall_log'
DESCRIPTION = 'Parser for appfirewall.log files.'
_ENCODING = 'utf-8'
  # Define what a log line should look like.
# Example: 'Nov 2 04:07:35 DarkTemplar-2.local socketfilterfw[112] '
# '<Info>: Dropbox: Allow (in:0 out:2)'
  # INFO: process_name is going to have leading whitespace.
DATE_TIME = pyparsing.Group(
text_parser.PyparsingConstants.THREE_LETTERS.setResultsName('month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName('day') +
text_parser.PyparsingConstants.TIME_ELEMENTS)
FIREWALL_LINE = (
DATE_TIME.setResultsName('date_time') +
pyparsing.Word(pyparsing.printables).setResultsName('computer_name') +
pyparsing.Word(pyparsing.printables).setResultsName('agent') +
pyparsing.Literal('<').suppress() +
pyparsing.CharsNotIn('>').setResultsName('status') +
pyparsing.Literal('>:').suppress() +
pyparsing.CharsNotIn(':').setResultsName('process_name') +
pyparsing.Literal(':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('action'))
# Repeated line.
# Example: Nov 29 22:18:29 --- last message repeated 1 time ---
REPEATED_LINE = (
DATE_TIME.setResultsName('date_time') +
pyparsing.Literal('---').suppress() +
pyparsing.CharsNotIn('---').setResultsName('process_name') +
pyparsing.Literal('---').suppress())
LINE_STRUCTURES = [
('logline', FIREWALL_LINE),
('repeated', REPEATED_LINE)]
def __init__(self):
"""Initializes a parser object."""
super(MacAppFirewallParser, self).__init__()
self._last_month = 0
self._previous_structure = None
self._year_use = 0
def _GetTimeElementsTuple(self, structure):
"""Retrieves a time elements tuple from the structure.
Args:
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Returns:
tuple: containing:
year (int): year.
month (int): month, where 1 represents January.
day_of_month (int): day of month, where 1 is the first day of the month.
hours (int): hours.
minutes (int): minutes.
seconds (int): seconds.
"""
month, day, hours, minutes, seconds = structure.date_time
# Note that dfdatetime_time_elements.TimeElements will raise ValueError
# for an invalid month.
month = timelib.MONTH_DICT.get(month.lower(), 0)
if month != 0 and month < self._last_month:
# Gap detected between years.
self._year_use += 1
return (self._year_use, month, day, hours, minutes, seconds)
def _ParseLogLine(self, parser_mediator, structure, key):
"""Parse a single log line and produce an event object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
"""
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(structure.date_time))
return
self._last_month = time_elements_tuple[1]
    # If the current entry is a repeated entry, we take the basic information
    # from the previous entry, but use the timestamp of the current entry.
if key == 'logline':
self._previous_structure = structure
else:
structure = self._previous_structure
event_data = MacAppFirewallLogEventData()
event_data.action = structure.action
event_data.agent = structure.agent
event_data.computer_name = structure.computer_name
    # Due to the use of CharsNotIn, the pyparsing structure contains leading
    # whitespace that needs to be removed.
event_data.process_name = structure.process_name.strip()
event_data.status = structure.status
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_ADDED)
parser_mediator.ProduceEventWithEventData(event, event_data)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): identifier of the structure of tokens.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key not in ('logline', 'repeated'):
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
self._ParseLogLine(parser_mediator, structure, key)
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.FIREWALL_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug((
'Unable to parse file as a Mac AppFirewall log file with error: '
'{0!s}').format(exception))
return False
if structure.action != 'creating /var/log/appfirewall.log':
logger.debug(
'Not a Mac AppFirewall log file, invalid action: {0!s}'.format(
structure.action))
return False
if structure.status != 'Error':
logger.debug(
'Not a Mac AppFirewall log file, invalid status: {0!s}'.format(
structure.status))
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug((
'Not a Mac AppFirewall log file, invalid date and time: '
'{0!s}').format(structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True
manager.ParsersManager.RegisterParser(MacAppFirewallParser)
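# Hedged, standalone illustration (not part of plaso): a simplified grammar in the
# same spirit as FIREWALL_LINE above, applied to the sample line documented in the
# class. The date/time tokens are built from plain pyparsing words here instead of
# text_parser.PyparsingConstants, so the values shown are only approximate.
if __name__ == '__main__':
  _SAMPLE = ('Nov  2 04:07:35 DarkTemplar-2.local socketfilterfw[112] '
             '<Info>: Dropbox: Allow (in:0 out:2)')
  _DATE_TIME = pyparsing.Group(
      pyparsing.Word(pyparsing.alphas, exact=3) +          # month abbreviation
      pyparsing.Word(pyparsing.nums, max=2) +              # day of month
      pyparsing.Word(pyparsing.nums, max=2) + pyparsing.Suppress(':') +
      pyparsing.Word(pyparsing.nums, max=2) + pyparsing.Suppress(':') +
      pyparsing.Word(pyparsing.nums, max=2))               # hours, minutes, seconds
  _LINE = (
      _DATE_TIME.setResultsName('date_time') +
      pyparsing.Word(pyparsing.printables).setResultsName('computer_name') +
      pyparsing.Word(pyparsing.printables).setResultsName('agent') +
      pyparsing.Literal('<').suppress() +
      pyparsing.CharsNotIn('>').setResultsName('status') +
      pyparsing.Literal('>:').suppress() +
      pyparsing.CharsNotIn(':').setResultsName('process_name') +
      pyparsing.Literal(':') +
      pyparsing.SkipTo(pyparsing.lineEnd).setResultsName('action'))
  _TOKENS = _LINE.parseString(_SAMPLE)
  # Roughly: computer_name -> 'DarkTemplar-2.local', agent -> 'socketfilterfw[112]',
  # status -> 'Info', process_name -> ' Dropbox' (leading space, hence the strip()
  # in _ParseLogLine above), action -> 'Allow (in:0 out:2)'.
  print(_TOKENS.asDict())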
| 33.87069
| 80
| 0.696742
|
0a4f07f7bcb49e069ed8958be8c70d412f0514f2
| 1,917
|
py
|
Python
|
samples/level_3/candlestick.py
|
masayang/bokeh_bokeh
|
ddc8e61abc0992bbd3694461b6d108b7f09fad27
|
[
"BSD-3-Clause"
] | null | null | null |
samples/level_3/candlestick.py
|
masayang/bokeh_bokeh
|
ddc8e61abc0992bbd3694461b6d108b7f09fad27
|
[
"BSD-3-Clause"
] | null | null | null |
samples/level_3/candlestick.py
|
masayang/bokeh_bokeh
|
ddc8e61abc0992bbd3694461b6d108b7f09fad27
|
[
"BSD-3-Clause"
] | null | null | null |
import pandas as pd
from bokeh.plotting import figure, show, output_file
from math import pi
from bokeh.models import HoverTool
from bokeh.layouts import gridplot
def read_data():
return pd.read_csv(
"SPY.csv",
names=["Date", "Open", "High", "Low", "Close", "Volume"],
parse_dates=[0]
)
def candlestick(range=100):
df = read_data()
inc = df.Close >= df.Open
dec = df.Close < df.Open
w = 12*60*60*1000 # half day in ms
plot = figure(
plot_width=1000,
x_axis_type="datetime",
title="SPY Daily",
x_range=(df.iloc[-1*range].Date, df.iloc[-1].Date),
y_range=(df.iloc[-1*range:].Low.min(), df.iloc[-1*range:].High.max())
)
plot.segment(df.Date, df.High, df.Date, df.Low, color="black")
plot.vbar(x='Date', width=w, bottom='Open', top='Close', fill_color="#D5E1DD", line_color="black", source=df[inc])
plot.vbar(x='Date', width=w, top='Open', bottom='Close', fill_color="#F2583E", line_color="black", source=df[dec])
plot.xaxis.visible = False
plot2 = figure(plot_width=1000, plot_height=125, x_range=plot.x_range, y_range=(0, df.iloc[-1*range:].Volume.max()), title=None)
plot2.vbar(x='Date', width=w, bottom=0, top='Volume', fill_color="#D5E1DD", line_color="black", source=df[inc])
plot2.vbar(x='Date', width=w, top='Volume', bottom=0, fill_color="#F2583E", line_color="black", source=df[dec])
hover_tools = HoverTool(
tooltips=[
("Date", "@Date{%F}"),
("Open", "@Open"),
("High", "@High"),
("Low", "@Low"),
("Close", "@Close"),
("Volume", "@Volume")
],
formatters={"Date": "datetime"}
)
plot.add_tools(hover_tools)
output_file("bokeh.html", title="Candle Stick Example")
return gridplot([[plot], [plot2]])
if __name__ == '__main__':
show(candlestick())
| 30.919355
| 132
| 0.591549
|
82c5fb3fcf40fbfecf8f72b0ee93f737c99fcd64
| 531
|
py
|
Python
|
Chapter03/0301.py
|
0201shj/Python-OpenCV
|
249f8cc9404e547da0f5c68000f29f2e598562a5
|
[
"MIT"
] | null | null | null |
Chapter03/0301.py
|
0201shj/Python-OpenCV
|
249f8cc9404e547da0f5c68000f29f2e598562a5
|
[
"MIT"
] | null | null | null |
Chapter03/0301.py
|
0201shj/Python-OpenCV
|
249f8cc9404e547da0f5c68000f29f2e598562a5
|
[
"MIT"
] | null | null | null |
#0301.py
import cv2
import numpy as np
# Create a white background
img = np.zeros(shape = (512, 512, 3), dtype = np.uint8) + 255
#img = np.ones((512, 512, 3), np.uint8) * 255
#img = np.full((512, 512, 3), (255, 255, 255), dtype = np.uint8)
#img = np.zeros((512, 512, 3), np.uint8) # Black background
pt1 = 100, 100
pt2 = 400, 400
cv2.rectangle(img, pt1, pt2, (0, 255, 0), 2)
cv2.line(img, (0, 0), (500, 0), (255, 0, 0), 5)
cv2.line(img, (0, 0), (0, 500), (0, 0, 255), 5)
cv2.imshow('img', img)
cv2.waitKey()
cv2.destroyAllWindows()
| 26.55
| 65
| 0.572505
|
b88b4db66bd29f0d2ec5e31f0618b1ba5fd7bd0b
| 452
|
py
|
Python
|
apps/classes/migrations/0003_alter_classes_id.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/classes/migrations/0003_alter_classes_id.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
apps/classes/migrations/0003_alter_classes_id.py
|
Ev1dentSnow/ArtemisAPI_django
|
ca7ef0ccc97114f2c5439b7b1bbc0e635facf020
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-08-14 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classes', '0002_alter_studentclasses_table'),
]
operations = [
migrations.AlterField(
model_name='classes',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 23.789474
| 111
| 0.632743
|
176c35d135bfa077e0c58dc43daf16c99749e99d
| 2,186
|
py
|
Python
|
scrapyTest/spiders/xhsd.py
|
DeSireFire/scrapyTest
|
322a0a8bdff969d0506c9169890ef2bf794fd102
|
[
"MIT"
] | null | null | null |
scrapyTest/spiders/xhsd.py
|
DeSireFire/scrapyTest
|
322a0a8bdff969d0506c9169890ef2bf794fd102
|
[
"MIT"
] | null | null | null |
scrapyTest/spiders/xhsd.py
|
DeSireFire/scrapyTest
|
322a0a8bdff969d0506c9169890ef2bf794fd102
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy,json
class XhsdSpider(scrapy.Spider):
name = 'xhsd'
allowed_domains = ['search.xhsd.com']
start_urls = ['http://search.xhsd.com/']
def start_requests(self):
url = 'https://search.xhsd.com/search?frontCategoryId=33&pageNo='
# for page in range(1,18580):
for page in range(1,2):
yield scrapy.Request(
url=url+str(page),
callback=self.parse_pages,
meta={"page": str(page)}
)
def parse_pages(self, response):
        print('Fetching page %s' % response.meta["page"])
for book in range(1,73):
book_info = {
'book_url':'',
'book_Img':'',
'book_Name':'',
'book_author':'',
'book_price':'',
}
book_info['book_url'] = response.xpath("//li[contains(@class, 'product')][%s]/div[contains(@class, 'product-image')]/a/@href"%book).extract()[0]
book_info['book_Img'] = response.xpath("//li[contains(@class, 'product')][%s]/div[contains(@class, 'product-image')]/a/img/@src"%book).extract()[0]
book_info['book_Name'] = response.xpath("//li[contains(@class, 'product')][%s]/p/a/text()"%book).extract()[0]
book_info['book_author'] = response.xpath("//li[contains(@class, 'product')][%s]/p[contains(@class, 'product-author')]/span/text()"%book).extract()
book_info['book_price'] = response.xpath("//li[contains(@class, 'product')][%s]/p[contains(@class, 'product-price')]/span/text()"%book).extract()
yield scrapy.Request(
url='https:'+book_info['book_url'],
callback=self.parse,
meta={"page": response.meta['page'],'book_info':book_info},
dont_filter=True
)
def parse(self, response):
book_info = response.meta
page_date = json.loads(response.xpath("//div[@class='spu-tab-item-detail']/@data-detail").extract()[0])
book_info['content-detail'] = page_date
print(response.url)
if __name__ == '__main__':
for book in range(1, 73):
print(book)
| 39.745455
| 159
| 0.554437
|
adef3f00988e694924c25a6e01a7835ba15ba88f
| 3,437
|
py
|
Python
|
core/src/epicli/cli/engine/InitEngine.py
|
AnnaKlank/epiphany
|
2ec7ec1bb9d0ec1343a0937a496c50f90cf4b5b1
|
[
"Apache-2.0"
] | null | null | null |
core/src/epicli/cli/engine/InitEngine.py
|
AnnaKlank/epiphany
|
2ec7ec1bb9d0ec1343a0937a496c50f90cf4b5b1
|
[
"Apache-2.0"
] | null | null | null |
core/src/epicli/cli/engine/InitEngine.py
|
AnnaKlank/epiphany
|
2ec7ec1bb9d0ec1343a0937a496c50f90cf4b5b1
|
[
"Apache-2.0"
] | null | null | null |
import os
from cli.helpers.Step import Step
from cli.helpers.build_saver import save_manifest, get_build_path
from cli.helpers.data_loader import load_all_yaml_objs, types
from cli.engine.ApplyEngine import ApplyEngine
from cli.helpers.objdict_helpers import remove_value
from cli.version import VERSION
from cli.helpers.doc_list_helpers import select_all, select_single
class InitEngine(Step):
def __init__(self, input_data):
super().__init__(__name__)
self.provider = input_data.provider
self.full_config = input_data.full_config
self.name = input_data.name
self.is_full_config = input_data.full_config
def __enter__(self):
super().__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def init(self):
input = load_all_yaml_objs(types.DEFAULT, self.provider, 'configuration/minimal-cluster-config')
input[0].specification.name = self.name
if self.is_full_config:
config = self.get_config_docs(input)
config_only = select_all(config, lambda x: not(x.kind.startswith('epiphany-cluster')))
if self.provider == 'any':
                # for the 'any' provider we want to use the default config from minimal-cluster-config
cluster_model = select_single(input, lambda x: x.kind == 'epiphany-cluster')
else:
                # for the azure|aws providers we want to use the extended default cluster-config after dry run.
                # TODO: We probably want this coming from separate documents since Azure and AWS overlap now...
cluster_model = select_single(config, lambda x: x.kind == 'epiphany-cluster')
infra = self.get_infra_docs(input)
docs = [cluster_model, *config_only, *infra]
else:
docs = [*input]
# set the provider and version for all docs
for doc in docs:
doc['provider'] = self.provider
doc['version'] = VERSION
# remove SET_BY_AUTOMATION fields
remove_value(docs, 'SET_BY_AUTOMATION')
# save document
save_manifest(docs, self.name, self.name+'.yml')
self.logger.info('Initialized new configuration and saved it to "' + os.path.join(get_build_path(self.name), self.name + '.yml') + '"')
return 0
def get_config_docs(self, input_docs):
cluster_config_path = save_manifest(input_docs, self.name, self.name + '.yml')
args = type('obj', (object,), {'file': cluster_config_path})()
# generate the config documents
with ApplyEngine(args) as build:
config = build.dry_run()
return config
def get_infra_docs(self, input_docs):
if self.provider == 'any':
            # For the 'any' provider we can include the machine documents from the minimal-cluster-config
infra = select_all(input_docs, lambda x: x.kind.startswith('infrastructure/machine'))
else:
            # VMs are currently the infrastructure documents the user might interact with for:
# - type/size
# - distro
# - network security rules
# ...
# So we add the defaults here.
# TODO: Check if we want to include possible other infrastructure documents.
infra = load_all_yaml_objs(types.DEFAULT, self.provider, 'infrastructure/virtual-machine')
return infra
| 40.435294
| 143
| 0.649985
|
cb72ae39b74ec235bacbf8d51619980602a51cc5
| 2,403
|
py
|
Python
|
contrib/opentimelineio_contrib/adapters/tests/test_kdenlive_adapter.py
|
michdolan/OpenTimelineIO
|
1ec6f07f1af525ba4ca0aa91e01e5939d6237f01
|
[
"Apache-2.0"
] | null | null | null |
contrib/opentimelineio_contrib/adapters/tests/test_kdenlive_adapter.py
|
michdolan/OpenTimelineIO
|
1ec6f07f1af525ba4ca0aa91e01e5939d6237f01
|
[
"Apache-2.0"
] | 4
|
2022-03-09T22:28:42.000Z
|
2022-03-14T15:16:50.000Z
|
contrib/opentimelineio_contrib/adapters/tests/test_kdenlive_adapter.py
|
michdolan/OpenTimelineIO
|
1ec6f07f1af525ba4ca0aa91e01e5939d6237f01
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
import unittest
import opentimelineio as otio
import opentimelineio.test_utils as otio_test_utils
import os
class AdaptersKdenliveTest(unittest.TestCase, otio_test_utils.OTIOAssertions):
def __init__(self, *args, **kwargs):
super(AdaptersKdenliveTest, self).__init__(*args, **kwargs)
def test_library_roundtrip(self):
timeline = otio.adapters.read_from_file(
os.path.join(os.path.dirname(__file__), "sample_data",
"kdenlive_example.kdenlive"))
self.assertIsNotNone(timeline)
self.assertEqual(len(timeline.tracks), 5)
self.assertEqual(len(timeline.video_tracks()), 2)
self.assertEqual(len(timeline.audio_tracks()), 3)
clip_urls = (('AUD0002.OGG',),
('AUD0001.OGG', 'AUD0001.OGG'),
('VID0001.MKV', 'VID0001.MKV'),
('VID0001.MKV', 'VID0001.MKV'),
('VID0002.MKV', 'VID0003.MKV'))
for n, track in enumerate(timeline.tracks):
self.assertTupleEqual(
clip_urls[n],
tuple(c.media_reference.target_url
for c in track
if isinstance(c, otio.schema.Clip) and
isinstance(
c.media_reference,
otio.schema.ExternalReference)))
kdenlive_xml = otio.adapters.write_to_string(timeline, "kdenlive")
self.assertIsNotNone(kdenlive_xml)
new_timeline = otio.adapters.read_from_string(kdenlive_xml, "kdenlive")
self.assertJsonEqual(timeline, new_timeline)
def test_from_fcp_example(self):
timeline = otio.adapters.read_from_file(
os.path.join(
os.path.dirname(__file__),
"sample_data",
"kdenlive_example_from_fcp.xml",
),
)
kdenlive_xml = otio.adapters.write_to_string(timeline, "kdenlive")
self.assertIsNotNone(kdenlive_xml)
new_timeline = otio.adapters.read_from_string(kdenlive_xml, "kdenlive")
troublesome_clip = new_timeline.video_tracks()[0][35]
self.assertEqual(
troublesome_clip.source_range.duration.value,
807,
)
if __name__ == '__main__':
unittest.main()
| 34.328571
| 79
| 0.608822
|
7a276408c92d94fe153dd402ba55b7b6b026f61a
| 3,762
|
py
|
Python
|
cogs/inactive/memelicense.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-08-09T15:43:07.000Z
|
2022-03-11T15:12:21.000Z
|
cogs/inactive/memelicense.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-10-29T02:32:40.000Z
|
2022-01-13T03:12:45.000Z
|
cogs/inactive/memelicense.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 1
|
2021-06-09T08:06:31.000Z
|
2021-06-09T08:06:31.000Z
|
import datetime
import string
from functools import partial
from io import BytesIO
from typing import Union
import aiohttp
import discord
from PIL import Image, ImageDraw, ImageFont
from discord.ext import commands
class Memes(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self.session = aiohttp.ClientSession(loop=bot.loop)
async def get_avatar(self, user: Union[discord.User, discord.Member]) -> bytes:
avatar_url = str(user.avatar.with_format("png"))
async with self.session.get(avatar_url) as response:
avatar_bytes = await response.read()
return avatar_bytes
@staticmethod
def processing(avatar_bytes: bytes, member: discord.Member) -> BytesIO:
dt = datetime.date.today()
year = dt.year
with Image.open("./resources/images/memetemps/template_memelicense.png") as im:
im = im.convert("RGBA")
font = ImageFont.truetype(f'./resources/fonts/arial.ttf', size=18)
font_sig = ImageFont.truetype(f'./resources/fonts/kunstler.ttf', size=48)
draw = ImageDraw.Draw(im)
# font_color = member.color.to_rgb()
font_color = (0,79,74)
draw.text((510, 135), str(member.id), fill=font_color, font=font)
draw.text((510, 200), f'#{str(member.discriminator)}', fill=font_color, font=font)
draw.text((510, 250), member.name, fill=font_color, font=font)
if member.display_name != member.name and all(c in string.printable for c in member.display_name):
draw.text((510, 270), member.display_name, fill=font_color, font=font)
draw.text((820, 135), str(member.created_at.strftime("%m/%d/%Y")), fill=font_color, font=font)
draw.text((820, 170), str(member.created_at.strftime(f"%m/%d/{int(year)+5}")), fill=font_color, font=font)
draw.text((540, 445), "C", fill=font_color, font=font)
draw.text((540, 468), ("NONE" if not member.is_on_mobile() else "MOBL"), fill=font_color, font=font)
draw.text((505, 512), str(dt.strftime("%m/%d/%Y")), fill=font_color, font=font)
draw.text((90, 495), str(member.name), fill=0x000000, font=font_sig)
avatar = Image.open(BytesIO(avatar_bytes)).convert('RGB').resize((371, 371))
av_loc = (64, 118)
im.paste(avatar, av_loc)
alpha_layer = Image.new('RGBA', im.size, (0,0,0,0))
av1 = avatar.resize((150,150))
av1.putalpha(64)
av2 = av1
av2.putalpha(128)
av2 = av2.convert('LA')
av1_loc = (725, 300)
alpha_layer.paste(av1, av1_loc)
offset = 75
av2_loc = (av1_loc[0]+offset, av1_loc[1]+offset)
alpha_layer.paste(av2, av2_loc)
im = Image.alpha_composite(im, alpha_layer)
im = im.convert("RGB")
final_buffer = BytesIO()
im.save(final_buffer, "png")
final_buffer.seek(0)
return final_buffer
@commands.command(aliases=["licensememe"])
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.guild_only()
async def memelicense(self, ctx):
"""Create a meme license."""
member = ctx.author
async with ctx.typing():
avatar_bytes = await self.get_avatar(member)
fn = partial(self.processing, avatar_bytes, member)
final_buffer = await self.bot.loop.run_in_executor(None, fn)
file = discord.File(filename=f"memelicense_{member.display_name}.png", fp=final_buffer)
await ctx.send(file=file)
def setup(bot: commands.Bot):
bot.add_cog(Memes(bot))
| 38
| 118
| 0.610314
|
4f9dd0b3161276864494239c35a29c918a77f350
| 9,633
|
py
|
Python
|
pandas/tools/tests/test_util.py
|
michaelaye/pandas
|
c6110e25b3eceb2f25022c2aa9ccea03c0b8b359
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 3
|
2017-02-25T14:06:43.000Z
|
2017-05-25T05:18:03.000Z
|
pandas/tools/tests/test_util.py
|
michaelaye/pandas
|
c6110e25b3eceb2f25022c2aa9ccea03c0b8b359
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 11
|
2020-06-05T17:24:17.000Z
|
2022-03-11T23:15:26.000Z
|
pandas/tools/tests/test_util.py
|
michaelaye/pandas
|
c6110e25b3eceb2f25022c2aa9ccea03c0b8b359
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | 3
|
2017-02-25T15:26:47.000Z
|
2017-12-20T06:27:07.000Z
|
import os
import locale
import codecs
import nose
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas import date_range, Index
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result = cartesian_product([x, y])
expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']),
np.array([1, 22, 1, 22, 1, 22])]
assert_equal(result, expected)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result = [Index(y).day for y in cartesian_product([x, x])]
expected = [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])]
assert_equal(result, expected)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is not None:
lang, enc = LOCALE_OVERRIDE.split('.')
else:
lang, enc = 'it_CH', 'UTF-8'
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
with tm.assertRaises(ValueError):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| 33.919014
| 75
| 0.58746
|
9336b8c23bfe1937df465ef468130b1d81773ad4
| 2,327
|
py
|
Python
|
epac/tests/utils.py
|
ilgrad/pylearn-epac
|
bb1915ba999828f7fa1dd1523ebb18f38ff39b06
|
[
"BSD-3-Clause"
] | null | null | null |
epac/tests/utils.py
|
ilgrad/pylearn-epac
|
bb1915ba999828f7fa1dd1523ebb18f38ff39b06
|
[
"BSD-3-Clause"
] | null | null | null |
epac/tests/utils.py
|
ilgrad/pylearn-epac
|
bb1915ba999828f7fa1dd1523ebb18f38ff39b06
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 20 June 2013
@author: jinpeng.li@cea.fr
@author: edouard.duchesnay@cea.fr
@author: benoit.da_mota@inria.fr
"""
import numpy as np
import copy
def _is_numeric(obj):
return isinstance(obj, (int, float, complex))
def _is_dict_or_array_or_list(obj):
if type(obj) is np.ndarray:
return True
if type(obj) is list:
return True
if type(obj) is dict:
return True
return False
def _is_array_or_list(obj):
if type(obj) is np.ndarray:
return True
if type(obj) is list:
return True
return False
def isequal(obj1, obj2):
_EPSILON = 0.00001
if _is_numeric(obj1):
if (np.absolute(obj1 - obj2) > _EPSILON):
return False
else:
return True
elif (isinstance(obj1, dict)):
for key in obj1.keys():
if not isequal(obj1[key], obj2[key]):
return False
return True
elif (_is_array_or_list(obj1)):
obj1 = np.asarray(list(obj1))
obj2 = np.asarray(list(obj2))
for index in range(len(obj1.flat)):
if not isequal(obj1.flat[index], obj2.flat[index]):
return False
return True
else:
return obj1 == obj2
def compare_leaf_res(leaf_res1, leaf_res2):
for i in range(len(leaf_res1)):
for key in leaf_res1[i][leaf_res1[i].keys()[0]].keys():
return (np.all(leaf_res1[i][leaf_res1[i].keys()[0]][key]
== leaf_res2[i][leaf_res2[i].keys()[0]][key]))
def compare_two_node(node1, node2):
leaf_res1 = []
for leaf1 in node1.walk_leaves():
res = copy.copy(leaf1.load_results())
leaf_res1.append(res)
leaf_res2 = []
for leaf2 in node2.walk_leaves():
res = copy.copy(leaf2.load_results())
leaf_res2.append(res)
return compare_leaf_res(leaf_res1, leaf_res2)
def comp_2wf_reduce_res(wf1, wf2):
res_wf1 = wf1.reduce()
res_wf2 = wf2.reduce()
return isequal(res_wf1, res_wf2)
def displayres(d, indent=0):
print(repr(d))
# for key, value in d.iteritems():
# print '\t' * indent + str(key)
# if isinstance(value, dict):
# displayres(value, indent + 1)
# else:
# print '\t' * (indent + 1) + str(value)
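if __name__ == '__main__':
    # Hedged demo (not part of the original epac test helpers): isequal recurses
    # through dicts and array-likes and tolerates float noise below _EPSILON,
    # unlike a plain == comparison.
    print(isequal({'a': [1.0, 2.0]}, {'a': np.array([1.0, 2.000001])}))  # True
    print(isequal([1.0, 2.0], [1.0, 2.1]))                               # False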
| 24.239583
| 68
| 0.594327
|
f2527f9b50b08f3395676d7a5c4440a528f4e4cf
| 2,753
|
py
|
Python
|
mayan/apps/converter/apps.py
|
edsonbin/maxacali
|
1fd3ac99543788f77f1a7795981179b2cc8c4421
|
[
"Apache-2.0"
] | 1
|
2020-07-15T02:56:02.000Z
|
2020-07-15T02:56:02.000Z
|
mayan/apps/converter/apps.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/converter/apps.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 2
|
2020-02-24T21:02:31.000Z
|
2021-01-05T23:52:01.000Z
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from common import MayanAppConfig, menu_object, menu_sidebar
from common.classes import Package
from navigation import SourceColumn
from .links import (
link_transformation_create, link_transformation_delete,
link_transformation_edit
)
class ConverterApp(MayanAppConfig):
name = 'converter'
verbose_name = _('Converter')
def ready(self):
super(ConverterApp, self).ready()
Transformation = self.get_model('Transformation')
Package(label='Pillow', license_text='''
The Python Imaging Library (PIL) is
Copyright (c) 1997-2011 by Secret Labs AB
Copyright (c) 1995-2011 by Fredrik Lundh
By obtaining, using, and/or copying this software and/or its associated documentation, you agree that you have read, understood, and will comply with the following terms and conditions:
Permission to use, copy, modify, and distribute this software and its associated documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies, and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Secret Labs AB or the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission.
SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
''')
SourceColumn(source=Transformation, label=_('Order'), attribute='order')
SourceColumn(
source=Transformation, label=_('Transformation'),
func=lambda context: unicode(context['object'])
)
SourceColumn(
source=Transformation, label=_('Arguments'), attribute='arguments'
)
menu_object.bind_links(
links=(link_transformation_edit, link_transformation_delete),
sources=(Transformation,)
)
menu_sidebar.bind_links(
links=(link_transformation_create,), sources=(Transformation,)
)
menu_sidebar.bind_links(
links=(link_transformation_create,),
sources=(
'converter:transformation_create',
'converter:transformation_list'
)
)
| 45.131148
| 485
| 0.72757
|
eaf37b73c02cbfeee10775ab71693715cb7e1652
| 2,127
|
py
|
Python
|
tests/nemo_text_processing/en/test_roman.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | null | null | null |
tests/nemo_text_processing/en/test_roman.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | null | null | null |
tests/nemo_text_processing/en/test_roman.py
|
gkucsko/NeMo
|
c1ae0a7744d9a0ac206f61b2883ce00c9b8339b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, PYNINI_AVAILABLE, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestRoman:
normalizer_en = (
Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if PYNINI_AVAILABLE
else None
)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if PYNINI_AVAILABLE and RUN_AUDIO_BASED_TESTS
else None
)
# address is tagged by the measure class
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_roman.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE, reason="`pynini` not installed, please install via nemo_text_processing/setup.sh"
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
# pred = self.normalizer_en.normalize(test_input, verbose=False)
# assert pred == expected
#
# if self.normalizer_with_audio_en:
# pred_non_deterministic = self.normalizer_with_audio_en.normalize(
# test_input, n_tagged=30, punct_post_process=False,
# )
# assert expected in pred_non_deterministic
pass
| 40.903846
| 111
| 0.735308
|
29238e503072468f324a14ee78bfdb30b601670a
| 3,532
|
py
|
Python
|
code.py
|
harsh199910/Movie-recommandation-
|
1dbd13d3eb7ae96428236c213a867f1bf8a2014f
|
[
"Apache-2.0"
] | 1
|
2019-04-18T19:28:02.000Z
|
2019-04-18T19:28:02.000Z
|
code.py
|
harsh199910/Movie-recommandation-
|
1dbd13d3eb7ae96428236c213a867f1bf8a2014f
|
[
"Apache-2.0"
] | null | null | null |
code.py
|
harsh199910/Movie-recommandation-
|
1dbd13d3eb7ae96428236c213a867f1bf8a2014f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 18:01:19 2019
@author: HARSH
"""
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
movies = pd.read_csv('ml-1m/movies.dat', sep ='::', header = None, engine = 'python', encoding = 'latin-1')
users = pd.read_csv('ml-1m/users.dat', sep ='::', header = None, engine = 'python', encoding = 'latin-1')
ratings = pd.read_csv('ml-1m/ratings.dat', sep ='::', header = None, engine = 'python', encoding = 'latin-1')
training_set = pd.read_csv('ml-100k/u1.base', delimiter = '\t')
training_set = np.array(training_set, dtype = 'int')
test_set = pd.read_csv('ml-100k/u1.test', delimiter = '\t')
test_set = np.array(test_set, dtype = 'int')
nb_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
nb_movies = int(max(max(training_set[:,1]), max(test_set[:,1])))
def convert(data):
new_data = []
for id_users in range(1, nb_users + 1):
id_movies = data[:,1][data[:,0] == id_users]
id_ratings = data[:, 2][data[:, 0] == id_users]
ratings = np.zeros(nb_movies)
ratings[id_movies - 1] = id_ratings
new_data.append(list(ratings))
return new_data
training_set = convert(training_set)
test_set = convert(test_set)
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
training_set[training_set == 0] = -1
training_set[training_set == 1] = 0
training_set[training_set == 2] = 0
training_set[training_set >= 3] = 1
test_set[test_set == 0] = -1
test_set[test_set == 1] = 0
test_set[test_set == 2] = 0
test_set[test_set >= 3] = 1
class RBN():
    def __init__(self, nv, nh):
        self.w = torch.randn(nh, nv)   # weights, shape (hidden, visible)
        self.a = torch.randn(1, nh)    # hidden-unit bias
        self.b = torch.randn(1, nv)    # visible-unit bias
    def sample_h(self, x):
        wx = torch.mm(x, self.w.t())
        activation = wx + self.a.expand_as(wx)
        p_h_given_v = torch.sigmoid(activation)
        return p_h_given_v, torch.bernoulli(p_h_given_v)
    def sample_v(self, y):
        wy = torch.mm(y, self.w)
        activation = wy + self.b.expand_as(wy)
        p_v_given_h = torch.sigmoid(activation)
        return p_v_given_h, torch.bernoulli(p_v_given_h)
    def train(self, v0, vk, ph0, phk):
        # Contrastive-divergence update; transposed so it matches self.w's (nh, nv) shape.
        self.w += (torch.mm(v0.t(), ph0) - torch.mm(vk.t(), phk)).t()
        self.b += torch.sum((v0 - vk), 0)
        self.a += torch.sum((ph0 - phk), 0)
nv = len(training_set[0])
nh = 100
batch_size = 100
rbm = RBN(nv, nh)
nb_epoch = 10
for epoch in range (1, nb_epoch + 1):
train_loss = 0
s = 0.
for id_user in range(0, nb_users - batch_size, batch_size):
vk = training_set[id_user:id_user+batch_size]
v0 = training_set[id_user:id_user+batch_size]
ph0,_ = rbm.sample_h(v0)
for k in range(10):
_,hk = rbm.sample_h(vk)
            _,vk = rbm.sample_v(hk)
vk[v0<0] = v0[v0<0]
phk,_ = rbm.sample_h(vk)
rbm.train(v0, vk, ph0, phk)
train_loss += torch.mean(torch.abs(v0[v0>=0] - vk[v0>=0]))
s += 1.
print('epoch: ' +str(epoch)+' loss: '+str(train_loss/s))
test_loss = 0
s = 0.
for id_user in range(nb_users):
v = training_set[id_user:id_user+1]
vt = test_set[id_user:id_user+1]
if len(vt[vt>=0]) > 0:
_,h = rbm.sample_h(v)
_,v = rbm.sample_v(h)
test_loss += torch.mean(torch.abs(vt[vt>=0] - v[vt>=0]))
s += 1.
print('test loss: '+str(test_loss/s))
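# Hedged usage sketch (assumption: run after the training loop above; the user index
# and the top-10 cut-off are illustrative choices, not part of the original script):
# reconstruct one user's preferences with a single Gibbs step and surface movies the
# RBM scores highly but the user has not rated yet.
user_id = 0
v = training_set[user_id:user_id + 1]
_, h = rbm.sample_h(v)
p_v, _ = rbm.sample_v(h)                  # probability the user would like each movie
unseen = (v[0] < 0).float()               # -1 marks movies the user has not rated
recommend_scores = p_v[0] * unseen
top_scores, top_movie_ids = torch.topk(recommend_scores, 10)
print('recommended movie indices:', top_movie_ids.tolist())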
| 32.109091
| 109
| 0.614666
|
0231da0d45578b0397a541f5ed68a97448fc4c23
| 1,842
|
py
|
Python
|
datasets/characters/download.py
|
mommothazaz123/mnist_gan
|
d5abb693dc3798f459053529d036a2a39a58bb78
|
[
"MIT"
] | null | null | null |
datasets/characters/download.py
|
mommothazaz123/mnist_gan
|
d5abb693dc3798f459053529d036a2a39a58bb78
|
[
"MIT"
] | null | null | null |
datasets/characters/download.py
|
mommothazaz123/mnist_gan
|
d5abb693dc3798f459053529d036a2a39a58bb78
|
[
"MIT"
] | null | null | null |
import collections
import json
import re
import shutil
import time
import requests
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/60.0.3112.113 Safari/537.36"
}
with open('characters.json') as f:
characters = json.load(f)
start = time.time()
num_characters_with_image = 0
failed = 0
responses = collections.Counter()
filetypes = collections.Counter()
downloaded = set()
duplicates = 0
for i, character in enumerate(characters):
image = character['stats']['image']
match = re.match(r"(https?://)?([/.\w\s-]*)\.(jpg|gif|png|jpeg|bmp|webp)", image)
if match:
if image in downloaded:
print(f"Skipping duplicate {image}...\n")
duplicates += 1
continue
else:
downloaded.add(image)
print(f"Getting {image}...")
try:
img = requests.get(image, stream=True, headers=HEADERS)
except Exception as e:
print(f"Failed: {e}")
failed += 1
responses[None] += 1
print()
continue
print(img.status_code)
responses[img.status_code] += 1
if img.status_code == 200:
num_characters_with_image += 1
filetypes[match.group(3)] += 1
img.raw.decode_content = True
with open(f'raw/{num_characters_with_image}.{match.group(3)}', 'wb') as out_file:
shutil.copyfileobj(img.raw, out_file)
else:
failed += 1
print()
elif image:
print(f"Unknown image URL: {image}")
print()
end = time.time()
print(f"Done! Downloaded {num_characters_with_image} images with {failed} failures and {duplicates} duplicates "
f"in {end - start}s.")
print(responses)
print(filetypes)
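# Hedged illustration (standalone; the example.com URL is made up): what the image-URL
# regex above extracts. Group 3 becomes the file extension used when saving into raw/.
_demo = re.match(r"(https?://)?([/.\w\s-]*)\.(jpg|gif|png|jpeg|bmp|webp)",
                 "https://example.com/avatars/42/abc.png")
print(_demo.group(3) if _demo else None)  # -> 'png'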
| 28.78125
| 112
| 0.592834
|
de54e66bc3b2a932637420b79b039becefbd9abc
| 4,322
|
py
|
Python
|
tests/xbmcplugin.py
|
jelly/plugin.fosdem.org
|
6cbed96f9df3bb4165e5cc9eb26d8abb4bfc3f9e
|
[
"MIT"
] | 7
|
2019-03-27T00:02:59.000Z
|
2022-02-07T20:29:49.000Z
|
tests/xbmcplugin.py
|
jelly/plugin.fosdem.org
|
6cbed96f9df3bb4165e5cc9eb26d8abb4bfc3f9e
|
[
"MIT"
] | 12
|
2019-01-06T15:53:16.000Z
|
2022-02-13T00:59:22.000Z
|
tests/xbmcplugin.py
|
jelly/plugin.fosdem.org
|
6cbed96f9df3bb4165e5cc9eb26d8abb4bfc3f9e
|
[
"MIT"
] | 3
|
2019-02-14T16:08:15.000Z
|
2019-05-05T17:06:37.000Z
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""This file implements the Kodi xbmcplugin module, either using stubs or alternative functionality"""
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import, division, print_function, unicode_literals
from xbmc import LOGFATAL, LOGINFO, log
from xbmcaddon import Addon
from xbmcextra import kodi_to_ansi, uri_to_path
try: # Python 3
from urllib.error import HTTPError
from urllib.request import Request, urlopen
except ImportError: # Python 2
from urllib2 import HTTPError, Request, urlopen
SORT_METHOD_NONE = 0
SORT_METHOD_LABEL = 1
SORT_METHOD_LABEL_IGNORE_THE = 2
SORT_METHOD_DATE = 3
SORT_METHOD_SIZE = 4
SORT_METHOD_FILE = 5
SORT_METHOD_DRIVE_TYPE = 6
SORT_METHOD_TRACKNUM = 7
SORT_METHOD_DURATION = 8
SORT_METHOD_TITLE = 9
SORT_METHOD_TITLE_IGNORE_THE = 10
SORT_METHOD_ARTIST = 11
SORT_METHOD_ARTIST_AND_YEAR = 12
SORT_METHOD_ARTIST_IGNORE_THE = 13
SORT_METHOD_ALBUM = 14
SORT_METHOD_ALBUM_IGNORE_THE = 15
SORT_METHOD_GENRE = 16
SORT_METHOD_COUNTRY = 17
SORT_METHOD_VIDEO_YEAR = 18 # This is SORT_METHOD_YEAR in Kodi
SORT_METHOD_VIDEO_RATING = 19
SORT_METHOD_VIDEO_USER_RATING = 20
SORT_METHOD_DATEADDED = 21
SORT_METHOD_PROGRAM_COUNT = 22
SORT_METHOD_PLAYLIST_ORDER = 23
SORT_METHOD_EPISODE = 24
SORT_METHOD_VIDEO_TITLE = 25
SORT_METHOD_VIDEO_SORT_TITLE = 26
SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = 27
SORT_METHOD_PRODUCTIONCODE = 28
SORT_METHOD_SONG_RATING = 29
SORT_METHOD_SONG_USER_RATING = 30
SORT_METHOD_MPAA_RATING = 31
SORT_METHOD_VIDEO_RUNTIME = 32
SORT_METHOD_STUDIO = 33
SORT_METHOD_STUDIO_IGNORE_THE = 34
SORT_METHOD_FULLPATH = 35
SORT_METHOD_LABEL_IGNORE_FOLDERS = 36
SORT_METHOD_LASTPLAYED = 37
SORT_METHOD_PLAYCOUNT = 38
SORT_METHOD_LISTENERS = 39
SORT_METHOD_UNSORTED = 40
SORT_METHOD_CHANNEL = 41
SORT_METHOD_CHANNEL_NUMBER = 42
SORT_METHOD_BITRATE = 43
SORT_METHOD_DATE_TAKEN = 44
def addDirectoryItem(handle, path, listitem, isFolder=False):
"""A reimplementation of the xbmcplugin addDirectoryItems() function"""
label = kodi_to_ansi(listitem.label)
path = uri_to_path(path) if path else ''
# perma = kodi_to_ansi(listitem.label) # FIXME: Add permalink
bullet = '»' if isFolder else '·'
print('{bullet} {label}{path}'.format(bullet=bullet, label=label, path=path))
return True
def addDirectoryItems(handle, listing, length):
"""A reimplementation of the xbmcplugin addDirectoryItems() function"""
for item in listing:
addDirectoryItem(handle, item[0], item[1], item[2])
return True
def addSortMethod(handle, sortMethod):
"""A stub implementation of the xbmcplugin addSortMethod() function"""
def endOfDirectory(handle, succeeded=True, updateListing=True, cacheToDisc=True):
"""A stub implementation of the xbmcplugin endOfDirectory() function"""
# print(kodi_to_ansi('[B]-=( [COLOR=cyan]--------[/COLOR] )=-[/B]'))
def getSetting(handle, key):
"""A stub implementation of the xbmcplugin getSetting() function"""
return Addon().getSetting(key)
def setContent(handle, content):
"""A stub implementation of the xbmcplugin setContent() function"""
def setPluginFanart(handle, image, color1=None, color2=None, color3=None):
"""A stub implementation of the xbmcplugin setPluginFanart() function"""
def setPluginCategory(handle, category):
"""A reimplementation of the xbmcplugin setPluginCategory() function"""
print(kodi_to_ansi('[B]-=( [COLOR=cyan]%s[/COLOR] )=-[/B]' % category))
def setResolvedUrl(handle, succeeded, listitem):
"""A stub implementation of the xbmcplugin setResolvedUrl() function"""
print(kodi_to_ansi('[B][COLOR=yellow]Title[/COLOR]: {label}[/B]'.format(label=listitem.label)))
print(kodi_to_ansi('[COLOR=yellow]URL[/COLOR]:\n{url}'.format(url=listitem.path)))
print(kodi_to_ansi('[COLOR=yellow]Plot[/COLOR]:\n{plot}'.format(**listitem.info)))
request = Request(listitem.path)
request.get_method = lambda: 'HEAD'
try:
response = urlopen(request)
log('Stream playing successfully: %s' % response.code, LOGINFO)
except HTTPError as exc:
log('Playing stream returned: %s' % exc, LOGFATAL)
| 34.854839
| 102
| 0.758677
|
c1228489e96857d9f4a18f2a30971b3aae585ca1
| 2,626
|
py
|
Python
|
rasa/core/constants.py
|
YourThomasLee/rasa
|
501b1b312c158e19e54c67cbca8ed3728ea60ca3
|
[
"Apache-2.0"
] | 1
|
2020-07-07T06:58:51.000Z
|
2020-07-07T06:58:51.000Z
|
rasa/core/constants.py
|
YourThomasLee/rasa
|
501b1b312c158e19e54c67cbca8ed3728ea60ca3
|
[
"Apache-2.0"
] | 75
|
2020-08-06T08:55:42.000Z
|
2022-03-01T13:22:11.000Z
|
rasa/core/constants.py
|
YourThomasLee/rasa
|
501b1b312c158e19e54c67cbca8ed3728ea60ca3
|
[
"Apache-2.0"
] | null | null | null |
from rasa.constants import DEFAULT_NLU_FALLBACK_INTENT_NAME
DEFAULT_SERVER_PORT = 5005
DEFAULT_SERVER_FORMAT = "{}://localhost:{}"
DEFAULT_SERVER_URL = DEFAULT_SERVER_FORMAT.format("http", DEFAULT_SERVER_PORT)
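# With the defaults above, DEFAULT_SERVER_URL evaluates to "http://localhost:5005".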
DEFAULT_NLU_FALLBACK_THRESHOLD = 0.0
DEFAULT_CORE_FALLBACK_THRESHOLD = 0.0
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
DEFAULT_RESPONSE_TIMEOUT = 60 * 60 # 1 hour
DEFAULT_LOCK_LIFETIME = 60 # in seconds
REQUESTED_SLOT = "requested_slot"
# slots for knowledge base
SLOT_LISTED_ITEMS = "knowledge_base_listed_objects"
SLOT_LAST_OBJECT = "knowledge_base_last_object"
SLOT_LAST_OBJECT_TYPE = "knowledge_base_last_object_type"
DEFAULT_KNOWLEDGE_BASE_ACTION = "action_query_knowledge_base"
# start of special user message section
INTENT_MESSAGE_PREFIX = "/"
EXTERNAL_MESSAGE_PREFIX = "EXTERNAL: "
USER_INTENT_RESTART = "restart"
USER_INTENT_SESSION_START = "session_start"
USER_INTENT_BACK = "back"
USER_INTENT_OUT_OF_SCOPE = "out_of_scope"
DEFAULT_INTENTS = [
USER_INTENT_RESTART,
USER_INTENT_BACK,
USER_INTENT_OUT_OF_SCOPE,
USER_INTENT_SESSION_START,
DEFAULT_NLU_FALLBACK_INTENT_NAME,
]
ACTION_NAME_SENDER_ID_CONNECTOR_STR = "__sender_id:"
BEARER_TOKEN_PREFIX = "Bearer "
# Key to access data in the event metadata
# It specifies if an event was caused by an external entity (e.g. a sensor).
IS_EXTERNAL = "is_external"
# the lowest priority intended to be used by machine learning policies
DEFAULT_POLICY_PRIORITY = 1
# the priority intended to be used by mapping policies
MAPPING_POLICY_PRIORITY = 2
# the priority intended to be used by memoization policies
# it is higher than default and mapping to prioritize training stories
MEMOIZATION_POLICY_PRIORITY = 3
# the priority intended to be used by fallback policies
# it is higher than memoization to prioritize fallback
FALLBACK_POLICY_PRIORITY = 4
# the priority intended to be used by form policies
# it is the highest to prioritize form to the rest of the policies
FORM_POLICY_PRIORITY = 5
UTTER_PREFIX = "utter_"
RESPOND_PREFIX = "respond_"
DIALOGUE = "dialogue"
DEFAULT_CATEGORICAL_SLOT_VALUE = "__other__"
# RabbitMQ message property header added to events published using `rasa export`
RASA_EXPORT_PROCESS_ID_HEADER_NAME = "rasa-export-process-id"
# Name of the environment variable defining the PostgreSQL schema to access. See
# https://www.postgresql.org/docs/9.1/ddl-schemas.html for more details.
POSTGRESQL_SCHEMA = "POSTGRESQL_SCHEMA"
# Names of the environment variables defining PostgreSQL pool size and max overflow
POSTGRESQL_POOL_SIZE = "SQL_POOL_SIZE"
POSTGRESQL_MAX_OVERFLOW = "SQL_MAX_OVERFLOW"
| 31.261905
| 83
| 0.809596
|
b0ffd5049752f66a25a8ba95907b8a2fadc63eb9
| 1,334
|
py
|
Python
|
tests/unit/test_imdb_title_page_parser.py
|
alexandrahably/imdb_scraper
|
e364c9cdccb42369fcc84de54c15621cfced9b5a
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_imdb_title_page_parser.py
|
alexandrahably/imdb_scraper
|
e364c9cdccb42369fcc84de54c15621cfced9b5a
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_imdb_title_page_parser.py
|
alexandrahably/imdb_scraper
|
e364c9cdccb42369fcc84de54c15621cfced9b5a
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import os
from pathlib import Path
from unittest import TestCase
from tests.unit.helpers.helpers import path_for_resource
from src.imdb.parsers import IMDBTitlePageParser
def movie_without_oscars_page_text():
movie_page_no_oscars_path = path_for_resource('movie_page_no_oscars.html')
    with open(movie_page_no_oscars_path, "r") as file:
        text = file.read()
    return text
def movie_with_oscars_page_text():
movie_page_with_oscars_path = path_for_resource('movie_page_with_oscars.txt')
    with open(movie_page_with_oscars_path, "r") as file:
        text = file.read()
    return text
class TestIMDBTitlePageParser(TestCase):
def test_parse_num_of_won_oscars_from_movie_page_without_oscars(self):
movie_title_page_no_oscars_text = movie_without_oscars_page_text()
num_of_oscars = IMDBTitlePageParser.parse_num_of_won_oscars_from_movie_page(movie_title_page_no_oscars_text)
self.assertEqual(num_of_oscars, 0)
def test_parse_num_of_won_oscars_from_movie_page_with_oscars(self):
movie_title_page_with_oscars_text = movie_with_oscars_page_text()
num_of_oscars = IMDBTitlePageParser.parse_num_of_won_oscars_from_movie_page(movie_title_page_with_oscars_text)
self.assertEqual(num_of_oscars, 3)
| 37.055556
| 118
| 0.788606
|
86d31c6d3ef64efa6c86f9a1e58733d3626cdced
| 8,610
|
py
|
Python
|
editor/views/generic.py
|
NabeelSait/editor
|
035c1a7ffdaa4a9557bec17f2d5a7ddb3b316fce
|
[
"Apache-2.0"
] | null | null | null |
editor/views/generic.py
|
NabeelSait/editor
|
035c1a7ffdaa4a9557bec17f2d5a7ddb3b316fce
|
[
"Apache-2.0"
] | null | null | null |
editor/views/generic.py
|
NabeelSait/editor
|
035c1a7ffdaa4a9557bec17f2d5a7ddb3b316fce
|
[
"Apache-2.0"
] | null | null | null |
import json
from django.shortcuts import redirect
from django import http
from django.views import generic
from django.template.loader import get_template
from django.template import RequestContext
from django.template.response import TemplateResponse
import reversion
from editor.models import NewStampOfApproval, Comment, RestorePoint, EditorItem, Access
from accounts.util import user_json
def forbidden_response(request,message=None):
return TemplateResponse(
request=request,
context={'message':message},
template='403.html',
status=403
)
# from http://stackoverflow.com/questions/18172102/object-ownership-validation-in-django-updateview
class AuthorRequiredMixin(object):
def dispatch(self, request, *args, **kwargs):
if self.get_object().author != self.request.user:
template = get_template("403.html")
return http.HttpResponseForbidden(template.render(RequestContext(self.request).flatten()))
result = super(AuthorRequiredMixin, self).dispatch(request, *args, **kwargs)
return result
class CanEditMixin(object):
def dispatch(self, request, *args, **kwargs):
obj = self.get_object()
if not obj.can_be_edited_by(request.user):
template = get_template("403.html")
return http.HttpResponseForbidden(template.render(RequestContext(self.request).flatten()))
return super().dispatch(request, *args, **kwargs)
class CanViewMixin(object):
def dispatch(self, request, *args, **kwargs):
obj = self.get_object()
if not obj.can_be_viewed_by(request.user):
template = get_template("403.html")
return http.HttpResponseForbidden(template.render(RequestContext(self.request).flatten()))
return super().dispatch(request, *args, **kwargs)
class TimelineItemViewMixin(object):
def response(self):
data = {
'object_json': self.object_json(),
'html': self.object_html(),
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
def object_html(self):
template = get_template(self.item.timelineitem_template)
html = template.render(RequestContext(self.request, {'item': self.item.timelineitem, 'can_delete': self.item.can_be_deleted_by(self.request.user)}).flatten())
return html
class StampView(generic.UpdateView, TimelineItemViewMixin):
def post(self, request, *args, **kwargs):
obj = self.get_object()
status = request.POST.get('status')
self.item = NewStampOfApproval.objects.create(user=request.user, object=obj.editoritem, status=status)
return self.response()
def object_json(self):
return stamp_json(self.item)
def get(self, request, *args, **kwargs):
return http.HttpResponseNotAllowed(['POST'], 'GET requests are not allowed at this URL.')
class CommentView(generic.UpdateView, TimelineItemViewMixin):
def post(self, request, *args, **kwargs):
obj = self.get_comment_object()
text = request.POST.get('text')
self.item = Comment.objects.create(user=request.user, object=obj, text=text)
return self.response()
def object_json(self):
return comment_json(self.item)
def get(self, request, *args, **kwargs):
return http.HttpResponseNotAllowed(['POST'], 'GET requests are not allowed at this URL.')
class SetRestorePointView(generic.UpdateView, TimelineItemViewMixin):
def post(self, request, *args, **kwargs):
obj = self.get_object()
description = request.POST.get('text')
if not reversion.models.Version.objects.get_for_object(obj).exists():
with reversion.create_revision():
obj.save()
reversion.set_user(request.user)
revision = reversion.models.Version.objects.get_for_object(obj).first().revision
self.item = RestorePoint.objects.create(user=request.user, object=obj.editoritem, description=description, revision=revision)
return self.response()
def object_json(self):
return restore_point_json(self.item)
def get(self, request, *args, **kwargs):
return http.HttpResponseNotAllowed(['POST'], 'GET requests are not allowed at this URL.')
class RevertRestorePointView(generic.UpdateView):
model = RestorePoint
def get(self, request, *args, **kwargs):
self.restore_point = self.get_object()
if not self.restore_point.object.can_be_edited_by(request.user):
return http.HttpResponseForbidden()
oei = self.restore_point.object
project = oei.project
self.restore_point.revision.revert()
ei = EditorItem.objects.get(pk=oei.pk)
ei.project = project
ei.save()
return redirect(self.restore_point.object.get_absolute_url())
# JSON representation of an editor.models.NewStampOfApproval object
def stamp_json(stamp, **kwargs):
if stamp.pk is None:
return {
'pk': None,
'status': 'draft',
'status_display': 'Draft',
'user': None,
}
else:
return {
'pk': stamp.pk,
'date': stamp.timelineitem.date.strftime('%Y-%m-%d %H:%M:%S'),
'status': stamp.status,
'status_display': stamp.get_status_display(),
'user': user_json(stamp.user),
}
# JSON representation of an editor.models.Comment object
def comment_json(comment, **kwargs):
return {
'pk': comment.pk,
'date': comment.timelineitem.date.strftime('%Y-%m-%d %H:%M:%S'),
'text': comment.text,
'user': user_json(comment.user),
}
def restore_point_json(restore_point, **kwargs):
return {
'pk': restore_point.pk,
'date': restore_point.timelineitem.date.strftime('%Y-%m-%d %H:%M:%S'),
'description': restore_point.description,
'user': user_json(restore_point.user),
}
def ability_framework_json(ability_framework):
return {
'pk': ability_framework.pk,
'name': ability_framework.name,
'description': ability_framework.description,
'levels': [ability_level_json(l) for l in ability_framework.levels.all()],
}
def ability_level_json(ability_level):
return {
'pk': ability_level.pk,
'name': ability_level.name,
'description': ability_level.description,
'framework': ability_level.framework.pk,
'start': float(ability_level.start),
'end': float(ability_level.end),
}
class DeleteStampView(generic.DeleteView):
model = NewStampOfApproval
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.can_be_deleted_by(self.request.user):
self.object.delete()
ei = self.object.object
now_current_stamp = EditorItem.objects.get(pk=ei.pk).get_current_stamp()
data = stamp_json(now_current_stamp)
return http.HttpResponse(json.dumps({'current_stamp':data}), content_type='application/json')
else:
return http.HttpResponseForbidden('You don\'t have the necessary access rights.')
class ShareLinkView(generic.RedirectView):
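    """Redirect a share link to its item, granting the visiting user the requested
    access ('view' or 'edit') via an Access record if they do not already have it."""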
permanent = False
def get_redirect_url(self, *args, **kwargs):
access = kwargs['access']
try:
if access == 'edit':
q = self.model.objects.get(editoritem__share_uuid_edit=kwargs['share_uuid'])
elif access == 'view':
q = self.model.objects.get(editoritem__share_uuid_view=kwargs['share_uuid'])
except (ValueError, self.model.DoesNotExist):
raise http.Http404
user = self.request.user
if access == 'view':
has_access = q.editoritem.can_be_viewed_by(user)
elif access == 'edit':
has_access = q.editoritem.can_be_edited_by(user)
if not has_access:
try:
ea = Access.objects.get(item=q.editoritem, user=user)
except Access.DoesNotExist:
ea = Access(item=q.editoritem, user=user, access=access)
ea.access = access
ea.save()
return q.get_absolute_url()
class ProjectQuerysetMixin(object):
""" Set the queryset for the form's project field to the projects available to the user """
def get_form(self):
form = super(ProjectQuerysetMixin, self).get_form()
form.fields['project'].queryset = self.request.user.userprofile.projects().order_by('name').distinct()
return form
| 36.95279
| 166
| 0.655749
|
3506ffe2980142eda2755c1e36e516831344a27f
| 5,314
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/aio/operations/_virtual_machine_sizes_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/aio/operations/_virtual_machine_sizes_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/aio/operations/_virtual_machine_sizes_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineSizesOperations:
"""VirtualMachineSizesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2015_06_15.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
"""Lists all available virtual machine sizes for a subscription in a location.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineSizeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2015_06_15.models.VirtualMachineSizeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-06-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes'} # type: ignore
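# A minimal usage sketch (not part of the generated code), assuming a
# ComputeManagementClient from azure.mgmt.compute.aio built with an
# azure-identity credential, which exposes this operation group as
# `client.virtual_machine_sizes`:
#
#     async for size in client.virtual_machine_sizes.list("westus2"):
#         print(size.name, size.number_of_cores)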
| 47.026549
| 135
| 0.660896
|
c4e8382ed32ac5f8b39890e227d726526ae55458
| 147
|
py
|
Python
|
text_nlp/apps.py
|
hamza-manssor/News-Category-Classification-using-ML-in-Django
|
8d8579e6010bee1451df5653eb6bc45b4a1072ef
|
[
"MIT"
] | null | null | null |
text_nlp/apps.py
|
hamza-manssor/News-Category-Classification-using-ML-in-Django
|
8d8579e6010bee1451df5653eb6bc45b4a1072ef
|
[
"MIT"
] | null | null | null |
text_nlp/apps.py
|
hamza-manssor/News-Category-Classification-using-ML-in-Django
|
8d8579e6010bee1451df5653eb6bc45b4a1072ef
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TextNlpConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'text_nlp'
| 21
| 56
| 0.761905
|
fee1aceef4f99873d3ac5fae941f37132b5d5d4a
| 364
|
py
|
Python
|
taobao-tianmao/top/api/rest/AlibabaWdkMarketingOpenHeartbeatRequest.py
|
ScottLeeF/python-example
|
0b230ba80fe5020d70329a9d73e058013f0ca111
|
[
"Apache-2.0"
] | null | null | null |
taobao-tianmao/top/api/rest/AlibabaWdkMarketingOpenHeartbeatRequest.py
|
ScottLeeF/python-example
|
0b230ba80fe5020d70329a9d73e058013f0ca111
|
[
"Apache-2.0"
] | 7
|
2021-03-19T02:12:42.000Z
|
2022-03-12T00:25:28.000Z
|
taobao-tianmao/top/api/rest/AlibabaWdkMarketingOpenHeartbeatRequest.py
|
ScottLeeF/python-example
|
0b230ba80fe5020d70329a9d73e058013f0ca111
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2019.03.26
'''
from top.api.base import RestApi
class AlibabaWdkMarketingOpenHeartbeatRequest(RestApi):
def __init__(self, domain='gw.api.taobao.com', port=80):
RestApi.__init__(self, domain, port)
self.heart_beat = None
def getapiname(self):
return 'alibaba.wdk.marketing.open.heartbeat'
| 26
| 61
| 0.681319
|
37ea2899cde8120df2cdbd0511e09986d0fbdaee
| 20,201
|
py
|
Python
|
SMPyBandits/Environment/EvaluatorSparseMultiPlayers.py
|
balbok0/SMPyBandits
|
c8ff765687989e0c20ab42c2e2e1d8440923225b
|
[
"MIT"
] | 309
|
2018-03-03T22:07:59.000Z
|
2022-03-26T08:15:58.000Z
|
Environment/EvaluatorSparseMultiPlayers.py
|
98k-bot/SMPyBandits
|
35e675bde29dafbec68288fcfcd14ef3b0f058b2
|
[
"MIT"
] | 125
|
2018-02-27T22:54:03.000Z
|
2021-11-05T10:50:15.000Z
|
Environment/EvaluatorSparseMultiPlayers.py
|
98k-bot/SMPyBandits
|
35e675bde29dafbec68288fcfcd14ef3b0f058b2
|
[
"MIT"
] | 60
|
2018-04-30T20:54:24.000Z
|
2022-02-21T22:41:46.000Z
|
# -*- coding: utf-8 -*-
""" EvaluatorSparseMultiPlayers class to wrap and run the simulations, for the multi-players case with sparse activated players.
Lots of plotting methods, to have various visualizations. See documentation.
.. warning:: FIXME this environment is not as up-to-date as :class:`Environment.EvaluatorMultiPlayers`.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "Lilian Besson"
__version__ = "0.9"
# Generic imports
from copy import deepcopy
from re import search
import random
from random import random as uniform_in_zero_one
# Scientific imports
import numpy as np
import matplotlib.pyplot as plt
try:
# Local imports, libraries
from .usejoblib import USE_JOBLIB, Parallel, delayed
from .usetqdm import USE_TQDM, tqdm
# Local imports, tools and config
from .plotsettings import BBOX_INCHES, signature, maximizeWindow, palette, makemarkers, add_percent_formatter, wraptext, wraplatex, legend, show_and_save, nrows_ncols
from .sortedDistance import weightedDistance, manhattan, kendalltau, spearmanr, gestalt, meanDistance, sortedDistance
from .fairnessMeasures import amplitude_fairness, std_fairness, rajjain_fairness, mean_fairness, fairnessMeasure, fairness_mapping
# Local imports, objects and functions
from .CollisionModels import onlyUniqUserGetsRewardSparse, full_lost_if_collision
from .MAB import MAB, MarkovianMAB, ChangingAtEachRepMAB
from .ResultMultiPlayers import ResultMultiPlayers
# Inheritance
from .EvaluatorMultiPlayers import EvaluatorMultiPlayers, _extract
except ImportError:
# Local imports, libraries
from usejoblib import USE_JOBLIB, Parallel, delayed
from usetqdm import USE_TQDM, tqdm
# Local imports, tools and config
from plotsettings import BBOX_INCHES, signature, maximizeWindow, palette, makemarkers, add_percent_formatter, wraptext, wraplatex, legend, show_and_save, nrows_ncols
from sortedDistance import weightedDistance, manhattan, kendalltau, spearmanr, gestalt, meanDistance, sortedDistance
from fairnessMeasures import amplitude_fairness, std_fairness, rajjain_fairness, mean_fairness, fairnessMeasure, fairness_mapping
# Local imports, objects and functions
from CollisionModels import onlyUniqUserGetsRewardSparse, full_lost_if_collision
from MAB import MAB, MarkovianMAB, ChangingAtEachRepMAB
from ResultMultiPlayers import ResultMultiPlayers
# Inheritance
from EvaluatorMultiPlayers import EvaluatorMultiPlayers, _extract
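# The try/except above lets this module be used both as part of the SMPyBandits
# package (relative imports) and as a standalone script run from the Environment/
# directory (plain imports).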
REPETITIONS = 1 #: Default nb of repetitions
ACTIVATION = 1 #: Default probability of activation
DELTA_T_PLOT = 50 #: Default sampling rate for plotting
MORE_ACCURATE = False #: Use the count of selections instead of rewards for a more accurate mean/std reward measure.
MORE_ACCURATE = True #: Use the count of selections instead of rewards for a more accurate mean/std reward measure.
FINAL_RANKS_ON_AVERAGE = True #: Default value for ``finalRanksOnAverage``
USE_JOBLIB_FOR_POLICIES = False #: Default value for ``useJoblibForPolicies``. Using it does not speed things up (too much overhead from using too many threads), so it should really be disabled.
PICKLE_IT = True #: Default value for ``pickleit`` for saving the figures. If True, then all ``plt.figure`` object are saved (in pickle format).
# --- Class EvaluatorSparseMultiPlayers
class EvaluatorSparseMultiPlayers(EvaluatorMultiPlayers):
""" Evaluator class to run the simulations, for the multi-players case.
"""
def __init__(self, configuration,
moreAccurate=MORE_ACCURATE):
super(EvaluatorSparseMultiPlayers, self).__init__(configuration, moreAccurate=moreAccurate)
self.activations = self.cfg.get('activations', ACTIVATION) #: Probability of activations
assert np.min(self.activations) > 0 and np.max(self.activations) <= 1, "Error: probability of activations = {} were not all in (0, 1] ...".format(self.activations) # DEBUG
self.collisionModel = self.cfg.get('collisionModel', onlyUniqUserGetsRewardSparse) #: Which collision model should be used
self.full_lost_if_collision = full_lost_if_collision.get(self.collisionModel.__name__, True) #: Is there a full loss of rewards if collision ? To compute the correct decomposition of regret
print("Using collision model {} (function {}).\nMore details:\n{}".format(self.collisionModel.__name__, self.collisionModel, self.collisionModel.__doc__))
# --- Start computation
def startOneEnv(self, envId, env):
"""Simulate that env."""
print("\n\nEvaluating environment:", repr(env)) # DEBUG
self.players = []
self.__initPlayers__(env)
# Get the position of the best arms
means = env.means
bestarm = env.maxArm
indexes_bestarm = np.nonzero(np.isclose(means, bestarm))[0]
def store(r, repeatId):
"""Store the result of the experiment r."""
self.rewards[envId] += np.cumsum(r.rewards, axis=1) # cumsum on time
self.lastCumRewards[envId][repeatId] = np.sum(r.rewards) # sum on time and sum on policies
self.pulls[envId] += r.pulls
self.lastPulls[envId][:, :, repeatId] = r.pulls
self.allPulls[envId] += r.allPulls
self.collisions[envId] += r.collisions
self.lastCumCollisions[envId][:, repeatId] = np.sum(r.collisions, axis=1) # sum on time
for playerId in range(self.nbPlayers):
self.nbSwitchs[envId][playerId, 1:] += (np.diff(r.choices[playerId, :]) != 0)
self.bestArmPulls[envId][playerId, :] += np.cumsum(np.in1d(r.choices[playerId, :], indexes_bestarm))
# FIXME there is probably a bug in this computation
self.freeTransmissions[envId][playerId, :] += np.array([r.choices[playerId, t] not in r.collisions[:, t] for t in range(self.horizon)])
# Start now
if self.useJoblib:
seeds = np.random.randint(low=0, high=100 * self.repetitions, size=self.repetitions)
repeatIdout = 0
for r in Parallel(n_jobs=self.cfg['n_jobs'], verbose=self.cfg['verbosity'])(
delayed(delayed_play)(env, self.players, self.horizon, self.collisionModel, self.activations, seed=seeds[repeatId], repeatId=repeatId)
for repeatId in tqdm(range(self.repetitions), desc="Repeat||")
):
store(r, repeatIdout)
repeatIdout += 1
if env.isChangingAtEachRepetition:
env._t += self.repetitions # new self.repetitions draw!
else:
for repeatId in tqdm(range(self.repetitions), desc="Repeat"):
r = delayed_play(env, self.players, self.horizon, self.collisionModel, self.activations, repeatId=repeatId)
store(r, repeatId)
# --- Getter methods
def getCentralizedRegret_LessAccurate(self, envId=0):
"""Compute the empirical centralized regret: cumsum on time of the mean rewards of the M best arms - cumsum on time of the empirical rewards obtained by the players, based on accumulated rewards."""
meansArms = np.sort(self.envs[envId].means)
sumBestMeans = self.envs[envId].sumBestMeans(min(self.envs[envId].nbArms, self.nbPlayers))
        # FIXED how to count it when there are more players than arms?
# FIXME it depends on the collision model !
if self.envs[envId].nbArms < self.nbPlayers:
# sure to have collisions, then the best strategy is to put all the collisions in the worse arm
worseArm = np.min(meansArms)
            sumBestMeans -= worseArm # This accounts for the collisions
averageBestRewards = self._times * sumBestMeans
# And for the actual rewards, the collisions are counted in the rewards logged in self.getRewards
actualRewards = np.sum(self.rewards[envId][:, :], axis=0) / float(self.repetitions)
return averageBestRewards - actualRewards
# --- Three terms in the regret
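    # The (more accurate) centralized regret is computed as the sum of three terms,
    #     R(T) = (a) + (b) + (c),
    # where (a) counts losses from pulling the suboptimal arms, (b) counts losses
    # from not pulling the M best arms often enough, and (c) counts losses from
    # collisions (weighted by the arm means). The three methods below compute each
    # term from the empirical counts of selections and collisions.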
def getFirstRegretTerm(self, envId=0):
"""Extract and compute the first term :math:`(a)` in the centralized regret: losses due to pulling suboptimal arms."""
means = self.envs[envId].means
sortingIndex = np.argsort(means)
means = np.sort(means)
deltaMeansWorstArms = means[-min(self.envs[envId].nbArms, self.nbPlayers)] - means[:-min(self.envs[envId].nbArms, self.nbPlayers)]
allPulls = self.allPulls[envId] / float(self.repetitions) # Shape: (nbPlayers, nbArms, duration)
allWorstPulls = allPulls[:, sortingIndex[:-min(self.envs[envId].nbArms, self.nbPlayers)], :]
worstPulls = np.sum(allWorstPulls, axis=0) # sum for all players
losses = np.dot(deltaMeansWorstArms, worstPulls) # Count and sum on k in Mworst
firstRegretTerm = np.cumsum(losses) # Accumulate losses
return firstRegretTerm
def getSecondRegretTerm(self, envId=0):
"""Extract and compute the second term :math:`(b)` in the centralized regret: losses due to not pulling optimal arms."""
means = self.envs[envId].means
sortingIndex = np.argsort(means)
means = np.sort(means)
deltaMeansBestArms = means[-min(self.envs[envId].nbArms, self.nbPlayers):] - means[-min(self.envs[envId].nbArms, self.nbPlayers)]
allPulls = self.allPulls[envId] / float(self.repetitions) # Shape: (nbPlayers, nbArms, duration)
allBestPulls = allPulls[:, sortingIndex[-min(self.envs[envId].nbArms, self.nbPlayers):], :]
bestMisses = 1 - np.sum(allBestPulls, axis=0) # sum for all players
losses = np.dot(deltaMeansBestArms, bestMisses) # Count and sum on k in Mbest
secondRegretTerm = np.cumsum(losses) # Accumulate losses
return secondRegretTerm
def getThirdRegretTerm(self, envId=0):
"""Extract and compute the third term :math:`(c)` in the centralized regret: losses due to collisions."""
means = self.envs[envId].means
countCollisions = self.collisions[envId] # Shape: (nbArms, duration)
if not self.full_lost_if_collision:
print("Warning: the collision model ({}) does *not* yield a loss in communication when colliding (one user can communicate, or in average one user can communicate), so countCollisions -= 1 for the 3rd regret term ...".format(self.collisionModel.__name__)) # DEBUG
countCollisions = np.maximum(0, countCollisions - 1)
losses = np.dot(means, countCollisions / float(self.repetitions)) # Count and sum on k in 1...K
thirdRegretTerm = np.cumsum(losses) # Accumulate losses
return thirdRegretTerm
def getCentralizedRegret_MoreAccurate(self, envId=0):
"""Compute the empirical centralized regret, based on counts of selections and not actual rewards."""
return self.getFirstRegretTerm(envId=envId) + self.getSecondRegretTerm(envId=envId) + self.getThirdRegretTerm(envId=envId)
def getCentralizedRegret(self, envId=0, moreAccurate=None):
"""Using either the more accurate or the less accurate regret count."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
# print("Computing the vector of mean cumulated regret with '{}' accurate method...".format("more" if moreAccurate else "less")) # DEBUG
if moreAccurate:
return self.getCentralizedRegret_MoreAccurate(envId=envId)
else:
return self.getCentralizedRegret_LessAccurate(envId=envId)
# --- Last regrets
def getLastRegrets_LessAccurate(self, envId=0):
"""Extract last regrets, based on accumulated rewards."""
meansArms = np.sort(self.envs[envId].means)
sumBestMeans = self.envs[envId].sumBestMeans(self.nbPlayers)
        # FIXED how to count it when there are more players than arms?
# FIXME it depends on the collision model !
if self.envs[envId].nbArms < self.nbPlayers:
# sure to have collisions, then the best strategy is to put all the collisions in the worse arm
worseArm = np.min(meansArms)
            sumBestMeans -= worseArm # This accounts for the collisions
return self.horizon * sumBestMeans - self.lastCumRewards[envId]
def getAllLastWeightedSelections(self, envId=0):
"""Extract weighted count of selections."""
all_last_weighted_selections = np.zeros(self.repetitions)
lastCumCollisions = self.lastCumCollisions[envId]
for armId, mean in enumerate(self.envs[envId].means):
last_selections = np.sum(self.lastPulls[envId][:, armId, :], axis=0) # sum on players
all_last_weighted_selections += mean * (last_selections - lastCumCollisions[armId, :])
return all_last_weighted_selections
def getLastRegrets_MoreAccurate(self, envId=0):
"""Extract last regrets, based on counts of selections and not actual rewards."""
meansArms = np.sort(self.envs[envId].means)
sumBestMeans = self.envs[envId].sumBestMeans(self.nbPlayers)
        # FIXED how to count it when there are more players than arms?
# FIXME it depends on the collision model !
if self.envs[envId].nbArms < self.nbPlayers:
# sure to have collisions, then the best strategy is to put all the collisions in the worse arm
worseArm = np.min(meansArms)
            sumBestMeans -= worseArm # This accounts for the collisions
return self.horizon * sumBestMeans - self.getAllLastWeightedSelections(envId=envId)
def getLastRegrets(self, envId=0, moreAccurate=None):
"""Using either the more accurate or the less accurate regret count."""
moreAccurate = moreAccurate if moreAccurate is not None else self.moreAccurate
# print("Computing the vector of last cumulated regrets (on repetitions) with '{}' accurate method...".format("more" if moreAccurate else "less")) # DEBUG
if moreAccurate:
return self.getLastRegrets_MoreAccurate(envId=envId)
else:
return self.getLastRegrets_LessAccurate(envId=envId)
def strPlayers(self, short=False, latex=True):
"""Get a string of the players and their activations probability for this environment."""
listStrPlayersActivations = [("%s, $p=%s$" if latex else "%s, p=%s") % (_extract(str(player)), str(activation)) for (player, activation) in zip(self.players, self.activations)]
if len(set(listStrPlayersActivations)) == 1: # Unique user and unique activation
if latex:
text = r'${} \times$ {}'.format(self.nbPlayers, listStrPlayersActivations[0])
else:
text = r'{} x {}'.format(self.nbPlayers, listStrPlayersActivations[0])
else:
text = ', '.join(listStrPlayersActivations)
text = wraptext(text)
if not short:
text = '{} players: {}'.format(self.nbPlayers, text)
return text
def delayed_play(env, players, horizon, collisionModel, activations,
seed=None, repeatId=0):
"""Helper function for the parallelization."""
# Give a unique seed to random & numpy.random for each call of this function
try:
if seed is not None:
np.random.seed(seed)
random.seed(seed)
except (ValueError, SystemError):
print("Warning: setting random.seed and np.random.seed seems to not be available. Are you using Windows?") # XXX
means = env.means
if env.isChangingAtEachRepetition:
means = env.newRandomArms()
players = deepcopy(players)
nbArms = env.nbArms
nbPlayers = len(players)
# Start game
for player in players:
player.startGame()
# Store results
result = ResultMultiPlayers(env.nbArms, horizon, nbPlayers, means=means)
rewards = np.zeros(nbPlayers)
choices = np.zeros(nbPlayers, dtype=int)
pulls = np.zeros((nbPlayers, nbArms), dtype=int)
collisions = np.zeros(nbArms, dtype=int)
nbActivations = np.zeros(nbPlayers, dtype=int)
prettyRange = tqdm(range(horizon), desc="Time t") if repeatId == 0 else range(horizon)
for t in prettyRange:
# Reset the array, faster than reallocating them!
rewards.fill(0)
choices.fill(-100000)
pulls.fill(0)
collisions.fill(0)
# Decide who gets activated
# # 1. pure iid Bernoulli activations, so sum(random_activations) == np.random.binomial(nbPlayers, activation) if activations are all the same
# random_activations = np.random.random_sample(nbPlayers) <= activations
# FIXME finish these experiments
# 2. maybe first decide how many players from [0, nbArms] or [0, nbPlayers] are activated, then who
# nb_activated_players = np.random.binomial(nbArms, np.mean(activations))
nb_activated_players = np.random.binomial(nbPlayers, np.mean(activations))
# who_is_activated = np.random.choice(nbPlayers, size=nb_activated_players, replace=False)
who_is_activated = np.random.choice(nbPlayers, size=nb_activated_players, replace=False, p=np.asarray(activations)/np.sum(activations))
random_activations = np.in1d(np.arange(nbPlayers), who_is_activated)
# Every player decides which arm to pull
for playerId, player in enumerate(players):
# if with_proba(activations[playerId]):
if random_activations[playerId]:
nbActivations[playerId] += 1
choices[playerId] = player.choice()
# print(" Round t = \t{}, player \t#{:>2}/{} ({}) \tgot activated and chose : {} ...".format(t, playerId + 1, len(players), player, choices[playerId])) # DEBUG
# else:
# print(" Round t = \t{}, player \t#{:>2}/{} ({}) \tdid not get activated ...".format(t, playerId + 1, len(players), player)) # DEBUG
        # Then we decide whether there are collisions and what to do with them
        # XXX It is here that the player may receive a reward, if there are no collisions
collisionModel(t, env.arms, players, choices, rewards, pulls, collisions)
# Finally we store the results
result.store(t, choices, rewards, pulls, collisions)
# Print the quality of estimation of arm ranking for this policy, just for 1st repetition
if repeatId == 0:
print("\nNumber of activations by players:")
for playerId, player in enumerate(players):
try:
print("\nThe policy {} was activated {} times after {} steps...".format(player, nbActivations[playerId], horizon))
order = player.estimatedOrder()
print("Estimated order by the policy {} after {} steps: {} ...".format(player, horizon, order))
print(" ==> Optimal arm identification: {:.2%} (relative success)...".format(weightedDistance(order, env.means, n=nbPlayers)))
# print(" ==> Manhattan distance from optimal ordering: {:.2%} (relative success)...".format(manhattan(order)))
# print(" ==> Spearman distance from optimal ordering: {:.2%} (relative success)...".format(spearmanr(order)))
# print(" ==> Gestalt distance from optimal ordering: {:.2%} (relative success)...".format(gestalt(order)))
print(" ==> Mean distance from optimal ordering: {:.2%} (relative success)...".format(meanDistance(order)))
except AttributeError:
print("Unable to print the estimated ordering, no method estimatedOrder was found!")
return result
def with_proba(proba):
"""`True` with probability = `proba`, `False` with probability = `1 - proba`.
Examples:
>>> import random; random.seed(0)
>>> tosses = [with_proba(0.6) for _ in range(10000)]; sum(tosses)
5977
>>> tosses = [with_proba(0.111) for _ in range(100000)]; sum(tosses)
11158
"""
return uniform_in_zero_one() <= proba
# --- Debugging
if __name__ == "__main__":
# Code for debugging purposes.
from doctest import testmod
print("\nTesting automatically all the docstring written in each functions of this module :")
testmod(verbose=True)
| 57.389205
| 276
| 0.681501
|
3df92b04a84daad85b4a4bd4386d6c188902c278
| 3,495
|
py
|
Python
|
chaingreen/timelord/timelord_launcher.py
|
WaitWha/chaingreen-blockchain
|
959443f03420b80f66028c2183525712aa933465
|
[
"Apache-2.0"
] | 103
|
2021-05-30T02:09:28.000Z
|
2022-03-17T20:45:49.000Z
|
chaingreen/timelord/timelord_launcher.py
|
WaitWha/chaingreen-blockchain
|
959443f03420b80f66028c2183525712aa933465
|
[
"Apache-2.0"
] | 107
|
2021-05-23T02:20:26.000Z
|
2022-03-29T17:07:43.000Z
|
chaingreen/timelord/timelord_launcher.py
|
WaitWha/chaingreen-blockchain
|
959443f03420b80f66028c2183525712aa933465
|
[
"Apache-2.0"
] | 50
|
2021-05-23T02:19:06.000Z
|
2022-01-24T07:32:50.000Z
|
import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from chaingreen.util.chaingreen_logging import initialize_logging
from chaingreen.util.config import load_config
from chaingreen.util.default_root import DEFAULT_ROOT_PATH
from chaingreen.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
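    # Error-level logging of the vdf_client stderr is suppressed during the first
    # 10 seconds after start-up (see the first_10_seconds flag below).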
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(net_config["self_hostname"], port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("chaingreen_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
| 30.391304
| 100
| 0.656366
|
8d67a183619a90f435572dc5c386f70dc3005c6c
| 23,161
|
py
|
Python
|
statsmodels/tsa/tests/test_deterministic.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 1
|
2022-01-16T01:33:03.000Z
|
2022-01-16T01:33:03.000Z
|
statsmodels/tsa/tests/test_deterministic.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 5
|
2022-02-13T14:38:04.000Z
|
2022-02-15T00:13:07.000Z
|
statsmodels/tsa/tests/test_deterministic.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 4
|
2022-02-04T22:58:27.000Z
|
2022-02-14T19:29:18.000Z
|
from statsmodels.compat.pandas import PD_LT_1_0_0, is_int_index
from statsmodels.compat.pytest import pytest_warns
from typing import Hashable, Tuple
import numpy as np
import pandas as pd
import pytest
from statsmodels.tsa.deterministic import (
CalendarFourier,
CalendarSeasonality,
CalendarTimeTrend,
DeterministicProcess,
DeterministicTerm,
Fourier,
Seasonality,
TimeTrend,
)
@pytest.fixture(scope="module", params=[True, False])
def time_index(request):
idx = pd.date_range("2000-01-01", periods=833, freq="B")
if request.param:
return idx.to_period("B")
return idx
@pytest.fixture(
scope="module", params=["range", "period", "datetime", "fib", "int64"]
)
def index(request):
param = request.param
if param in ("period", "datetime"):
idx = pd.date_range("2000-01-01", periods=137, freq="M")
if param == "period":
idx = idx.to_period("M")
elif param == "range":
idx = pd.RangeIndex(0, 123)
elif param == "int64":
idx = pd.Index(np.arange(123))
elif param == "fib":
fib = [0, 1]
for _ in range(113):
fib.append(fib[-2] + fib[-1])
idx = pd.Index(fib)
else:
raise NotImplementedError()
return idx
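# The "fib" parametrization yields an irregularly spaced integer index whose
# largest values exceed 2**63; several tests below expect a UserWarning from
# out_of_sample() exactly for such indexes.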
@pytest.fixture(scope="module", params=[None, "period", False, "list"])
def forecast_index(request):
idx = pd.date_range("2000-01-01", periods=400, freq="B")
if request.param is None:
return None
elif request.param == "period":
return idx.to_period("B")
elif request.param == "list":
return list(idx)
return idx
@pytest.mark.smoke
def test_time_trend_smoke(index, forecast_index):
tt = TimeTrend(True, 2)
tt.in_sample(index)
steps = 83 if forecast_index is None else len(forecast_index)
warn = None
if (
is_int_index(index)
and np.any(np.diff(index) != 1)
or (
type(index) is pd.Index
and max(index) > 2 ** 63
and forecast_index is None
)
):
warn = UserWarning
with pytest_warns(warn):
tt.out_of_sample(steps, index, forecast_index)
str(tt)
hash(tt)
assert isinstance(tt.order, int)
assert isinstance(tt._constant, bool)
assert TimeTrend.from_string("ctt") == tt
assert TimeTrend.from_string("ct") != tt
assert TimeTrend.from_string("t") != tt
assert TimeTrend.from_string("n") != tt
assert Seasonality(12) != tt
tt0 = TimeTrend(False, 0)
tt0.in_sample(index)
str(tt0)
@pytest.mark.smoke
def test_seasonality_smoke(index, forecast_index):
s = Seasonality(12)
s.in_sample(index)
steps = 83 if forecast_index is None else len(forecast_index)
warn = None
if (
is_int_index(index)
and np.any(np.diff(index) != 1)
or (
type(index) is pd.Index
and max(index) > 2 ** 63
and forecast_index is None
)
):
warn = UserWarning
with pytest_warns(warn):
s.out_of_sample(steps, index, forecast_index)
assert isinstance(s.period, int)
str(s)
hash(s)
if isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)) and index.freq:
s = Seasonality.from_index(index)
s.in_sample(index)
s.out_of_sample(steps, index, forecast_index)
Seasonality.from_index(list(index))
@pytest.mark.smoke
def test_fourier_smoke(index, forecast_index):
f = Fourier(12, 2)
f.in_sample(index)
steps = 83 if forecast_index is None else len(forecast_index)
warn = None
if (
is_int_index(index)
and np.any(np.diff(index) != 1)
or (
type(index) is pd.Index
and max(index) > 2 ** 63
and forecast_index is None
)
):
warn = UserWarning
with pytest_warns(warn):
f.out_of_sample(steps, index, forecast_index)
assert isinstance(f.period, float)
assert isinstance(f.order, int)
str(f)
hash(f)
with pytest.raises(ValueError, match=r"2 \* order must be <= period"):
Fourier(12, 7)
@pytest.mark.smoke
def test_calendar_time_trend_smoke(time_index, forecast_index):
ct = CalendarTimeTrend("A", order=2)
ct.in_sample(time_index)
steps = 83 if forecast_index is None else len(forecast_index)
ct.out_of_sample(steps, time_index, forecast_index)
str(ct)
hash(ct)
assert isinstance(ct.order, int)
assert isinstance(ct.constant, bool)
assert isinstance(ct.freq, str)
assert ct.base_period is None
@pytest.mark.smoke
def test_calendar_fourier_smoke(time_index, forecast_index):
cf = CalendarFourier("A", 2)
cf.in_sample(time_index)
steps = 83 if forecast_index is None else len(forecast_index)
cf.out_of_sample(steps, time_index, forecast_index)
assert isinstance(cf.order, int)
assert isinstance(cf.freq, str)
str(cf)
repr(cf)
hash(cf)
params = CalendarSeasonality._supported
cs_params = [(k, k2) for k, v in params.items() for k2 in v.keys()]
@pytest.mark.parametrize("freq_period", cs_params)
def test_calendar_seasonality(time_index, forecast_index, freq_period):
freq, period = freq_period
cs = CalendarSeasonality(period, freq)
cs.in_sample(time_index)
steps = 83 if forecast_index is None else len(forecast_index)
cs.out_of_sample(steps, time_index, forecast_index)
assert isinstance(cs.period, str)
assert isinstance(cs.freq, str)
str(cs)
repr(cs)
hash(cs)
cs2 = CalendarSeasonality(period, freq)
assert cs == cs2
def test_forbidden_index():
index = pd.RangeIndex(0, 10)
ct = CalendarTimeTrend("A", order=2)
with pytest.raises(TypeError, match="CalendarTimeTrend terms can only"):
ct.in_sample(index)
def test_calendar_time_trend_base(time_index):
ct = CalendarTimeTrend("M", True, order=3, base_period="1960-1-1")
ct2 = CalendarTimeTrend("M", True, order=3)
assert ct != ct2
str(ct)
str(ct2)
assert ct.base_period is not None
assert ct2.base_period is None
def test_invalid_freq_period(time_index):
with pytest.raises(ValueError, match="The combination of freq="):
CalendarSeasonality("H", "A")
cs = CalendarSeasonality("B", "W")
with pytest.raises(ValueError, match="freq is B but index contains"):
cs.in_sample(pd.date_range("2000-1-1", periods=10, freq="D"))
def test_check_index_type():
ct = CalendarTimeTrend("A", True, order=3)
idx = pd.RangeIndex(0, 20)
with pytest.raises(TypeError, match="CalendarTimeTrend terms can only"):
ct._check_index_type(idx, pd.DatetimeIndex)
with pytest.raises(TypeError, match="CalendarTimeTrend terms can only"):
ct._check_index_type(idx, (pd.DatetimeIndex,))
with pytest.raises(TypeError, match="CalendarTimeTrend terms can only"):
ct._check_index_type(idx, (pd.DatetimeIndex, pd.PeriodIndex))
idx = pd.Index([0, 1, 1, 2, 3, 5, 8, 13])
with pytest.raises(TypeError, match="CalendarTimeTrend terms can only"):
types = (pd.DatetimeIndex, pd.PeriodIndex, pd.RangeIndex)
ct._check_index_type(idx, types)
def test_unknown_freq():
with pytest.raises(ValueError, match="freq is not understood by pandas"):
CalendarTimeTrend("unknown", True, order=3)
def test_invalid_forecast_index(index):
tt = TimeTrend(order=4)
with pytest.raises(ValueError, match="The number of values in forecast_"):
tt.out_of_sample(10, index, pd.RangeIndex(11))
def test_seasonal_from_index_err():
index = pd.Index([0, 1, 1, 2, 3, 5, 8, 12])
with pytest.raises(TypeError):
Seasonality.from_index(index)
index = pd.date_range("2000-1-1", periods=10)[[0, 1, 2, 3, 5, 8]]
with pytest.raises(ValueError):
Seasonality.from_index(index)
def test_time_trend(index):
tt = TimeTrend(constant=True)
const = tt.in_sample(index)
assert const.shape == (index.shape[0], 1)
assert np.all(const == 1)
pd.testing.assert_index_equal(const.index, index)
warn = None
if (is_int_index(index) and np.any(np.diff(index) != 1)) or (
type(index) is pd.Index and max(index) > 2 ** 63
):
warn = UserWarning
with pytest_warns(warn):
const_fcast = tt.out_of_sample(23, index)
assert np.all(const_fcast == 1)
tt = TimeTrend(constant=False)
empty = tt.in_sample(index)
assert empty.shape == (index.shape[0], 0)
tt = TimeTrend(constant=False, order=2)
t2 = tt.in_sample(index)
assert t2.shape == (index.shape[0], 2)
assert list(t2.columns) == ["trend", "trend_squared"]
tt = TimeTrend(constant=True, order=2)
final = tt.in_sample(index)
expected = pd.concat([const, t2], axis=1)
pd.testing.assert_frame_equal(final, expected)
tt = TimeTrend(constant=True, order=2)
short = tt.in_sample(index[:-50])
with pytest_warns(warn):
remainder = tt.out_of_sample(50, index[:-50])
direct = tt.out_of_sample(
steps=50, index=index[:-50], forecast_index=index[-50:]
)
combined = pd.concat([short, remainder], axis=0)
if isinstance(index, (pd.DatetimeIndex, pd.RangeIndex)):
pd.testing.assert_frame_equal(combined, final)
combined = pd.concat([short, direct], axis=0)
pd.testing.assert_frame_equal(combined, final, check_index_type=False)
def test_seasonality(index):
s = Seasonality(period=12)
exog = s.in_sample(index)
assert s.is_dummy
assert exog.shape == (index.shape[0], 12)
pd.testing.assert_index_equal(exog.index, index)
assert np.all(exog.sum(1) == 1.0)
assert list(exog.columns) == [f"s({i},12)" for i in range(1, 13)]
expected = np.zeros((index.shape[0], 12))
for i in range(12):
expected[i::12, i] = 1.0
np.testing.assert_equal(expected, np.asarray(exog))
warn = None
if (is_int_index(index) and np.any(np.diff(index) != 1)) or (
type(index) is pd.Index and max(index) > 2 ** 63
):
warn = UserWarning
with pytest_warns(warn):
fcast = s.out_of_sample(steps=12, index=index)
assert fcast.iloc[0, len(index) % 12] == 1.0
assert np.all(fcast.sum(1) == 1)
s = Seasonality(period=7, initial_period=3)
exog = s.in_sample(index)
assert exog.iloc[0, 2] == 1.0
assert exog.iloc[0].sum() == 1.0
assert s.initial_period == 3
with pytest.raises(ValueError, match="initial_period must be in"):
Seasonality(period=12, initial_period=-3)
with pytest.raises(ValueError, match="period must be >= 2"):
Seasonality(period=1)
def test_seasonality_time_index(time_index):
tt = Seasonality.from_index(time_index)
assert tt.period == 5
fcast = tt.out_of_sample(steps=12, index=time_index)
new_idx = DeterministicTerm._extend_index(time_index, 12)
pd.testing.assert_index_equal(fcast.index, new_idx)
def test_fourier(index):
f = Fourier(period=12, order=3)
terms = f.in_sample(index)
assert f.order == 3
assert terms.shape == (index.shape[0], 2 * f.order)
loc = np.arange(index.shape[0]) / 12
for i, col in enumerate(terms):
j = i // 2 + 1
fn = np.cos if (i % 2) else np.sin
expected = fn(2 * np.pi * j * loc)
np.testing.assert_allclose(terms[col], expected, atol=1e-8)
cols = []
for i in range(2 * f.order):
fn = "cos" if (i % 2) else "sin"
cols.append(f"{fn}({(i // 2) + 1},12)")
assert list(terms.columns) == cols
@pytest.mark.skipif(PD_LT_1_0_0, reason="bug in old pandas")
def test_index_like():
idx = np.empty((100, 2))
with pytest.raises(TypeError, match="index must be a pandas"):
DeterministicTerm._index_like(idx)
def test_calendar_fourier(reset_randomstate):
inc = np.abs(np.random.standard_normal(1000))
inc = np.cumsum(inc)
inc = 10 * inc / inc[-1]
offset = (24 * 3600 * inc).astype(np.int64)
base = pd.Timestamp("2000-1-1")
index = [base + pd.Timedelta(val, unit="s") for val in offset]
index = pd.Index(index)
cf = CalendarFourier("D", 2)
assert cf.order == 2
terms = cf.in_sample(index)
cols = []
for i in range(2 * cf.order):
fn = "cos" if (i % 2) else "sin"
cols.append(f"{fn}({(i // 2) + 1},freq=D)")
assert list(terms.columns) == cols
inc = offset / (24 * 3600)
loc = 2 * np.pi * (inc - np.floor(inc))
expected = []
for i in range(4):
scale = i // 2 + 1
fn = np.cos if (i % 2) else np.sin
expected.append(fn(scale * loc))
expected = np.column_stack(expected)
np.testing.assert_allclose(expected, terms.values)
def test_calendar_time_trend(reset_randomstate):
inc = np.abs(np.random.standard_normal(1000))
inc = np.cumsum(inc)
inc = 10 * inc / inc[-1]
offset = (24 * 3600 * inc).astype(np.int64)
base = pd.Timestamp("2000-1-1")
index = [base + pd.Timedelta(val, "s") for val in offset]
index = pd.Index(index)
ctt = CalendarTimeTrend("D", True, order=3, base_period=base)
assert ctt.order == 3
terms = ctt.in_sample(index)
cols = ["const", "trend", "trend_squared", "trend_cubed"]
assert list(terms.columns) == cols
inc = 1 + offset / (24 * 3600)
expected = []
for i in range(4):
expected.append(inc ** i)
expected = np.column_stack(expected)
np.testing.assert_allclose(expected, terms.values)
ctt = CalendarTimeTrend("D", True, order=2, base_period=base)
ctt2 = CalendarTimeTrend.from_string("D", trend="ctt", base_period=base)
pd.testing.assert_frame_equal(ctt.in_sample(index), ctt2.in_sample(index))
ct = CalendarTimeTrend("D", True, order=1, base_period=base)
ct2 = CalendarTimeTrend.from_string("D", trend="ct", base_period=base)
pd.testing.assert_frame_equal(ct.in_sample(index), ct2.in_sample(index))
ctttt = CalendarTimeTrend("D", True, order=4, base_period=base)
assert ctttt.order == 4
terms = ctttt.in_sample(index)
cols = ["const", "trend", "trend_squared", "trend_cubed", "trend**4"]
assert list(terms.columns) == cols
def test_calendar_seasonal_period_w():
period = "W"
index = pd.date_range("2000-01-03", freq="H", periods=600)
cs = CalendarSeasonality("H", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 168] == 1.0
index = pd.date_range("2000-01-03", freq="B", periods=600)
cs = CalendarSeasonality("B", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 5] == 1.0
index = pd.date_range("2000-01-03", freq="D", periods=600)
cs = CalendarSeasonality("D", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 7] == 1.0
def test_calendar_seasonal_period_d():
period = "D"
index = pd.date_range("2000-01-03", freq="H", periods=600)
cs = CalendarSeasonality("H", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 24] == 1.0
def test_calendar_seasonal_period_q():
period = "Q"
index = pd.date_range("2000-01-01", freq="M", periods=600)
cs = CalendarSeasonality("M", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 3] == 1.0
def test_calendar_seasonal_period_a():
period = "A"
index = pd.date_range("2000-01-01", freq="M", periods=600)
cs = CalendarSeasonality("M", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, i % 12] == 1.0
cs = CalendarSeasonality("Q", period=period)
terms = cs.in_sample(index)
assert np.all(terms.sum(1) == 1.0)
for i in range(index.shape[0]):
assert terms.iloc[i, (i % 12) // 3] == 1.0
@pytest.mark.parametrize("constant", [True, False])
@pytest.mark.parametrize("order", [0, 1])
@pytest.mark.parametrize("seasonal", [True, False])
@pytest.mark.parametrize("fourier", [0, 1])
@pytest.mark.parametrize("period", [None, 10])
@pytest.mark.parametrize("drop", [True, False])
def test_deterministic_process(
time_index, constant, order, seasonal, fourier, period, drop
):
if seasonal and fourier:
return
dp = DeterministicProcess(
time_index,
constant=constant,
order=order,
seasonal=seasonal,
fourier=fourier,
period=period,
drop=drop,
)
terms = dp.in_sample()
pd.testing.assert_index_equal(terms.index, time_index)
terms = dp.out_of_sample(23)
assert isinstance(terms, pd.DataFrame)
def test_deterministic_process_errors(time_index):
with pytest.raises(ValueError, match="seasonal and fourier"):
DeterministicProcess(time_index, seasonal=True, fourier=2, period=5)
with pytest.raises(TypeError, match="All additional terms"):
DeterministicProcess(time_index, seasonal=True, additional_terms=[1])
def test_range_error():
idx = pd.Index([0, 1, 1, 2, 3, 5, 8, 13])
dp = DeterministicProcess(
idx, constant=True, order=2, seasonal=True, period=2
)
with pytest.raises(TypeError, match="The index in the deterministic"):
dp.range(0, 12)
def test_range_index_basic():
idx = pd.date_range("2000-1-1", freq="M", periods=120)
dp = DeterministicProcess(idx, constant=True, order=1, seasonal=True)
dp.range("2001-1-1", "2008-1-1")
dp.range("2001-1-1", "2015-1-1")
dp.range("2013-1-1", "2008-1-1")
dp.range(0, 100)
dp.range(100, 150)
dp.range(130, 150)
with pytest.raises(ValueError):
dp.range("1990-1-1", "2010-1-1")
idx = pd.period_range("2000-1-1", freq="M", periods=120)
dp = DeterministicProcess(idx, constant=True, order=1, seasonal=True)
dp.range("2001-1-1", "2008-1-1")
dp.range("2001-1-1", "2015-1-1")
dp.range("2013-1-1", "2008-1-1")
with pytest.raises(ValueError, match="start must be non-negative"):
dp.range(-7, 200)
dp.range(0, 100)
dp.range(100, 150)
dp.range(130, 150)
idx = pd.RangeIndex(0, 120)
dp = DeterministicProcess(
idx, constant=True, order=1, seasonal=True, period=12
)
dp.range(0, 100)
dp.range(100, 150)
dp.range(120, 150)
dp.range(130, 150)
with pytest.raises(ValueError):
dp.range(-10, 0)
def test_range_casting():
idx = np.arange(120)
dp = DeterministicProcess(
idx, constant=True, order=1, seasonal=True, period=12
)
idx = pd.RangeIndex(0, 120)
dp2 = DeterministicProcess(
idx, constant=True, order=1, seasonal=True, period=12
)
pd.testing.assert_frame_equal(dp.in_sample(), dp2.in_sample())
pd.testing.assert_frame_equal(dp.range(100, 150), dp2.range(100, 150))
def test_non_unit_range():
idx = pd.RangeIndex(0, 700, 7)
dp = DeterministicProcess(idx, constant=True)
with pytest.raises(ValueError, match="The step of the index is not 1"):
dp.range(11, 900)
def test_additional_terms(time_index):
add_terms = [TimeTrend(True, order=1)]
dp = DeterministicProcess(time_index, additional_terms=add_terms)
dp2 = DeterministicProcess(time_index, constant=True, order=1)
pd.testing.assert_frame_equal(dp.in_sample(), dp2.in_sample())
with pytest.raises(
ValueError, match="One or more terms in additional_terms"
):
DeterministicProcess(
time_index, additional_terms=add_terms + add_terms
)
with pytest.raises(
ValueError, match="One or more terms in additional_terms"
):
DeterministicProcess(
time_index, constant=True, order=1, additional_terms=add_terms
)
def test_drop_two_constants(time_index):
tt = TimeTrend(constant=True, order=1)
dp = DeterministicProcess(
time_index, constant=True, additional_terms=[tt], drop=True
)
assert dp.in_sample().shape[1] == 2
dp2 = DeterministicProcess(time_index, additional_terms=[tt], drop=True)
pd.testing.assert_frame_equal(dp.in_sample(), dp2.in_sample())
@pytest.mark.parametrize(
"index",
[
pd.RangeIndex(0, 200),
pd.Index(np.arange(200)),
pd.date_range("2000-1-1", freq="M", periods=200),
pd.period_range("2000-1-1", freq="M", periods=200),
],
)
def test_deterministic_term_equiv(index):
base = DeterministicProcess(pd.RangeIndex(0, 200), constant=True, order=2)
dp = DeterministicProcess(index, constant=True, order=2)
np.testing.assert_array_equal(base.in_sample(), dp.in_sample())
np.testing.assert_array_equal(base.out_of_sample(37), dp.out_of_sample(37))
np.testing.assert_array_equal(base.range(200, 237), dp.range(200, 237))
np.testing.assert_array_equal(base.range(50, 150), dp.range(50, 150))
np.testing.assert_array_equal(base.range(50, 250), dp.range(50, 250))
class DummyTerm(DeterministicTerm):
@property
def _eq_attr(self) -> Tuple[Hashable, ...]:
return ("Dummy",)
def __str__(self) -> str:
return "Dummy"
columns = [
"const1",
"const2",
"trend1",
"trend2",
"normal1",
"normal2",
"dummy1_1",
"dummy1_2",
"always_drop1",
"always_drop2",
"dummy2_1",
"dummy2_2",
]
def in_sample(self, index: pd.Index) -> pd.DataFrame:
nobs = index.shape[0]
terms = np.empty((index.shape[0], 12))
for i in range(0, 12, 2):
if i == 0:
value = 1
elif i == 2:
value = np.arange(nobs)
elif i == 4:
value = np.random.standard_normal(nobs)
elif i == 6:
value = np.zeros(nobs)
value[::2] = 1
elif i == 8:
value = 0
            else:  # i == 10
value = np.zeros(nobs)
value[1::2] = 1
terms[:, i] = terms[:, i + 1] = value
return pd.DataFrame(terms, columns=self.columns, index=index)
def out_of_sample(
self,
steps: int,
index: pd.Index,
forecast_index: pd.Index = None,
) -> pd.DataFrame:
fcast_index = self._extend_index(index, steps, forecast_index)
terms = np.random.standard_normal((steps, 12))
return pd.DataFrame(terms, columns=self.columns, index=fcast_index)
def test_drop():
index = pd.RangeIndex(0, 200)
dummy = DummyTerm()
str(dummy)
assert dummy != TimeTrend()
dp = DeterministicProcess(index, additional_terms=[dummy], drop=True)
in_samp = dp.in_sample()
assert in_samp.shape == (200, 4)
oos = dp.out_of_sample(37)
assert oos.shape == (37, 4)
assert list(oos.columns) == list(in_samp.columns)
valid = ("const", "trend", "dummy", "normal")
for valid_col in valid:
assert sum([1 for col in oos if valid_col in col]) == 1
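# A minimal usage sketch of the DeterministicProcess API exercised above; the
# import path below is the public statsmodels location and is assumed here.
if __name__ == "__main__":
    import pandas as pd
    from statsmodels.tsa.deterministic import DeterministicProcess, Fourier

    idx = pd.period_range("2000-01", freq="M", periods=60)
    dp = DeterministicProcess(
        idx, constant=True, order=1, additional_terms=[Fourier(period=12, order=2)]
    )
    print(dp.in_sample().columns.tolist())   # const, trend and the Fourier sin/cos terms
    print(dp.out_of_sample(steps=12).shape)  # 12 rows, one column per term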
| 32.575246 | 79 | 0.638358 |
49fed8d661e55df92e6ec66e045b6d92cad4271d | 1,315 | py | Python |
app/core/tests/test_admin.py | shravands/django-restapi-recipe | c21d01ab3b0d92d249e638a1b503ea54dd6d69bd | ["MIT"] | null | null | null |
app/core/tests/test_admin.py | shravands/django-restapi-recipe | c21d01ab3b0d92d249e638a1b503ea54dd6d69bd | ["MIT"] | null | null | null |
app/core/tests/test_admin.py | shravands/django-restapi-recipe | c21d01ab3b0d92d249e638a1b503ea54dd6d69bd | ["MIT"] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@app.com',
password='password@123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@app.com',
password='password@123',
name='Test full user name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the users edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 32.073171 | 68 | 0.638023 |
3411a4a3bb630e8042bfdf1c4f5c16218df48540 | 1,100 | py | Python |
salsa_env/lib/python3.7/site-packages/openai/api_resources/file.py | shahjaidev/Salsa-GPT3 | 41ecf0d7268300a2435a8eb8f4f4c68139de6e79 | ["MIT"] | 1 | 2020-11-26T10:28:31.000Z | 2020-11-26T10:28:31.000Z |
salsa_env/lib/python3.7/site-packages/openai/api_resources/file.py | shahjaidev/Salsa-GPT3 | 41ecf0d7268300a2435a8eb8f4f4c68139de6e79 | ["MIT"] | null | null | null |
salsa_env/lib/python3.7/site-packages/openai/api_resources/file.py | shahjaidev/Salsa-GPT3 | 41ecf0d7268300a2435a8eb8f4f4c68139de6e79 | ["MIT"] | null | null | null |
from __future__ import absolute_import, division, print_function
import json
import os
import tempfile
import openai
from openai import api_requestor, util
from openai.api_resources.abstract import (
APIResource,
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
)
from openai.util import log_info
class File(ListableAPIResource):
OBJECT_NAME = "file"
@classmethod
def create(
cls, api_key=None, api_base=None, api_version=None, organization=None, **params
):
requestor = api_requestor.APIRequestor(
api_key,
api_base=openai.file_api_base or openai.api_base,
api_version=api_version,
organization=organization,
)
url = cls.class_url()
supplied_headers = {"Content-Type": "multipart/form-data"}
response, _, api_key = requestor.request(
"post", url, params=params, headers=supplied_headers
)
return util.convert_to_openai_object(
response, api_key, api_version, organization
)
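# Hypothetical usage sketch of the classmethod above; the `purpose`/`file` parameters
# and the API-key handling follow the public OpenAI file-upload convention and are
# assumptions here, so this is left as an illustrative comment rather than executable code.
#
#     import openai
#     openai.api_key = "sk-..."
#     uploaded = File.create(purpose="fine-tune", file=open("training.jsonl", "rb"))
#     print(uploaded["id"])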
| 27.5 | 87 | 0.683636 |
04152833046b319516ecf8c22d871a14f9a6d4d2 | 3,010 | py | Python |
linetools/lists/tests/test_init_linelist.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | ["BSD-3-Clause"] | null | null | null |
linetools/lists/tests/test_init_linelist.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | ["BSD-3-Clause"] | null | null | null |
linetools/lists/tests/test_init_linelist.py | marijana777/linetools | 73720a2f6df42b7dde1f35055cd40ad970200f7f | ["BSD-3-Clause"] | null | null | null |
# Module to run tests on Generating a LineList
# Also tests some simple functionality
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import pdb
import pytest
from astropy import units as u
import numpy as np
from linetools.lists.linelist import LineList
from linetools.lists import mk_sets as llmk
def test_ism_read_source_catalogues():
ism = LineList('ISM', use_ISM_table=False)
np.testing.assert_allclose(ism['HI 1215']['wrest'], 1215.6700*u.AA, rtol=1e-7)
# ISM LineList
def test_ism():
ism = LineList('ISM')
#
np.testing.assert_allclose(ism['HI 1215']['wrest'], 1215.6700*u.AA, rtol=1e-7)
# Test update_fval
def test_updfval():
ism = LineList('ISM')
#
np.testing.assert_allclose(ism['FeII 1133']['f'], 0.0055)
# Test update_gamma
def test_updgamma():
ism = LineList('ISM')
#
np.testing.assert_allclose(ism['HI 1215']['gamma'], 626500000.0/u.s)
# Strong ISM LineList
def test_strong():
strng = LineList('Strong')
#
assert len(strng._data) < 200
# Strong ISM LineList
def test_euv():
euv = LineList('EUV')
#
assert np.max(euv._data['wrest']) < 1000.
# Test for X-ray lines
ovii = euv['OVII 21']
assert np.isclose(ovii['wrest'].value, 21.6019)
# HI LineList
def test_h1():
HI = LineList('HI')
#
for name in HI.name:
assert name[0:2] == 'HI'
# H2 LineList
def test_h2():
h2 = LineList('H2')
#
np.testing.assert_allclose(h2[911.967*u.AA]['f'], 0.001315, rtol=1e-5)
# CO LineList
def test_co():
CO = LineList('CO')
#
np.testing.assert_allclose(CO[1322.133*u.AA]['f'], 0.0006683439, rtol=1e-5)
# Galactic LineList
def test_galx():
galx = LineList('Galaxy')
#
np.testing.assert_allclose(galx["Halpha"]['wrest'], 6564.613*u.AA, rtol=1e-5)
# Unknown lines
def test_unknown():
ism = LineList('ISM')
unknown = ism.unknown_line()
assert unknown['name'] == 'unknown', 'There is a problem in the LineList.unknown_line()'
assert unknown['wrest'] == 0.*u.AA, 'There is a problem in the LineList.unknown_line()'
print(ism['unknown'])
def test_mk_sets():
import imp
llmk.mk_hi(outfil='tmp.lst', stop=False)
lt_path = imp.find_module('linetools')[1]
llmk.add_galaxy_lines('tmp.lst', infil=lt_path+'/lists/sets/llist_v0.1.ascii', stop=False)
def test_set_extra_columns_to_datatable():
ism = LineList('ISM')
# bad calls
try:
ism.set_extra_columns_to_datatable(abundance_type='incorrect_one')
except ValueError:
pass
try:
ism.set_extra_columns_to_datatable(ion_correction='incorrect_one')
except ValueError:
pass
# test expected strongest value
ism.set_extra_columns_to_datatable(ion_correction='none', abundance_type='solar')
np.testing.assert_allclose(ism['HI 1215']['rel_strength'], 14.704326420257642)
tab = ism._data
np.testing.assert_allclose(np.max(tab['rel_strength']), 14.704326420257642)
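# A minimal interactive sketch of the LineList lookups exercised by the tests above;
# the exact keys assume the standard ISM line list shipped with linetools.
if __name__ == "__main__":
    from astropy import units as u
    from linetools.lists.linelist import LineList

    ism = LineList('ISM')
    hi = ism['HI 1215']                   # lookup by transition name
    print(hi['wrest'], hi['f'])           # rest wavelength and oscillator strength
    print(ism[1215.6700 * u.AA]['name'])  # lookup by (closest) rest wavelength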
| 26.875 | 94 | 0.680731 |
31c91875c9c182f5229edf1ed446c63b4652f90f | 7,923 | py | Python |
task_geo/data_sources/covid/spain/es_covid_formatter.py | Brian-V/task-geo | f03776b63d9dd60f7fd5795023d36370c70a63b0 | ["MIT"] | null | null | null |
task_geo/data_sources/covid/spain/es_covid_formatter.py | Brian-V/task-geo | f03776b63d9dd60f7fd5795023d36370c70a63b0 | ["MIT"] | null | null | null |
task_geo/data_sources/covid/spain/es_covid_formatter.py | Brian-V/task-geo | f03776b63d9dd60f7fd5795023d36370c70a63b0 | ["MIT"] | null | null | null |
import pandas as pd
def es_covid_formatter(df):
"""Formats data retrieved from https://covid19.isciii.es
Arguments:
raw(pandas.DataFrame):
Returns:
pandas.DataFrame
"""
assert df[pd.isnull(df['CCAA Codigo ISO'])].empty
df.rename(columns={'CCAA Codigo ISO': 'autonomous_community_iso', 'Fecha': 'date',
'Casos ': 'cases', 'Hospitalizados': 'hospitalized', 'UCI': 'icu',
'Fallecidos': 'deceased', 'Recuperados': 'recovered'}, inplace=True)
print(df.head())
# ### Replace NaN with 0
df['cases'].fillna(value=0, inplace=True)
df['hospitalized'].fillna(value=0, inplace=True)
df['icu'].fillna(value=0, inplace=True)
df['deceased'].fillna(value=0, inplace=True)
df['recovered'].fillna(value=0, inplace=True)
# Update date
# Transform to compliance with https://coronawhy.github.io/task-geo/data_model.html
df['date'] = pd.to_datetime(df['date'])
# Undo cumulative sums
# create a copy of the dataframe, without date
unrolled_df = df.copy()
unrolled_df.drop(['date'], axis=1, inplace=True)
# unroll (i.e. undo the cumulative values)
unrolled_df = unrolled_df.groupby('autonomous_community_iso').diff().fillna(unrolled_df)
# add back autonomous_community_iso, date columns
unrolled_df = pd.concat([df[['autonomous_community_iso', 'date']], unrolled_df], axis=1)
# Insert Country
unrolled_df.insert(0, 'country', 'Spain')
# Remove rows that are not a region. This is significant because the last row includes
# some text
unrolled_df = unrolled_df[unrolled_df['autonomous_community_iso'].isin(
["CE", "AR", "CM", "PV", "MC", "AS", "AN", "CL", "CT", "MD", "IB", "GA", "CN", "VC", "RI",
"NC", "EX", "ME", "CB"])]
# Add Area, Population, Density, GDP
unrolled_df['area_km_squared'] = unrolled_df['autonomous_community_iso'].map({"CE": 18.5,
"AR": 47719,
"CM": 79463,
"PV": 7234,
"MC": 11313,
"AS": 10604,
"AN": 87268,
"CL": 94223,
"CT": 32114,
"MD": 8028,
"IB": 4992,
"GA": 29574,
"CN": 7447,
"VC": 23255,
"RI": 5045,
"NC": 10391,
"EX": 41634,
"ME": 12.3,
"CB": 5321
})
unrolled_df['population'] = unrolled_df['autonomous_community_iso'].map({"CE": 84777,
"AR": 1319291,
"CM": 2032863,
"PV": 2207776,
"MC": 1493898,
"AS": 1022800,
"AN": 8414240,
"CL": 2399548,
"CT": 7675217,
"MD": 6663394,
"IB": 1149460,
"GA": 2699499,
"CN": 2153389,
"VC": 5003769,
"RI": 316798,
"NC": 654214,
"EX": 1067710,
"ME": 86487,
"CB": 581078
})
unrolled_df['density_pop_per_km_squared'] = unrolled_df['population'] / unrolled_df[
'area_km_squared']
unrolled_df['gdp_per_capita_euros'] = unrolled_df['autonomous_community_iso'].map({"CE": 19335,
"AR": 25540,
"CM": 17698,
"PV": 30829,
"MC": 18520,
"AS": 21035,
"AN": 16960,
"CL": 22289,
"CT": 27248,
"MD": 29385,
"IB": 24393,
"GA": 20723,
"CN": 19568,
"VC": 19964,
"RI": 25508,
"NC": 29071,
"EX": 15394,
"ME": 16981,
"CB": 22341
})
# Reorder Columns
return unrolled_df[
['country', 'autonomous_community_iso', 'area_km_squared', 'population',
'gdp_per_capita_euros', 'density_pop_per_km_squared', 'date', 'cases', 'hospitalized',
'icu', 'deceased', 'recovered']]
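# The "undo cumulative sums" step above, shown in isolation on a toy frame with
# hypothetical values (not part of the data source itself).
if __name__ == "__main__":
    toy = pd.DataFrame({
        'autonomous_community_iso': ['MD', 'MD', 'MD', 'CT', 'CT', 'CT'],
        'cases': [1, 3, 6, 2, 2, 5],  # cumulative counts per region
    })
    daily = toy.groupby('autonomous_community_iso').diff().fillna(toy)
    # The first row of each region keeps its value; later rows become daily increments.
    print(daily['cases'].tolist())  # [1.0, 2.0, 3.0, 2.0, 0.0, 3.0]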
| 65.479339 | 99 | 0.258109 |
f08e9dd3539677ad16d77417e0706cbe93871bd0 | 693 | py | Python |
wavencoder/models/baseline.py | shangeth/wavencoder | cd1a277c2cc44075c9f4506e344b3a725ad5b9fe | ["MIT"] | 56 | 2020-10-10T18:11:28.000Z | 2022-03-05T03:06:27.000Z |
wavencoder/models/baseline.py | shangeth/wavencoder | cd1a277c2cc44075c9f4506e344b3a725ad5b9fe | ["MIT"] | 9 | 2020-12-04T07:49:25.000Z | 2021-11-26T13:16:31.000Z |
wavencoder/models/baseline.py | shangeth/wavencoder | cd1a277c2cc44075c9f4506e344b3a725ad5b9fe | ["MIT"] | 10 | 2020-10-10T18:06:47.000Z | 2022-03-28T20:40:40.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN1d(nn.Module):
def __init__(self, conv_layers):
super(CNN1d, self).__init__()
self.conv_layers = nn.ModuleList()
in_d = 1
for dim, k, s in conv_layers:
self.conv_layers.append(self.cnn_block(in_d, dim, k, s))
in_d = dim
def cnn_block(self, n_in, n_out, k, s):
block = nn.Sequential(
nn.Conv1d(n_in, n_out, kernel_size=k, stride=s),
nn.BatchNorm1d(n_out),
nn.LeakyReLU()
)
return block
def forward(self, x):
for conv in self.conv_layers:
x = conv(x)
return x
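# A small usage sketch; the (channels, kernel, stride) spec below is hypothetical.
if __name__ == "__main__":
    model = CNN1d(conv_layers=[(32, 10, 5), (64, 8, 4), (128, 4, 2)])
    wav = torch.randn(4, 1, 16000)  # batch of 4 single-channel waveforms
    feats = model(wav)
    print(feats.shape)  # torch.Size([4, 128, 398]) for this spec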
| 23.896552 | 68 | 0.5671 |
13ed024deec5372eaab4637d53e63c831a150651 | 7,297 | py | Python |
hummingbot/connector/budget_checker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | ["Apache-2.0"] | 2 | 2022-03-03T10:00:27.000Z | 2022-03-08T13:57:56.000Z |
hummingbot/connector/budget_checker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | ["Apache-2.0"] | 6 | 2022-01-31T15:44:54.000Z | 2022-03-06T04:27:12.000Z |
hummingbot/connector/budget_checker.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | ["Apache-2.0"] | 1 | 2022-03-28T09:28:40.000Z | 2022-03-28T09:28:40.000Z |
import typing
from collections import defaultdict
from copy import copy
from decimal import Decimal
from typing import Dict, List
from hummingbot.core.data_type.order_candidate import OrderCandidate
if typing.TYPE_CHECKING: # avoid circular import problems
from hummingbot.connector.exchange_base import ExchangeBase
class BudgetChecker:
def __init__(self, exchange: "ExchangeBase"):
"""
Provides utilities for strategies to check the potential impact of order proposals on the user account balances.
Mainly used to determine if sufficient balance is available to place a set of strategy-proposed orders.
The strategy can size a list of proposed order candidates by calling the `adjust_candidates` method.
For a more fine-grained control, the strategy can call `adjust_candidate_and_lock_available_collateral`
for each one of the orders it intends to place. On each call, the `BudgetChecker` locks in the collateral
amount needed for that order and makes it unavailable for the following hypothetical orders.
Once the orders are sent to the exchange, the strategy must call `reset_locked_collateral` to
free the hypothetically locked assets for the next set of checks.
:param exchange: The exchange against which available collateral assets will be checked.
"""
self._exchange = exchange
self._locked_collateral: Dict[str, Decimal] = defaultdict(lambda: Decimal("0"))
def reset_locked_collateral(self):
"""
Frees collateral assets locked for hypothetical orders.
"""
self._locked_collateral.clear()
def adjust_candidates(
self, order_candidates: List[OrderCandidate], all_or_none: bool = True
) -> List[OrderCandidate]:
"""
Fills in the collateral and returns fields of the order candidates.
        If there are insufficient assets to cover the collateral requirements, the order amount is adjusted.
See the doc string for `adjust_candidate` to learn more about how the adjusted order
amount is derived.
:param order_candidates: A list of candidate orders to check and adjust.
:param all_or_none: Should the order amount be set to zero on insufficient balance.
:return: The list of adjusted order candidates.
"""
self.reset_locked_collateral()
adjusted_candidates = [
self.adjust_candidate_and_lock_available_collateral(order_candidate, all_or_none)
for order_candidate in order_candidates
]
self.reset_locked_collateral()
return adjusted_candidates
def adjust_candidate_and_lock_available_collateral(
self, order_candidate: OrderCandidate, all_or_none: bool = True
) -> OrderCandidate:
"""
Fills in the collateral and returns fields of the order candidates.
        If there are insufficient assets to cover the collateral requirements, the order amount is adjusted.
See the doc string for `adjust_candidate` to learn more about how the adjusted order
amount is derived.
This method also locks in the collateral amount for the given collateral token and makes
it unavailable on subsequent calls to this method until the `reset_locked_collateral`
method is called.
:param order_candidate: The candidate order to check and adjust.
:param all_or_none: Should the order amount be set to zero on insufficient balance.
:return: The adjusted order candidate.
"""
adjusted_candidate = self.adjust_candidate(order_candidate, all_or_none)
self._lock_available_collateral(adjusted_candidate)
return adjusted_candidate
def adjust_candidate(
self, order_candidate: OrderCandidate, all_or_none: bool = True
) -> OrderCandidate:
"""
Fills in the collateral and returns fields of the order candidates.
If there is insufficient collateral to cover the proposed order amount and
the `all_or_none` parameter is set to `False`, the order amount will be adjusted
to the greatest amount that the remaining collateral can provide for. If the parameter
is set to `True`, the order amount is set to zero.
:param order_candidate: The candidate order to be checked and adjusted.
:param all_or_none: Should the order amount be set to zero on insufficient balance.
:return: The adjusted order candidate.
"""
order_candidate = self.populate_collateral_entries(order_candidate)
available_balances = self._get_available_balances(order_candidate)
order_candidate.adjust_from_balances(available_balances)
if order_candidate.resized:
if all_or_none:
order_candidate.set_to_zero()
else:
order_candidate = self._quantize_adjusted_order(order_candidate)
return order_candidate
def populate_collateral_entries(self, order_candidate: OrderCandidate) -> OrderCandidate:
"""
Populates the collateral and returns fields of the order candidates.
This implementation assumes a spot-specific configuration for collaterals (i.e. the quote
token for buy orders, and base token for sell orders). It can be overridden to provide other
configurations.
:param order_candidate: The candidate order to check and adjust.
:return: The adjusted order candidate.
"""
order_candidate = copy(order_candidate)
order_candidate.populate_collateral_entries(self._exchange)
return order_candidate
def _get_available_balances(self, order_candidate: OrderCandidate) -> Dict[str, Decimal]:
available_balances = {}
if order_candidate.order_collateral is not None:
token, _ = order_candidate.order_collateral
available_balances[token] = (
self._exchange.get_available_balance(token) - self._locked_collateral[token]
)
if order_candidate.percent_fee_collateral is not None:
token, _ = order_candidate.percent_fee_collateral
available_balances[token] = (
self._exchange.get_available_balance(token) - self._locked_collateral[token]
)
for entry in order_candidate.fixed_fee_collaterals:
token, _ = entry
available_balances[token] = (
self._exchange.get_available_balance(token) - self._locked_collateral[token]
)
return available_balances
def _quantize_adjusted_order(self, order_candidate: OrderCandidate) -> OrderCandidate:
trading_pair = order_candidate.trading_pair
adjusted_amount = order_candidate.amount
quantized_amount = self._exchange.quantize_order_amount(trading_pair, adjusted_amount)
if adjusted_amount != quantized_amount:
order_candidate.amount = quantized_amount
order_candidate = self.populate_collateral_entries(order_candidate)
return order_candidate
def _lock_available_collateral(self, order_candidate: OrderCandidate):
for token, amount in order_candidate.collateral_dict.items():
self._locked_collateral[token] += amount
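# Hypothetical strategy-side call pattern described in the docstrings above; the
# `exchange` connector and the `candidates` list of OrderCandidate objects are assumed
# to exist, so this stays an illustrative comment rather than executable code.
#
#     checker = BudgetChecker(exchange)
#     sized = checker.adjust_candidates(candidates, all_or_none=False)
#     executable = [c for c in sized if c.amount > 0]
#     # place the `executable` orders, then free any manually locked collateral
#     checker.reset_locked_collateral()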
| 46.477707 | 120 | 0.711388 |
49dc4e9535355ec9753e36daf84aaaa21e272ed4 | 594 | py | Python |
test_api.py | CoffeeStraw/CapsNet-Knowledge-Extractor | 99dc665bcce394e4dfa0b8a6deda28d1e3713509 | ["MIT"] | 5 | 2020-09-29T13:34:23.000Z | 2021-12-01T15:27:53.000Z |
test_api.py | CoffeeStraw/CapsNet-Knowledge-Extractor | 99dc665bcce394e4dfa0b8a6deda28d1e3713509 | ["MIT"] | null | null | null |
test_api.py | CoffeeStraw/CapsNet-Knowledge-Extractor | 99dc665bcce394e4dfa0b8a6deda28d1e3713509 | ["MIT"] | null | null | null |
"""
Simple test file that performs some requests to the API
"""
import requests
# API / getModels
response = requests.get("http://127.0.0.1:5000/api/getModels")
print(response.text)
# API / computeStep
response = requests.post(
"http://127.0.0.1:5000/api/computeStep",
json={"model": "Simple", "step": "trained", "dataset": "MNIST", "index": 0},
)
print(response.text)
# API / computeStep
response = requests.post(
"http://127.0.0.1:5000/api/computeStep",
json={"model": "Original", "step": "trained", "dataset": "MNIST", "index": 42},
)
print(response.text)
| 25.826087 | 83 | 0.671717 |
9aaf01c44d0a02ebd701c23944224c9c3a9e35db | 7,242 | py | Python |
sarpy/processing/fft_base.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | ["MIT"] | null | null | null |
sarpy/processing/fft_base.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | ["MIT"] | null | null | null |
sarpy/processing/fft_base.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | ["MIT"] | null | null | null |
"""
Helper classes and methods for Fourier processing schemes.
"""
__classification__ = "UNCLASSIFIED"
__author__ = 'Thomas McCullough'
import logging
from sarpy.compliance import int_func
from sarpy.io.general.base import BaseReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.processing.ortho_rectify import FullResolutionFetcher
# NB: the below are intended as common imports from other locations
# leave them here, even if unused
import numpy
import scipy
if scipy.__version__ < '1.4':
# noinspection PyUnresolvedReferences
from scipy.fftpack import fft, ifft, fftshift, ifftshift
else:
# noinspection PyUnresolvedReferences
from scipy.fft import fft, ifft, fftshift, ifftshift
class FFTCalculator(FullResolutionFetcher):
"""
Base Fourier processing calculator class.
This is intended for processing schemes where full resolution is required
along the processing dimension, so sub-sampling along the processing
dimension does not decrease the amount of data which must be fetched.
"""
__slots__ = (
'_platform_direction', '_fill')
def __init__(self, reader, dimension=0, index=0, block_size=10):
"""
Parameters
----------
reader : str|BaseReader
Input file path or reader object, which must be of sicd type.
dimension : int
The dimension over which to split the sub-aperture.
index : int
The sicd index to use.
block_size : int
The approximate processing block size to fetch, given in MB. The
minimum value for use here will be 1.
"""
self._platform_direction = None # set with the index setter
self._fill = None # set implicitly with _set_fill()
super(FFTCalculator, self).__init__(reader, dimension=dimension, index=index, block_size=block_size)
@property
def dimension(self):
# type: () -> int
"""
        int: The dimension along which to perform the Fourier processing.
"""
return self._dimension
@dimension.setter
def dimension(self, value):
value = int_func(value)
if value not in [0, 1]:
raise ValueError('dimension must be 0 or 1, got {}'.format(value))
self._dimension = value
self._set_fill()
@property
def index(self):
# type: () -> int
"""
int: The index of the reader.
"""
return self._index
@index.setter
def index(self, value):
super(FFTCalculator, self)._set_index(value)
if self._sicd.SCPCOA is None or self._sicd.SCPCOA.SideOfTrack is None:
            logging.warning(
                'The sicd object at index {} has unpopulated SCPCOA.SideOfTrack. '
                'Defaulting to "R", which may be incorrect.'.format(self._index))
self._platform_direction = 'R'
else:
self._platform_direction = self._sicd.SCPCOA.SideOfTrack
self._set_fill()
@property
def fill(self):
# type: () -> float
"""
float: The fill factor for the fourier processing.
"""
return self._fill
def _set_fill(self):
self._fill = None
if self._dimension is None:
return
if self._index is None:
return
if self.dimension == 0:
try:
fill = 1.0/(self.sicd.Grid.Row.SS*self.sicd.Grid.Row.ImpRespBW)
except (ValueError, AttributeError, TypeError):
fill = 1.0
else:
try:
fill = 1.0/(self.sicd.Grid.Col.SS*self.sicd.Grid.Col.ImpRespBW)
except (ValueError, AttributeError, TypeError):
fill = 1.0
self._fill = max(1.0, float(fill))
def __getitem__(self, item):
"""
Fetches the processed data based on the input slice.
Parameters
----------
item
Returns
-------
numpy.ndarray
"""
raise NotImplementedError
def _validate_fft_input(array):
"""
Validate the fft input.
Parameters
----------
array : numpy.ndarray
Returns
-------
None
"""
if not isinstance(array, numpy.ndarray):
raise TypeError('array must be a numpy array')
if not numpy.iscomplexobj(array):
raise ValueError('array must have a complex data type')
if array.ndim != 2:
raise ValueError('array must be a two-dimensional array. Got shape {}'.format(array.shape))
def _determine_direction(sicd, dimension):
"""
Determine the default sign for the fft.
Parameters
----------
sicd : SICDType
dimension : int
Returns
-------
int
"""
sgn = None
if dimension == 0:
try:
sgn = sicd.Grid.Row.Sgn
except AttributeError:
pass
elif dimension == 1:
try:
sgn = sicd.Grid.Col.Sgn
except AttributeError:
pass
else:
raise ValueError('dimension must be one of 0 or 1.')
return -1 if sgn is None else sgn
def fft_sicd(array, dimension, sicd):
"""
Apply the forward one-dimensional forward fft to data associated with the
given sicd along the given dimension/axis, in accordance with the sign
populated in the SICD structure (default is -1).
Parameters
----------
array : numpy.ndarray
The data array, which must be two-dimensional and complex.
dimension : int
Must be one of 0, 1.
sicd : SICDType
The associated SICD structure.
Returns
-------
numpy.ndarray
"""
sgn = _determine_direction(sicd, dimension)
return fft(array, axis=dimension) if sgn < 0 else ifft(array, axis=dimension)
def ifft_sicd(array, dimension, sicd):
"""
Apply the inverse one-dimensional fft to data associated with the given sicd
along the given dimension/axis.
Parameters
----------
array : numpy.ndarray
The data array, which must be two-dimensional and complex.
dimension : int
Must be one of 0, 1.
sicd : SICDType
The associated SICD structure.
Returns
-------
numpy.ndarray
"""
sgn = _determine_direction(sicd, dimension)
return ifft(array, axis=dimension) if sgn < 0 else fft(array, axis=dimension)
def fft2_sicd(array, sicd):
"""
Apply the forward two-dimensional fft (i.e. both axes) to data associated with
the given sicd.
Parameters
----------
array : numpy.ndarray
The data array, which must be two-dimensional and complex.
sicd : SICDType
The associated SICD structure.
Returns
-------
numpy.ndarray
"""
return fft_sicd(fft_sicd(array, 0, sicd), 1, sicd)
def ifft2_sicd(array, sicd):
"""
Apply the inverse two-dimensional fft (i.e. both axes) to data associated with
the given sicd.
Parameters
----------
array : numpy.ndarray
The data array, which must be two-dimensional and complex.
sicd : SICDType
The associated SICD structure.
Returns
-------
numpy.ndarray
"""
return ifft_sicd(ifft_sicd(array, 0, sicd), 1, sicd)
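# Illustrative round trip through the sign-aware transforms above; the image chip and
# its SICD metadata are assumed to be available, so this is a comment-only sketch.
#
#     chip = reader[:512, :512]               # complex image chip from a SICD reader (assumed)
#     sicd = ...                              # the associated SICDType metadata (assumed available)
#     spectrum = fft2_sicd(chip, sicd)        # forward, honoring Grid Row/Col Sgn
#     recovered = ifft2_sicd(spectrum, sicd)  # inverse round trip
#     assert numpy.allclose(chip, recovered, atol=1e-8)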
| 26.05036 | 108 | 0.611571 |
49a600324f741b555b094c9c52d5934fa3e76cf2 | 16,914 | py | Python |
sdk/managedservices/azure-mgmt-managedservices/azure/mgmt/managedservices/aio/operations/_registration_definitions_operations.py | GoWang/azure-sdk-for-python | f241e3734a50953c2a37c10d2d84eb4c013b3ba0 | ["MIT"] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z |
sdk/managedservices/azure-mgmt-managedservices/azure/mgmt/managedservices/aio/operations/_registration_definitions_operations.py | GoWang/azure-sdk-for-python | f241e3734a50953c2a37c10d2d84eb4c013b3ba0 | ["MIT"] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z |
sdk/managedservices/azure-mgmt-managedservices/azure/mgmt/managedservices/aio/operations/_registration_definitions_operations.py | GoWang/azure-sdk-for-python | f241e3734a50953c2a37c10d2d84eb4c013b3ba0 | ["MIT"] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegistrationDefinitionsOperations:
"""RegistrationDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.managedservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
scope: str,
registration_definition_id: str,
**kwargs
) -> "_models.RegistrationDefinition":
"""Gets the registration definition details.
:param scope: Scope of the resource.
:type scope: str
:param registration_definition_id: Guid of the registration definition.
:type registration_definition_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegistrationDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.managedservices.models.RegistrationDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistrationDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'registrationDefinitionId': self._serialize.url("registration_definition_id", registration_definition_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegistrationDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}'} # type: ignore
async def delete(
self,
registration_definition_id: str,
scope: str,
**kwargs
) -> None:
"""Deletes the registration definition.
:param registration_definition_id: Guid of the registration definition.
:type registration_definition_id: str
:param scope: Scope of the resource.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'registrationDefinitionId': self._serialize.url("registration_definition_id", registration_definition_id, 'str'),
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}'} # type: ignore
async def _create_or_update_initial(
self,
registration_definition_id: str,
scope: str,
request_body: "_models.RegistrationDefinition",
**kwargs
) -> "_models.RegistrationDefinition":
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistrationDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'registrationDefinitionId': self._serialize.url("registration_definition_id", registration_definition_id, 'str'),
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request_body, 'RegistrationDefinition')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RegistrationDefinition', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RegistrationDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}'} # type: ignore
async def begin_create_or_update(
self,
registration_definition_id: str,
scope: str,
request_body: "_models.RegistrationDefinition",
**kwargs
) -> AsyncLROPoller["_models.RegistrationDefinition"]:
"""Creates or updates a registration definition.
:param registration_definition_id: Guid of the registration definition.
:type registration_definition_id: str
:param scope: Scope of the resource.
:type scope: str
:param request_body: The parameters required to create new registration definition.
:type request_body: ~azure.mgmt.managedservices.models.RegistrationDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RegistrationDefinition or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.managedservices.models.RegistrationDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistrationDefinition"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
registration_definition_id=registration_definition_id,
scope=scope,
request_body=request_body,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RegistrationDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'registrationDefinitionId': self._serialize.url("registration_definition_id", registration_definition_id, 'str'),
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions/{registrationDefinitionId}'} # type: ignore
def list(
self,
scope: str,
**kwargs
) -> AsyncIterable["_models.RegistrationDefinitionList"]:
"""Gets a list of the registration definitions.
:param scope: Scope of the resource.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegistrationDefinitionList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.managedservices.models.RegistrationDefinitionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistrationDefinitionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-02-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RegistrationDefinitionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/{scope}/providers/Microsoft.ManagedServices/registrationDefinitions'} # type: ignore
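# Hypothetical async client-side usage of the operations defined above; `client` is
# assumed to be a service client exposing this class as `registration_definitions`,
# so this stays an illustrative comment rather than executable code.
#
#     scope = "subscriptions/00000000-0000-0000-0000-000000000000"
#     poller = await client.registration_definitions.begin_create_or_update(
#         registration_definition_id, scope, request_body)
#     definition = await poller.result()
#     async for item in client.registration_definitions.list(scope):
#         print(item)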
| 47.64507 | 163 | 0.670273 |
0c6e008fa59f44a97c1b1a6796294c39e67e5ec4 | 3,170 | py | Python |
discord/backoff.py | Buster-2002/discord.py-self | 83ab03aacac7571f1787220a3265e0697f6d374a | ["MIT"] | null | null | null |
discord/backoff.py | Buster-2002/discord.py-self | 83ab03aacac7571f1787220a3265e0697f6d374a | ["MIT"] | null | null | null |
discord/backoff.py | Buster-2002/discord.py-self | 83ab03aacac7571f1787220a3265e0697f6d374a | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import random
import time
class ExponentialBackoff:
"""An implementation of the exponential backoff algorithm
Provides a convenient interface to implement an exponential backoff
for reconnecting or retrying transmissions in a distributed network.
Once instantiated, the delay method will return the next interval to
wait for when retrying a connection or transmission. The maximum
delay increases exponentially with each retry up to a maximum of
2^10 * base, and is reset if no more attempts are needed in a period
of 2^11 * base seconds.
Parameters
----------
base: :class:`int`
The base delay in seconds. The first retry-delay will be up to
this many seconds.
integral: :class:`bool`
Set to ``True`` if whole periods of base is desirable, otherwise any
number in between may be returned.
"""
def __init__(self, base=1, *, integral=False):
self._base = base
self._exp = 0
self._max = 10
self._reset_time = base * 2 ** 11
self._last_invocation = time.monotonic()
# Use our own random instance to avoid messing with global one
rand = random.Random()
rand.seed()
self._randfunc = rand.randrange if integral else rand.uniform
def delay(self):
"""Compute the next delay
Returns the next delay to wait according to the exponential
backoff algorithm. This is a value between 0 and base * 2^exp
where exponent starts off at 1 and is incremented at every
invocation of this method up to a maximum of 10.
If a period of more than base * 2^11 has passed since the last
retry, the exponent is reset to 1.
"""
invocation = time.monotonic()
interval = invocation - self._last_invocation
self._last_invocation = invocation
if interval > self._reset_time:
self._exp = 0
self._exp = min(self._exp + 1, self._max)
return self._randfunc(0, self._base * 2 ** self._exp)
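# Typical reconnect-loop shape for the algorithm documented above; `connect()`, the
# caught exception type and the asyncio import are hypothetical, so this stays a
# comment-only sketch.
#
#     backoff = ExponentialBackoff(base=1)
#     while True:
#         try:
#             await connect()
#         except ConnectionError:
#             await asyncio.sleep(backoff.delay())
#         else:
#             break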
| 36.436782 | 76 | 0.702839 |
2f891c789b98e1a97f0a9cd6198bac77e6d0a37e | 6,382 | py | Python |
jiminy/utils/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | ["MIT"] | 3 | 2020-03-16T13:50:40.000Z | 2021-06-09T05:26:13.000Z |
jiminy/utils/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | ["MIT"] | null | null | null |
jiminy/utils/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | ["MIT"] | null | null | null |
import logging
import six
import sys
if six.PY2:
import Queue as queue
else:
import queue
import threading
import signal
from twisted.internet import defer
from jiminy.twisty import reactor
logger = logging.getLogger(__name__)
class ErrorBuffer(object):
def __init__(self):
self.queue = queue.Queue()
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
if value is not None:
self.record(value)
def __call__(self, error, wrap=True):
self.record(error, wrap=True)
def record(self, error, wrap=True):
logger.debug('Error in thread %s: %s', threading.current_thread().name, error)
if wrap:
error = format_error(error)
try:
self.queue.put_nowait(error)
except queue.Full:
pass
def check(self, timeout=None):
if timeout is None:
timeout = 0
try:
error = self.queue.get(timeout=timeout)
except queue.Empty:
return
else:
raise error
def blocking_check(self, timeout=None):
# TODO: get rid of this method
if timeout is None:
while True:
self.check(timeout=3600)
else:
self.check(timeout)
from twisted.python import failure
import traceback
import threading
from jiminy import error
def format_error(e):
# errback automatically wraps everything in a Twisted Failure
if isinstance(e, failure.Failure):
e = e.value
if isinstance(e, str):
err_string = e
elif six.PY2:
err_string = traceback.format_exc(e).rstrip()
else:
err_string = ''.join(traceback.format_exception(type(e), e, e.__traceback__)).rstrip()
if err_string == 'None':
# Reasonable heuristic for exceptions that were created by hand
last = traceback.format_stack()[-2]
err_string = '{}\n {}'.format(e, last)
# Quick and dirty hack for now.
err_string = err_string.replace('Connection to the other side was lost in a non-clean fashion', 'Connection to the other side was lost in a non-clean fashion (HINT: this generally actually means we got a connection refused error. Check that the remote is actually running.)')
return error.Error(err_string)
def queue_get(local_queue):
while True:
try:
result = local_queue.get(timeout=1000)
except queue.Empty:
pass
else:
return result
def blockingCallFromThread(f, *a, **kw):
local_queue = queue.Queue()
def _callFromThread():
result = defer.maybeDeferred(f, *a, **kw)
result.addBoth(local_queue.put)
reactor.callFromThread(_callFromThread)
result = queue_get(local_queue)
if isinstance(result, failure.Failure):
if result.frames:
e = error.Error(str(result))
else:
e = result.value
raise e
return result
from jiminy.gym import spaces
def repeat_space(space, n):
return spaces.Tuple([space] * n)
import base64
import uuid
def random_alphanumeric(length=14):
buf = []
while len(buf) < length:
entropy = base64.encodestring(uuid.uuid4().bytes).decode('ascii')
bytes = [c for c in entropy if c.isalnum()]
buf += bytes
return ''.join(buf)[:length]
def best_effort(function, *args, **kwargs):
try:
return function(*args, **kwargs)
except:
if six.PY2:
logging.error('Error in %s:', function.__name__)
traceback.print_exc()
else:
logging.error('Error in %s:', function.__name__)
logger.error(traceback.format_exc())
return None
import base64
def basic_auth_encode(username, password=''):
fmt = '{}:{}'.format(username, password)
return 'Basic ' + base64.encodestring(fmt.encode('utf-8')).rstrip().decode('utf-8')
def basic_auth_decode(header):
if header.startswith('Basic '):
header = header[len('Basic '):]
decoded = base64.decodestring(header.encode('utf-8')).decode('utf-8')
username, password = decoded.split(':')
return username, password
else:
return None
import os
def default_password():
if os.path.exists('/usr/local/boxware/privileged_state/password'):
with open('/usr/local/boxware/privileged_state/password') as f:
return f.read().strip()
return 'boxware'
import logging
import time
logger = logging.getLogger(__name__)
class PeriodicLog(object):
def log(self, obj, name, msg, *args, **kwargs):
try:
info = obj._periodic_log_info
except AttributeError:
info = obj._periodic_log_info = {}
# Would be better to use a frequency=... arg after kwargs, but
# that isn't py2 compatible.
frequency = kwargs.pop('frequency', 1)
delay = kwargs.pop('delay', 0)
last_log = info.setdefault(name, time.time()-frequency+delay)
if time.time() - last_log < frequency:
return
info[name] = time.time()
logger.info('[{}] {}'.format(name, msg), *args)
def log_debug(self, obj, name, msg, *args, **kwargs):
try:
info = obj._periodic_log_debug
except AttributeError:
info = obj._periodic_log_debug = {}
frequency = kwargs.pop('frequency', 1)
delay = kwargs.pop('delay', 0)
last_log = info.setdefault(name, time.time()-frequency+delay)
if time.time() - last_log < frequency:
return
info[name] = time.time()
logger.debug('[{}] {}'.format(name, msg), *args)
_periodic = PeriodicLog()
periodic_log = _periodic.log
periodic_log_debug = _periodic.log_debug
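# Minimal usage sketch for periodic_log (defined but never called). Any object
# can carry the throttling state; `_Progress` here is a throwaway carrier, and
# the message is emitted at most once per `frequency` seconds.
def _example_periodic_log():
    class _Progress(object):
        pass
    progress = _Progress()
    for step in range(10):
        periodic_log(progress, 'progress', 'at step %s', step, frequency=5)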
import threading
def thread_name():
return threading.current_thread().name
def exit_on_signal():
"""
Install a signal handler for HUP, INT, and TERM to call exit, allowing clean shutdown.
When running a jiminy environment, it's important to shut down the container when the
    agent dies, so you should either call this or otherwise arrange to exit on signals.
"""
def shutdown(signal, frame):
        logger.warning('Received signal %s: exiting', signal)
sys.exit(128+signal)
signal.signal(signal.SIGHUP, shutdown)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
| 30.390476
| 279
| 0.631777
|
ba4906813a5e95f39aa7dedcea6cf35078344326
| 5,417
|
py
|
Python
|
convs/dyres_conv.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
convs/dyres_conv.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
convs/dyres_conv.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['DyResConv']
class route_func(nn.Module):
def __init__(self, in_channels, num_experts=3, reduction=16, mode='A'):
super().__init__()
assert mode == 'A' or mode == 'B' or mode == 'S'
self.mode = mode
# Global Average Pool
if mode == 'A' or mode == 'B':
self.gap1 = nn.AdaptiveAvgPool2d(1)
self.gap3 = nn.AdaptiveAvgPool2d(3)
self.gap5 = nn.AdaptiveAvgPool2d(5)
else:
self.gap1 = nn.AdaptiveAvgPool2d(1)
self.gap3 = nn.AdaptiveAvgPool2d(3)
squeeze_channels = max(in_channels // reduction, reduction)
if mode == 'A': # 1-3-3-1
self.dwise_separable = nn.Sequential(
nn.Conv2d(3 * in_channels, squeeze_channels, kernel_size=1, stride=1, groups=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, squeeze_channels, kernel_size=3, stride=1, groups=squeeze_channels, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, squeeze_channels, kernel_size=3, stride=1, groups=squeeze_channels, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, num_experts * in_channels, kernel_size=1, stride=1, groups=1, bias=False)
)
elif mode == 'B': # 3-1-1-3
self.dwise_separable = nn.Sequential(
nn.Conv2d(3 * in_channels, in_channels, kernel_size=3, stride=1, groups=in_channels, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, squeeze_channels, kernel_size=1, stride=1, groups=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, in_channels, kernel_size=1, stride=1, groups=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, num_experts * in_channels, kernel_size=3, stride=1, groups=in_channels, bias=False)
)
elif mode == 'S': # simplified mode
self.dwise_separable = nn.Sequential(
nn.Conv2d(2 * in_channels, squeeze_channels, kernel_size=1, stride=1, groups=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, squeeze_channels, kernel_size=3, stride=1, groups=squeeze_channels, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(squeeze_channels, num_experts * in_channels, kernel_size=1, stride=1, groups=1, bias=False)
)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
b, _, _, _ = x.size()
a1 = self.gap1(x)
if self.mode == 'A' or self.mode == 'B':
a3 = F.interpolate(self.gap3(x), 5, mode='bicubic', align_corners=False)
a5 = self.gap5(x)
a1 = a1.expand_as(a5)
attention = torch.cat([a1, a3, a5], dim=1)
else:
a3 = self.gap3(x)
a1 = a1.expand_as(a3)
attention = torch.cat([a1, a3], dim=1)
attention = self.sigmoid(self.dwise_separable(attention))
return attention
class DyResConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, num_experts=3, stride=1, padding=0, groups=1, reduction=16, mode='A', deploy=False):
super().__init__()
assert mode == 'A' or mode == 'B' or mode == 'S'
self.deploy = deploy
self.num_experts = num_experts
self.stride = stride
self.padding = padding
self.groups = groups
# routing function
self.routing_func = route_func(in_channels, num_experts, reduction, mode)
# convs
if deploy:
            self.convs = nn.ParameterList([nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size)) for i in range(num_experts)])
else:
self.convs = nn.ModuleList([nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, groups=groups) for i in range(num_experts)])
self.bns = nn.ModuleList([nn.BatchNorm2d(out_channels) for i in range(num_experts)])
def forward(self, x):
_, c_in, _, _ = x.size()
        routing_weight = self.routing_func(x) # N x (num_experts * C_in) x 1 x 1
if self.deploy:
convs = []
for i in range(self.num_experts):
route = routing_weight[:, i * c_in : (i+1) * c_in]
weight = self.convs[i]
weight = weight * route
convs.append(weight)
conv = sum(convs)
output = F.conv2d(x, weight=conv, stride=self.stride, padding=self.padding, groups=self.groups)
else:
outputs = []
for i in range(self.num_experts):
route = routing_weight[:, i*c_in:(i+1)*c_in]
attention = x * route.expand_as(x)
out = self.convs[i](attention)
out = self.bns[i](out)
outputs.append(out)
output = sum(outputs)
return output
def test():
x = torch.randn(1, 16, 32, 32)
conv = DyResConv(16, 64, 3, padding=1, mode='A')
y = conv(x)
print(y.shape)
conv = DyResConv(16, 64, 3, padding=1, mode='B')
y = conv(x)
print(y.shape)
conv = DyResConv(16, 64, 3, padding=1, mode='S')
y = conv(x)
print(y.shape)
# test()
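def _shape_check_route_func():
    # Illustrative shape check (assumed usage, not part of the original tests):
    # in mode 'A' the three pooled maps are concatenated and squeezed down to a
    # 1x1 attention tensor with num_experts * in_channels channels.
    x = torch.randn(2, 16, 32, 32)
    router = route_func(16, num_experts=3, mode='A')
    attention = router(x)
    assert attention.shape == (2, 3 * 16, 1, 1)
    return attention.shape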
| 43.336
| 166
| 0.57818
|
ffc094997b99e67efa620c4daa1898abc5d851de
| 8,311
|
py
|
Python
|
mbrl/third_party/pytorch_sac_pranz24/sac.py
|
maxyang27896/mbrl-lib
|
4543fc929321fdd6e6522528c68e54d822ad2a6a
|
[
"MIT"
] | null | null | null |
mbrl/third_party/pytorch_sac_pranz24/sac.py
|
maxyang27896/mbrl-lib
|
4543fc929321fdd6e6522528c68e54d822ad2a6a
|
[
"MIT"
] | null | null | null |
mbrl/third_party/pytorch_sac_pranz24/sac.py
|
maxyang27896/mbrl-lib
|
4543fc929321fdd6e6522528c68e54d822ad2a6a
|
[
"MIT"
] | null | null | null |
import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from mbrl.third_party.pytorch_sac_pranz24.model import (
DeterministicPolicy,
GaussianPolicy,
QNetwork,
)
from mbrl.third_party.pytorch_sac_pranz24.utils import hard_update, soft_update
class SAC(object):
def __init__(self, num_inputs, action_space, args):
self.args = args
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.policy_type = args.policy
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.device = args.device
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(
device=self.device
)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.critic_target = QNetwork(
num_inputs, action_space.shape[0], args.hidden_size
).to(self.device)
hard_update(self.critic_target, self.critic)
if self.policy_type == "Gaussian":
            # Target Entropy = −dim(A) (e.g., -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning is True:
if args.target_entropy is None:
self.target_entropy = -torch.prod(
torch.Tensor(action_space.shape).to(self.device)
).item()
else:
self.target_entropy = args.target_entropy
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
self.policy = GaussianPolicy(
num_inputs, action_space.shape[0], args.hidden_size, action_space
).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(
num_inputs, action_space.shape[0], args.hidden_size, action_space
).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
def select_action(self, state, batched=False, evaluate=False):
state = torch.FloatTensor(state)
if not batched:
state = state.unsqueeze(0)
state = state.to(self.device)
if evaluate is False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
if batched:
return action.detach().cpu().numpy()
return action.detach().cpu().numpy()[0]
def update_parameters(
self, memory, batch_size, updates, logger=None, reverse_mask=False
):
# Sample a batch from memory
(
state_batch,
action_batch,
next_state_batch,
reward_batch,
mask_batch,
) = memory.sample(batch_size).astuple()
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
if reverse_mask:
mask_batch = mask_batch.logical_not()
with torch.no_grad():
next_state_action, next_state_log_pi, _ = self.policy.sample(
next_state_batch
)
qf1_next_target, qf2_next_target = self.critic_target(
next_state_batch, next_state_action
)
min_qf_next_target = (
torch.min(qf1_next_target, qf2_next_target)
- self.alpha * next_state_log_pi
)
next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)
qf1, qf2 = self.critic(
state_batch, action_batch
) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(
qf1, next_q_value
) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(
qf2, next_q_value
) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf_loss = qf1_loss + qf2_loss
self.critic_optim.zero_grad()
qf_loss.backward()
self.critic_optim.step()
pi, log_pi, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi = self.critic(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
policy_loss = (
(self.alpha * log_pi) - min_qf_pi
).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.automatic_entropy_tuning:
alpha_loss = -(
self.log_alpha * (log_pi + self.target_entropy).detach()
).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_tlogs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.0).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.tau)
if logger is not None:
logger.log("train/batch_reward", reward_batch.mean(), updates)
logger.log("train_critic/loss", qf_loss, updates)
logger.log("train_actor/loss", policy_loss, updates)
if self.automatic_entropy_tuning:
logger.log("train_actor/target_entropy", self.target_entropy, updates)
else:
logger.log("train_actor/target_entropy", 0, updates)
logger.log("train_actor/entropy", -log_pi.mean(), updates)
logger.log("train_alpha/loss", alpha_loss, updates)
logger.log("train_alpha/value", self.alpha, updates)
return (
qf1_loss.item(),
qf2_loss.item(),
policy_loss.item(),
alpha_loss.item(),
alpha_tlogs.item(),
)
# Save model parameters
def save_checkpoint(self, env_name=None, suffix="", ckpt_path=None):
if ckpt_path is None:
assert env_name is not None
if not os.path.exists("checkpoints/"):
os.makedirs("checkpoints/")
ckpt_path = "checkpoints/sac_checkpoint_{}_{}".format(env_name, suffix)
print("Saving models to {}".format(ckpt_path))
torch.save(
{
"policy_state_dict": self.policy.state_dict(),
"critic_state_dict": self.critic.state_dict(),
"critic_target_state_dict": self.critic_target.state_dict(),
"critic_optimizer_state_dict": self.critic_optim.state_dict(),
"policy_optimizer_state_dict": self.policy_optim.state_dict(),
},
ckpt_path,
)
# Load model parameters
def load_checkpoint(self, ckpt_path, evaluate=False):
print("Loading models from {}".format(ckpt_path))
if ckpt_path is not None:
checkpoint = torch.load(ckpt_path)
self.policy.load_state_dict(checkpoint["policy_state_dict"])
self.critic.load_state_dict(checkpoint["critic_state_dict"])
self.critic_target.load_state_dict(checkpoint["critic_target_state_dict"])
self.critic_optim.load_state_dict(checkpoint["critic_optimizer_state_dict"])
self.policy_optim.load_state_dict(checkpoint["policy_optimizer_state_dict"])
if evaluate:
self.policy.eval()
self.critic.eval()
self.critic_target.eval()
else:
self.policy.train()
self.critic.train()
self.critic_target.train()
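def _example_sac_setup():
    """Illustrative construction sketch only. Assumes a gym-style Box action
    space and uses placeholder values for the hyperparameter names read by
    SAC.__init__ above; nothing here is tuned or prescribed by the library."""
    import argparse
    import numpy as np
    from gym import spaces

    args = argparse.Namespace(
        gamma=0.99, tau=0.005, alpha=0.2, policy="Gaussian",
        target_update_interval=1, automatic_entropy_tuning=True,
        target_entropy=None, device="cpu", hidden_size=256, lr=3e-4,
    )
    action_space = spaces.Box(low=-1.0, high=1.0, shape=(6,), dtype=np.float32)
    agent = SAC(num_inputs=17, action_space=action_space, args=args)
    # Deterministic (mean) action for a zero observation, just to show the call.
    return agent.select_action(np.zeros(17, dtype=np.float32), evaluate=True)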
| 39.20283
| 91
| 0.60089
|
cf6ef6036daf1b79be4a2dd64771287167822fb3
| 3,316
|
py
|
Python
|
src/maggma/api/resource/core.py
|
materialsproject/maggflow
|
9f8d7a0865ec13212a3fd00d5edebd3cb7b40e7d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/maggma/api/resource/core.py
|
materialsproject/maggflow
|
9f8d7a0865ec13212a3fd00d5edebd3cb7b40e7d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
src/maggma/api/resource/core.py
|
materialsproject/maggflow
|
9f8d7a0865ec13212a3fd00d5edebd3cb7b40e7d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import logging
from abc import ABCMeta, abstractmethod
from typing import Dict, Type
from fastapi import APIRouter, FastAPI, Response, Request
from monty.json import MontyDecoder, MSONable
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from maggma.api.utils import STORE_PARAMS, api_sanitize
from maggma.utils import dynamic_import
class Resource(MSONable, metaclass=ABCMeta):
"""
Base class for a REST Compatible Resource
"""
def __init__(
self, model: Type[BaseModel],
):
"""
Args:
model: the pydantic model this Resource represents
"""
if not issubclass(model, BaseModel):
raise ValueError("The resource model has to be a PyDantic Model")
self.model = api_sanitize(model, allow_dict_msonable=True)
self.logger = logging.getLogger(type(self).__name__)
self.logger.addHandler(logging.NullHandler())
self.router = APIRouter()
self.prepare_endpoint()
self.setup_redirect()
def on_startup(self):
"""
Callback to perform some work on resource initialization
"""
pass
@abstractmethod
def prepare_endpoint(self):
"""
Internal method to prepare the endpoint by setting up default handlers
for routes.
"""
pass
def setup_redirect(self):
@self.router.get("$", include_in_schema=False)
def redirect_unslashed():
"""
            Redirects a request URL without a trailing forward slash to the
            resource URL with the trailing slash
"""
url = self.router.url_path_for("/")
return RedirectResponse(url=url, status_code=301)
def run(self): # pragma: no cover
"""
Runs the Endpoint cluster locally
        This is intended for testing, not production
"""
import uvicorn
app = FastAPI()
app.include_router(self.router, prefix="")
uvicorn.run(app)
def as_dict(self) -> Dict:
"""
Special as_dict implemented to convert pydantic models into strings
"""
d = super().as_dict() # Ensures sub-classes serialize correctly
d["model"] = f"{self.model.__module__}.{self.model.__name__}"
return d
@classmethod
def from_dict(cls, d: Dict):
if isinstance(d["model"], str):
d["model"] = dynamic_import(d["model"])
d = {k: MontyDecoder().process_decoded(v) for k, v in d.items()}
return cls(**d)
class HintScheme(MSONable, metaclass=ABCMeta):
"""
Base class for generic hint schemes generation
"""
@abstractmethod
def generate_hints(self, query: STORE_PARAMS) -> STORE_PARAMS:
"""
This method takes in a MongoDB query and returns hints
"""
class HeaderProcessor(MSONable, metaclass=ABCMeta):
"""
Base class for generic header processing
"""
@abstractmethod
def process_header(self, response: Response, request: Request):
"""
This method takes in a FastAPI Response object and processes a new header for it in-place.
It can use data in the upstream request to generate the header.
(https://fastapi.tiangolo.com/advanced/response-headers/#use-a-response-parameter)
"""
| 29.087719
| 98
| 0.633896
|
2a495d6b63b5e22d176bd17569b0c05d76761bc1
| 800
|
py
|
Python
|
puzzle_editing/messaging.py
|
jwmclaren/puzzlord
|
a0420be33658bb88cd34dddf6364f4da76453253
|
[
"X11"
] | null | null | null |
puzzle_editing/messaging.py
|
jwmclaren/puzzlord
|
a0420be33658bb88cd34dddf6364f4da76453253
|
[
"X11"
] | null | null | null |
puzzle_editing/messaging.py
|
jwmclaren/puzzlord
|
a0420be33658bb88cd34dddf6364f4da76453253
|
[
"X11"
] | null | null | null |
from django.conf import settings
from django.core.mail.message import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_mail_wrapper(subject, template, context, recipients):
if recipients:
mail = EmailMultiAlternatives(
subject=settings.EMAIL_SUBJECT_PREFIX + subject,
body=render_to_string(template + ".txt", context),
from_email=settings.EMAIL_SENDER,
to=recipients,
alternatives=[(render_to_string(template + ".html", context), "text/html")],
reply_to=[settings.EMAIL_REPLY_TO],
)
send_res = mail.send()
if send_res != 1:
raise RuntimeError(
"Unknown failure sending mail??? {} {}".format(recipients, send_res)
)
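def _example_send_mail(recipients):
    """Illustrative call only; the template name and context are hypothetical,
    and the EMAIL_* settings referenced above must be configured first."""
    send_mail_wrapper(
        subject="Testsolve requested",
        template="emails/testsolve_request",  # expects .txt and .html variants
        context={"puzzle_title": "Example Puzzle"},
        recipients=recipients,
    )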
| 38.095238
| 88
| 0.64625
|
d03e4d085323f4e2e5432f1094f91139c8f0f3f5
| 648
|
py
|
Python
|
please2/command/git/cmd_git_new_branch.py
|
jadnohra/please2
|
8654347b72758f8f6cd255ef97500da55839a62a
|
[
"MIT"
] | null | null | null |
please2/command/git/cmd_git_new_branch.py
|
jadnohra/please2
|
8654347b72758f8f6cd255ef97500da55839a62a
|
[
"MIT"
] | null | null | null |
please2/command/git/cmd_git_new_branch.py
|
jadnohra/please2
|
8654347b72758f8f6cd255ef97500da55839a62a
|
[
"MIT"
] | null | null | null |
import please2.reg_cmd as reg_cmd
from ..cmd_base import Command, Match
from .cmd_git_util import make_error_result, run_git_get_lines
from please2.util.args import get_positional_after
class CommandGitNewBranch(Command):
def help(self):
return self.key() + ' <branch-name> [@ <dir>]'
def opt_keys(self):
return set(['@'])
def key(self):
return 'git new branch'
def run_match(self, args, params):
name = get_positional_after(args.args, self.key().split()[-1])
run_git_get_lines(args, params, ['branch', name])
return Match('')
reg_cmd.register_command(CommandGitNewBranch())
| 24.923077
| 70
| 0.680556
|
d92c5e75218bd59c6e3054065483ca124ad43be6
| 179
|
py
|
Python
|
problem0642.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0642.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0642.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
###########################
#
# #642 Sum of largest prime factors - Project Euler
# https://projecteuler.net/problem=642
#
# Code by Kevin Marciniak
#
###########################
| 19.888889
| 51
| 0.497207
|
78389bd0d69b5b7f89c349fa2f266a35b561142b
| 3,226
|
py
|
Python
|
bluegraph/downstream/data_structures.py
|
BlueBrain/BlueGraph
|
7a7df9c97d01d7ebfa77cf911f187b18e2d6dc02
|
[
"Apache-2.0"
] | 25
|
2021-04-09T09:57:10.000Z
|
2022-02-09T14:26:55.000Z
|
bluegraph/downstream/data_structures.py
|
BlueBrain/BlueGraph
|
7a7df9c97d01d7ebfa77cf911f187b18e2d6dc02
|
[
"Apache-2.0"
] | 39
|
2021-04-09T08:23:28.000Z
|
2021-11-10T08:35:59.000Z
|
bluegraph/downstream/data_structures.py
|
BlueBrain/BlueGraph
|
7a7df9c97d01d7ebfa77cf911f187b18e2d6dc02
|
[
"Apache-2.0"
] | 3
|
2021-07-30T19:00:24.000Z
|
2022-03-16T21:01:48.000Z
|
# BlueGraph: unifying Python framework for graph analytics and co-occurrence analysis.
# Copyright 2020-2021 Blue Brain Project / EPFL
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import numpy as np
class ElementClassifier(ABC):
"""Interface for graph element classification models.
It wraps a predictive classification model provided by the user
and a set of configs that allow the user to fit the model
and make predictions on the input PGFrames. Its main goal is to
    hide the details of converting element (node or edge) properties
into data tables that can be provided to the predictive model.
"""
def __init__(self, model, feature_vector_prop=None, feature_props=None,
**kwargs):
"""Initialize the classifier."""
self.model = model
self.feature_vector_prop = feature_vector_prop
self.feature_props = feature_props
def _concatenate_feature_props(self, pgframe, nodes):
if self.feature_props is None or len(self.feature_props) == 0:
            raise ValueError("Cannot build the feature table: no 'feature_props' specified")
return pgframe.nodes(
raw_frame=True).loc[nodes, self.feature_props].to_numpy()
def _get_node_features(self, pgframe, nodes):
if self.feature_vector_prop:
features = pgframe.get_node_property_values(
self.feature_vector_prop, nodes=nodes).tolist()
else:
features = self._concatenate_feature_props(pgframe, nodes)
return np.array(features)
@abstractmethod
    def _generate_train_elements(self, pgframe, elements=None):
pass
@abstractmethod
    def _generate_predict_elements(self, pgframe, elements=None):
pass
@abstractmethod
def _generate_train_labels(self, pgframe, elements, label_prop=None):
pass
@abstractmethod
def _generate_data_table(self, pgframe, elements):
pass
def fit(self, pgframe, train_elements=None, labels=None, label_prop=None,
**kwargs):
"""Fit the classifier."""
train_elements = self._generate_train_elements(
pgframe, train_elements, **kwargs)
labels = self._generate_train_labels(
pgframe, train_elements, label_prop) if labels is None else labels
data = self._generate_data_table(pgframe, train_elements)
self.model.fit(data, labels)
def predict(self, pgframe, predict_elements=None):
"""Run prediction on the input graph."""
predict_elements = self._generate_predict_elements(
pgframe, predict_elements)
data = self._generate_data_table(pgframe, predict_elements)
return self.model.predict(data)
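class _SimpleNodeClassifier(ElementClassifier):
    """Illustrative concrete subclass (a sketch under assumptions): the wrapped
    PGFrame exposes `nodes()` and `get_node_property_values()` as already used
    by `_get_node_features`, and `model` is any estimator with fit/predict."""

    def _generate_train_elements(self, pgframe, elements=None, **kwargs):
        return elements if elements is not None else pgframe.nodes()

    def _generate_predict_elements(self, pgframe, elements=None):
        return elements if elements is not None else pgframe.nodes()

    def _generate_train_labels(self, pgframe, elements, label_prop=None):
        return np.array(
            pgframe.get_node_property_values(label_prop, nodes=elements).tolist())

    def _generate_data_table(self, pgframe, elements):
        return self._get_node_features(pgframe, elements)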
| 38.404762
| 87
| 0.697458
|
95ff0faea82be8741f6ef17bf80c6fed00f81a11
| 5,226
|
py
|
Python
|
unforeseen/analysis/calibrate.py
|
petoor/unforeseen-client
|
bd068ddc4029cc66c76f96ce9fc90e4e29106379
|
[
"MIT"
] | null | null | null |
unforeseen/analysis/calibrate.py
|
petoor/unforeseen-client
|
bd068ddc4029cc66c76f96ce9fc90e4e29106379
|
[
"MIT"
] | null | null | null |
unforeseen/analysis/calibrate.py
|
petoor/unforeseen-client
|
bd068ddc4029cc66c76f96ce9fc90e4e29106379
|
[
"MIT"
] | null | null | null |
# Standard python imports
import sys, os, logging, threading, argparse
import numpy as np
# Gstreamer related
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GLib, Gst, Gtk
from gstreamer import GstPipeline, Gst
import gstreamer.utils as utils
# AI Model
#from unforeseen.analysis.models.people_torchvision import PeopleDetect
#from models.people_jetson import PeopleDetect
from unforeseen.config import setup_loader
# Apply camera settings
from unforeseen.cameras.camera_settings import apply_camera_settings
class GstPipeline:
"""
https://github.com/google-coral/examples-camera/blob/master/gstreamer/gstreamer.py
"""
def __init__(self, pipeline, camera):
self.running = False
self.camera = camera
self.gstsample = None
self.frameid = 0
self.condition = threading.Condition()
self.player = Gst.parse_launch(pipeline)
# Fetch different pads from pipeline for manipulation
appsink = self.player.get_by_name("appsink")
appsink.connect("new-preroll", self.on_new_sample, True)
appsink.connect("new_sample", self.on_new_sample, False)
# Set up a pipeline bus watch to catch errors.
bus = self.player.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_bus_message)
def run(self):
self.running = True
worker = threading.Thread(target=self.inference_loop)
worker.start()
# State to start pipeline (player)
self.player.set_state(Gst.State.PLAYING)
try:
Gtk.main()
except Exception as e:
print("Error: ", e)
# Clean up.
        self.player.set_state(Gst.State.NULL)
while GLib.MainContext.default().iteration(False):
pass
with self.condition:
self.running = False
self.condition.notify_all()
worker.join()
def on_bus_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
Gtk.main_quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
Gtk.main_quit()
return True
def on_new_sample(self, sink, preroll):
sample = sink.emit('pull-preroll' if preroll else 'pull-sample')
with self.condition:
self.gstsample = sample
self.condition.notify_all()
return Gst.FlowReturn.OK
def inference_loop(self):
while True:
with self.condition:
while not self.gstsample and self.running:
self.condition.wait()
if not self.running:
break
gstsample = self.gstsample
self.frameid +=1
self.gstsample = None
if self.frameid % 60 == 0:
apply_camera_settings(self.camera)
if __name__=="__main__":
Gst.init(None)
parser = argparse.ArgumentParser(description='Calibration script')
parser.add_argument('--pipeline_path', default="analysis/pipelines/record-and-display-raw.txt",
help='The path to the gstreamer pipeline you would like to use')
parser.add_argument('--camera', default="/dev/video0",
help='The camera used')
args = parser.parse_args()
pipeline_path = args.pipeline_path
# Check if pipeline txt is found
if os.path.isfile(pipeline_path):
logging.info("Pipeline found")
    else:
        logging.critical("Pipeline file not found")
        sys.exit(1)
setup = setup_loader()
camera_used = None
for camera in setup.get("cameras"):
if camera.get(args.camera) is not None:
camera_used = camera
break
    if camera_used is None:
        logging.critical("Camera not found!")
        sys.exit(1)
apply_camera_settings(args.camera)
camera_settings = camera_used[f"{args.camera}"]
camera_format = camera_settings.get("camera_format")
height = camera_settings.get("height")
width = camera_settings.get("width")
framerate = camera_settings.get("framerate")
bitrate = camera_settings.get("bitrate")
udpsink_port = setup.get("server").get("udp_sink")
ip = setup.get("device").get("ip")
with open(pipeline_path, "r") as pipeline:
pipeline = pipeline.read()
pipeline = pipeline.replace("{camera}", str(args.camera))
pipeline = pipeline.replace("{camera_format}", str(camera_format))
pipeline = pipeline.replace("{width}", str(width))
pipeline = pipeline.replace("{height}", str(height))
pipeline = pipeline.replace("{framerate}", str(framerate))
pipeline = pipeline.replace("{bitrate}", str(bitrate))
pipeline = pipeline.replace("{udpsink_port}", str(udpsink_port))
pipeline = pipeline.replace("{ip}", str(ip))
GstPipeline(pipeline, args.camera).run()
| 33.5
| 99
| 0.628205
|
e731ffabf724e78f27f461f059e3bdf3b3c92720
| 12,642
|
py
|
Python
|
generated/python/gapic-google-cloud-speech-v1/google/cloud/gapic/speech/v1/speech_client.py
|
landrito/api-client-staging
|
140c312c9335af160efce5b37842c995308e0148
|
[
"BSD-3-Clause"
] | 18
|
2016-12-08T20:47:57.000Z
|
2022-01-29T19:36:04.000Z
|
generated/python/gapic-google-cloud-speech-v1/google/cloud/gapic/speech/v1/speech_client.py
|
landrito/api-client-staging
|
140c312c9335af160efce5b37842c995308e0148
|
[
"BSD-3-Clause"
] | 252
|
2016-09-21T20:51:36.000Z
|
2021-03-25T23:02:36.000Z
|
generated/python/gapic-google-cloud-speech-v1/google/cloud/gapic/speech/v1/speech_client.py
|
landrito/api-client-staging
|
140c312c9335af160efce5b37842c995308e0148
|
[
"BSD-3-Clause"
] | 37
|
2016-09-19T21:13:16.000Z
|
2022-01-29T19:36:07.000Z
|
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.speech.v1 Speech API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gapic.longrunning import operations_client
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
from google.gax.utils import oneof
import google.gax
from google.cloud.gapic.speech.v1 import enums
from google.cloud.proto.speech.v1 import cloud_speech_pb2
class SpeechClient(object):
"""Service that implements Google Cloud Speech API."""
SERVICE_ADDRESS = 'speech.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A SpeechClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-speech-v1', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'speech_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.cloud.speech.v1.Speech',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.speech_stub = config.create_stub(
cloud_speech_pb2.SpeechStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self.operations_client = operations_client.OperationsClient(
service_path=service_path,
port=port,
channel=channel,
credentials=credentials,
ssl_credentials=ssl_credentials,
scopes=scopes,
client_config=client_config,
metrics_headers=metrics_headers, )
self._recognize = api_callable.create_api_call(
self.speech_stub.Recognize, settings=defaults['recognize'])
self._long_running_recognize = api_callable.create_api_call(
self.speech_stub.LongRunningRecognize,
settings=defaults['long_running_recognize'])
self._streaming_recognize = api_callable.create_api_call(
self.speech_stub.StreamingRecognize,
settings=defaults['streaming_recognize'])
# Service calls
def recognize(self, config, audio, options=None):
"""
Performs synchronous speech recognition: receive results after all audio
has been sent and processed.
Example:
>>> from google.cloud.gapic.speech.v1 import speech_client
>>> from google.cloud.gapic.speech.v1 import enums
>>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
>>> client = speech_client.SpeechClient()
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = cloud_speech_pb2.RecognitionConfig(encoding=encoding, sample_rate_hertz=sample_rate_hertz, language_code=language_code)
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = cloud_speech_pb2.RecognitionAudio(uri=uri)
>>> response = client.recognize(config, audio)
Args:
config (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionConfig`): *Required* Provides information to the recognizer that specifies how to
process the request.
audio (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionAudio`): *Required* The audio data to be recognized.
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognizeResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = cloud_speech_pb2.RecognizeRequest(config=config, audio=audio)
return self._recognize(request, options)
def long_running_recognize(self, config, audio, options=None):
"""
Performs asynchronous speech recognition: receive results via the
google.longrunning.Operations interface. Returns either an
``Operation.error`` or an ``Operation.response`` which contains
a ``LongRunningRecognizeResponse`` message.
Example:
>>> from google.cloud.gapic.speech.v1 import speech_client
>>> from google.cloud.gapic.speech.v1 import enums
>>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
>>> client = speech_client.SpeechClient()
>>> encoding = enums.RecognitionConfig.AudioEncoding.FLAC
>>> sample_rate_hertz = 44100
>>> language_code = 'en-US'
>>> config = cloud_speech_pb2.RecognitionConfig(encoding=encoding, sample_rate_hertz=sample_rate_hertz, language_code=language_code)
>>> uri = 'gs://bucket_name/file_name.flac'
>>> audio = cloud_speech_pb2.RecognitionAudio(uri=uri)
>>> response = client.long_running_recognize(config, audio)
>>>
>>> def callback(operation_future):
>>> # Handle result.
>>> result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
config (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionConfig`): *Required* Provides information to the recognizer that specifies how to
process the request.
audio (:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.RecognitionAudio`): *Required* The audio data to be recognized.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.gax._OperationFuture` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = cloud_speech_pb2.LongRunningRecognizeRequest(
config=config, audio=audio)
return google.gax._OperationFuture(
self._long_running_recognize(request,
options), self.operations_client,
cloud_speech_pb2.LongRunningRecognizeResponse,
cloud_speech_pb2.LongRunningRecognizeMetadata, options)
def streaming_recognize(self, requests, options=None):
"""
Performs bidirectional streaming speech recognition: receive results while
sending audio. This method is only available via the gRPC API (not REST).
EXPERIMENTAL: This method interface might change in the future.
Example:
>>> from google.cloud.gapic.speech.v1 import speech_client
>>> from google.cloud.proto.speech.v1 import cloud_speech_pb2
>>> client = speech_client.SpeechClient()
>>> request = cloud_speech_pb2.StreamingRecognizeRequest()
>>> requests = [request]
>>> for element in client.streaming_recognize(requests):
>>> # process element
>>> pass
Args:
requests (iterator[:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.StreamingRecognizeRequest`]): The input objects.
options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g., timeout, retries, etc.
Returns:
iterator[:class:`google.cloud.proto.speech.v1.cloud_speech_pb2.StreamingRecognizeResponse`].
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
return self._streaming_recognize(requests, options)
| 44.202797
| 164
| 0.657174
|
90bb2395dfb8226545426e4203bbedb1d4d0b049
| 6,057
|
py
|
Python
|
src/conways.py
|
atonymartin20/Cellular-Automata
|
01e28eb593de8ab97d82ce045e449562fd5a5e31
|
[
"MIT"
] | null | null | null |
src/conways.py
|
atonymartin20/Cellular-Automata
|
01e28eb593de8ab97d82ce045e449562fd5a5e31
|
[
"MIT"
] | null | null | null |
src/conways.py
|
atonymartin20/Cellular-Automata
|
01e28eb593de8ab97d82ce045e449562fd5a5e31
|
[
"MIT"
] | null | null | null |
import pygame, random, time
# Define some colors and other constants
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
GREEN = (49, 214, 0)
MAGENTA = (206, 41, 255)
GRAY = (154, 162, 151)
fps = 5
WIN_SIZE = 505
# 1. Create a set of initial states with simple pattern (Ex. blinker)
initial_states = [0] * 400
initial_states[10] = 1
initial_states[30] = 1
initial_states[50] = 1
current_states = initial_states
generation = 0
is_paused = True
# 1 v.2 Fill current_states with random states
# current_states = [0] * 400
# for i in range( 0, len(current_states) ):
# current_states[i] = random.randint(0,1)
# 1 v.3 Allow users to choose between several predefined init states
pygame.init()
# Set the width and height of the screen [width, height]
size = (WIN_SIZE, WIN_SIZE)
screen = pygame.display.set_mode(size)
# Add a title
pygame.display.set_caption("Conway's Game of Life")
# Buttons
def button(text, xPOS, yPOS, width, height, color, fontColor, action=None):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
pygame.draw.rect(screen, color, (xPOS, yPOS, width, height))
if xPOS + width > mouse[0] > xPOS and yPOS + height > mouse[1] > yPOS:
if click[0] == 1 and action != None:
action()
font = pygame.font.Font('freesansbold.ttf', 14)
textSurf, textRect = text_objects(text, font, fontColor)
textRect.center = ( (xPOS + (width / 2)), (yPOS + (height / 2)) )
screen.blit(textSurf, textRect)
# Actions
def playAction():
global is_paused
is_paused = False
def pauseAction():
global is_paused
is_paused = True
def text_objects(text, font, fontColor):
textSurface = font.render(text, True, fontColor)
return textSurface, textSurface.get_rect()
def FasterAction():
global fps
fps += 5
def SlowerAction():
global fps
fps -= 2
def RestartAction():
global generation
global is_paused
global current_states
is_paused = True
generation = 0
current_states = initial_states
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
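# Reference sketch of the standard Conway update rule applied per cell inside
# the main loop below (a standalone self-check; the loop itself does not call it).
def _next_cell_state(alive, live_neighbors):
    if alive:
        return 1 if live_neighbors in (2, 3) else 0
    return 1 if live_neighbors == 3 else 0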
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# --- Game logic should go here
pygame.display.set_caption("Conway's Game of Life, Generation: " + str(generation))
# MAIN SIMULATION LOGIC
if not is_paused:
generation += 1
new_states = [0] * 400
for index in range(len(current_states)):
width = 20
e = index + width
w = index - width
n = index - 1
s = index + 1
ne = n + width
nw = n - width
se = s + width
sw = s - width
live_neighbors = 0
if e < len(current_states) and current_states[e] == 1:
live_neighbors += 1
            if w >= 0 and current_states[w] == 1:
live_neighbors += 1
if n % width != width - 1 and current_states[n] == 1:
live_neighbors += 1
if s % width != 0 and current_states[s] == 1:
live_neighbors += 1
if ne < len(current_states) and ne % width != width - 1 and current_states[ne] == 1:
live_neighbors += 1
if se < len(current_states) and se % width != 0 and current_states[se] == 1:
live_neighbors += 1
            if nw >= 0 and nw % width != width - 1 and current_states[nw] == 1:
live_neighbors += 1
            if sw >= 0 and sw % width != 0 and current_states[sw] == 1:
live_neighbors += 1
if current_states[index] == 1:
# Any live cell with fewer than two live neighbours dies, as if by underpopulation.
if live_neighbors < 2:
new_states[index] = 0
# Any live cell with more than three live neighbours dies, as if by overpopulation.
                elif live_neighbors > 3:
new_states[index] = 0
# Any live cell with two or three live neighbours lives on to the next generation.
else:
new_states[index] = 1
else:
# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
if live_neighbors == 3:
new_states[index] = 1
else:
# Dead cell stays dead
new_states[index] = 0
current_states = new_states
# --- Screen-clearing code goes here
# Here, we clear the screen to gray. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(GRAY)
# --- Drawing code should go here
current_index = 0
x = 5
while x < 500:
y = 5
while y < 500:
# 2. Draw based on values in current_states
state = current_states[current_index]
# 4. Draw based on values in next_states
if state == 0:
pygame.draw.rect(screen, GREEN, pygame.Rect(x, y, 20, 20) )
else:
pygame.draw.rect(screen, MAGENTA, pygame.Rect(x, y, 20, 20) )
current_index += 1
y += 25
x += 25
# All Buttons:
button('Play', 5, 450, 60, 50, BLUE, GRAY, playAction)
button('Pause', 70, 450, 60, 50, MAGENTA, GRAY, pauseAction)
button('Faster', 135, 450, 75, 50, BLACK, WHITE, FasterAction)
button('Slower', 215, 450, 75, 50, GRAY, WHITE, SlowerAction)
button('Restart', 295, 450, 75, 50, GRAY, WHITE, RestartAction)
button(str(generation) + ' generations', 375, 450, 125, 50, BLACK, WHITE)
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 5 frames per second
clock.tick(fps)
# Close the window and quit.
pygame.quit()
| 31.546875
| 106
| 0.585273
|
b7cda316bb6b0a9e7c364ac2129c8602f2a7cf24
| 6,682
|
py
|
Python
|
stl10/train.py
|
EricElmoznino/OrthogonalLowrankEmbedding
|
cce12ca5cb34f7cb888b04739724bdbbd18b1e2d
|
[
"MIT"
] | null | null | null |
stl10/train.py
|
EricElmoznino/OrthogonalLowrankEmbedding
|
cce12ca5cb34f7cb888b04739724bdbbd18b1e2d
|
[
"MIT"
] | null | null | null |
stl10/train.py
|
EricElmoznino/OrthogonalLowrankEmbedding
|
cce12ca5cb34f7cb888b04739724bdbbd18b1e2d
|
[
"MIT"
] | null | null | null |
"""
OLE training script
"""
from __future__ import print_function
import argparse
import os, sys
import time
from utee import misc
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
# OLE Loss
from OLE import *
import uuid
import sys
from datetime import datetime
parser = argparse.ArgumentParser(description='PyTorch STL10 with OLE')
parser.add_argument('--channel', type=int, default=32, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=1e-3, help='weight decay')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=164, help='number of epochs to train (default: 164)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--lambda_', type=float, default=0.25, help='OLE loss weight \lambda (default: 0.25)')
parser.add_argument('--gpu', default='0', help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
parser.add_argument('--num_samples', type=int, default=500, help='number of training samples per class to use')
parser.add_argument('--data_augment', type=int, default=1, help='use data augmentation, 1: yes (default), 0: no')
parser.add_argument('--validation', type=int, default=0, help='run validation on 10%% of the training set. 0: no (default), 1: yes')
parser.add_argument('--log_interval', type=int, default=125, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='81,122', help='decreasing strategy')
args = parser.parse_args()
foldername = 'results/wd_%s_batch_%s_channel_%s_samples_%s/' % (str(args.wd), str(args.batch_size), str(args.channel), str(args.num_samples)) + str(uuid.uuid4())
args.logdir = os.path.join(os.path.dirname(__file__), foldername)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
train_loader, test_loader = dataset.get(batch_size=args.batch_size, num_workers=1, num_samples=args.num_samples, data_augment=args.data_augment, validation=args.validation)
if args.validation or (args.num_samples!=500):
Ntrain = len(train_loader.sampler.indices)
else:
Ntrain = len(train_loader.dataset)
if args.validation:
Ntest = len(test_loader.sampler.indices)
else:
Ntest = len(test_loader.dataset)
model = model.stl10(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
print('USING CUDA')
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
crit0 = nn.CrossEntropyLoss()
crit1 = OLELoss(n_classes=10, lambda_=args.lambda_)
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = crit0(output[0], target)
OLE_loss = crit1(output[1], target)
if args.lambda_ >0:
loss += OLE_loss
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output[0].data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} OLE Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), Ntrain,
loss.item(), OLE_loss.item(), acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda().long().squeeze()
output = model(data)
test_loss += crit0(output[0], target)
test_OLE_loss = crit1(output[1], target)
if args.lambda_ >0:
test_loss += test_OLE_loss
# test_loss += F.cross_entropy(output, target).data[0]
pred = output[0].data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss.item() / len(test_loader) # average over number of mini-batch
test_OLE_loss = test_OLE_loss.item() # already averaged over minibatch
test_acc = 100. * correct / float(Ntest)
print('\tTest set {}/{}: Average loss: {:.4f}, OLE loss: {:.4f} Accuracy: {}/{} ({:.2f}%)'.format( epoch, args.epochs, test_loss, test_OLE_loss, correct, Ntest, test_acc))
new_file = os.path.join(args.logdir, 'checkpoint-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, verbose=True)
print("Total Elapse: {:.2f}, Final Test Result: {:.3f}%".format(time.time()-t_begin, test_acc))
| 36.917127
| 180
| 0.66088
|
1e073f9e35be8d230a5558148b3772b034c7c335
| 15,653
|
py
|
Python
|
simulate.py
|
nathancowieson/saxs_scripts
|
274472f28a630fe688ec00fd535c6679d7d7ef73
|
[
"MIT"
] | null | null | null |
simulate.py
|
nathancowieson/saxs_scripts
|
274472f28a630fe688ec00fd535c6679d7d7ef73
|
[
"MIT"
] | null | null | null |
simulate.py
|
nathancowieson/saxs_scripts
|
274472f28a630fe688ec00fd535c6679d7d7ef73
|
[
"MIT"
] | 2
|
2021-03-03T10:29:04.000Z
|
2021-03-03T15:39:26.000Z
|
#!/anaconda/bin/python
'''
Created on Apr 03, 2014
@author: nathan
'''
import sys, logging, os, re, math, numpy, random
from subprocess import check_output
class Simulate():
"""Simulate SAXS data from a PDB file
The Simulate class contains various functions for calculating SAXS
data from a pdb file. The script attempts to generate the kind of
errors and noise that you would find in real, measured SAXS data
as an aid to experimental design.
"""
'''
Constructor
'''
def __init__(self, options):
###start a log file
self.logger = logging.getLogger('Simulate')
self.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(module)s: %(message)s',"[%Y-%m-%d %H:%M:%S]")
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
self.logger.addHandler(streamhandler)
self.logger.info('Starting a new Simulate job')
try:
self._options = dict(options)
except:
self.logger.error('cound not read in the command line options')
sys.exit()
# try:
# self.qmin = float(min(self._options['qrange'].split('-')))
# self.qmax = float(max(self._options['qrange'].split('-')))
# except:
# self.logger.error('Q min and Q max were not defined correctly. Must# be in the format i.e. 0.01-0.55')
# sys.exit()
def RunCrysol(self):
self.logger.info('Running Crysol')
filelist_before = os.listdir(os.getcwd())
command = 'crysol -ns '+str(self._options['no_points'])+' -sm '+str(self.highq)+' '+str(self._options['file'])
self.output = check_output(command, shell=True)
filelist_after = os.listdir(os.getcwd())
self.crysol_files = list(set(filelist_after) - set(filelist_before))
def ParseIntFile(self):
for file in self.crysol_files:
if file[-4:] == '.int':
intfile=open(file)
intdata=intfile.readlines()
self.qdata = []
self.idata = []
if self._options['vacuum']:
self.logger.info('Outputting the in vacuum data')
for row in intdata:
try:
q = float(row.split()[0])
if self._options['vacuum']:
i = float(row.split()[2]) + ( self._options['background'] * 1E6 )
else:
i = float(row.split()[1]) + ( self._options['background'] * 1E6 )
if q > self.lowq and q < self.highq:
self.qdata.append(q)
self.idata.append(i)
if q == 0:
self.izero = i
except:
pass
self.logger.info('From crysol, min Q: '+str(min(self.qdata))+', max Q: '+str(max(self.qdata)))
try:
float(self.izero)
except:
self.logger.error('could not determine I(0) from int file')
sys.exit()
###GET MW
try:
self.output = self.output.split('\n')
pattern = re.compile('.*Molecular weight:.*')
for line in self.output:
if re.match(pattern, line):
self.molwt = float(line.split()[2]) / 1000
except:
self.logger.error('could not get MW from crysol output')
sys.exit()
def SFtoPhotons(self):
pixel_size = 0.000172 # in meters
camera_length = self._options['camera'] / 1000 # in meters
solid_angle = (pixel_size / camera_length)**2 #steradians
attenuation = self._options['attenuation']
full_flux = 2.00E+13
intensity = full_flux * attenuation
exposure_time = self._options['time']
electron_radius = 2.82E-15
avogadros = 6.02E+23
path_length = self._options['pathlength']/1000 # in meters
concentration = self._options['concentration']
mol_weight = self.molwt *1000
water_sf = 6.6
water_conc = 1000
water_mw = 18
self.proteinphotons = []
self.i_photons = []
for i in self.idata:
protein = solid_angle * intensity * exposure_time * (electron_radius**2)*avogadros*1000*concentration*path_length/mol_weight*i
water = solid_angle * intensity * exposure_time * (electron_radius**2)*avogadros*1000*water_conc*path_length/water_mw*water_sf
self.i_photons.append(protein + water)
self.proteinphotons.append(protein)
def ScaleIntensities(self):
###CALCULATE EXPECTED I(0) BASED ON MW AND CONC
avogadros = 6.022E+23
io_calc = ( self.molwt * (self._options['sld'] * self._options['vbar'])**2 * self._options['concentration'] ) / avogadros
self.logger.info('calculated molecular weight at '+str(self.molwt))
try:
scale_factor = self.izero / io_calc
scaled_idata = []
for i in self.idata:
i = (i / scale_factor)
scaled_idata.append(i)
self.idata = scaled_idata
except:
scaled_idata = []
for i in self.idata:
scaled_idata.append(0)
self.idata = scaled_idata
def SimulateImage(self):
self.logger.info('Simulating pilatus image')
beamx = 559.94
beamy = 467.5
sizex = 981
sizey = 1043
pixelsize = 0.172
camera_length = self._options['camera']
energy = self._options['energy']
wavelength = 12.398521 / energy
beamstop_radius = 15 #(pixels)
beamstop_arm_gradient = -0.4673
beamstop_arm_width = 9
horizontal_module_boundaries = [ ( 195, 212 ), ( 407, 424 ), ( 619, 636 ), ( 831, 848 ) ]
vertical_module_boundaries = [ ( 487, 494 ) ]
###A LIST OF ALL PIXEL COORDINATES
pixel_array = []
for xcoord in range(1, sizex+1):
for ycoord in range(1, sizey+1):
pixel_array.append( ( xcoord, ycoord ) )
###REMOVE PIXELS THAT ARE MASKED BY THE HORIZONTAL MODULE BOUNDARIES
delete_list = []
for mask in horizontal_module_boundaries:
for pixel in pixel_array:
if pixel[1] > mask[0] and pixel[1] < mask[1]:
delete_list.append(pixel)
s = set(delete_list)
new_array = [x for x in pixel_array if x not in s]
pixel_array = new_array
###REMOVE PIXELS THAT ARE MASKED BY THE VERTICAL MODULE BOUNDARIES
delete_list = []
for mask in vertical_module_boundaries:
for pixel in pixel_array:
if pixel[0] > mask[0] and pixel[0] < mask[1]:
delete_list.append(pixel)
s = set(delete_list)
new_array = [x for x in pixel_array if x not in s]
pixel_array = new_array
###DELETE ITEMS BEHIND BACKSTOP ARM
delete_list = []
for pixel in pixel_array:
if pixel[0] > beamx:
lowerlimit = beamstop_arm_gradient * pixel[0] + ((beamy+262.3) - (beamstop_arm_width/2))
upperlimit = beamstop_arm_gradient * pixel[0] + ((beamy+262.3) + (beamstop_arm_width/2))
if pixel[1] > lowerlimit and pixel[1] < upperlimit:
delete_list.append(pixel)
s = set(delete_list)
new_array = [x for x in pixel_array if x not in s]
pixel_array = new_array
        ###DELETE ITEMS BEHIND THE BACKSTOP SHADOW
delete_list = []
for pixel in pixel_array:
distance = math.sqrt(( pixel[0] - beamx )**2 + ( pixel[1] - beamy )**2 )
if distance < beamstop_radius:
delete_list.append(pixel)
s = set(delete_list)
new_array = [x for x in pixel_array if x not in s]
pixel_array = new_array
self.q_array = []
for pixel in pixel_array:
###CALCULATE PIXEL DISTANCE FROM DIRECT BEAM
distance_pixels = math.sqrt(( pixel[0] - beamx )**2 + ( pixel[1] - beamy )**2 )
distance_mm = distance_pixels * pixelsize
            angle_theta = math.asin(distance_mm / camera_length) / 2  # in rad
angle_q = 4*math.pi*math.sin(angle_theta)/wavelength
self.q_array.append(angle_q)
self.lowq = min(self.q_array)
self.highq = max(self.q_array)
self.logger.info('From simulation, min Q: '+str(round(self.lowq, 6))+', max Q: '+str(round(self.highq, 6)))
def GenerateErrors(self):
self.logger.info('Generating errors and adding noise to data')
bins = numpy.linspace(self.lowq, self.highq, len(self.qdata))
binned = numpy.histogram(self.q_array, bins=len(self.qdata))
self.binsizes = binned[0]
self.noisy_i = []
self.edata = []
for q in self.qdata:
index = self.qdata.index(q)
i_inv_cm = self.idata[index]
abs_cal_scale = 0.00072*2 #0.0142
i_photons = (i_inv_cm / abs_cal_scale)*self._options['time']
error = (2*(math.sqrt(i_photons)/math.sqrt(self.binsizes[index])) * abs_cal_scale) / self._options['time']
new_i = random.gauss(self.idata[index], error)
self.noisy_i.append(new_i)
self.edata.append(error)
def GenerateErrors2(self):
self.logger.info('Generating errors and adding noise to data')
bins = numpy.linspace(self.lowq, self.highq, len(self.qdata))
binned = numpy.histogram(self.q_array, bins=len(self.qdata))
self.binsizes = binned[0]
self.noisy_i = []
self.edata = []
for q in self.qdata:
index = self.qdata.index(q)
if q == 0.300068:
print str(q)+','+str(self.binsizes[index])
i_inv_cm = self.idata[index]
background_error = 10E-7
photons_per_bin = self.i_photons[index] * self.binsizes[index]
proteinphotons_per_bin = self.proteinphotons[index] * self.binsizes[index]
total_photons = 2* photons_per_bin - proteinphotons_per_bin
error_in_photons = math.sqrt(total_photons + 0.0030**2*total_photons**2)
try:
photon_to_invcm_scale = (proteinphotons_per_bin / self.idata[index])
error = (error_in_photons / photon_to_invcm_scale) + background_error
except:
error = background_error
# std_error = error / math.sqrt(self.binsize[index]-1)
#error = (2*(math.sqrt(self.i_photons[index])/math.sqrt(self.binsizes[index])))
#scale = self.i_photons[index]/self.idata[index]
#error = 64* error/scale + background_error #double it because of buffer subtraction
new_i = random.gauss(self.idata[index], error/2)
self.noisy_i.append(new_i)
self.edata.append(error)
def OutputDatFile(self):
string_list = []
string_list.append("Simulated data from "+self._options['file'])
string_list.append("%-15s %-18s %-15s" % ("Q(A-1)","I(au)","Error"))
for q in self.qdata:
index = self.qdata.index(q)
q = self.qdata[index]
i = self.noisy_i[index]
e = self.edata[index]
string_list.append("%-15s %-18s %-15s" % (q,i,e))
outfile = open(self._options['outfile'], 'w')
outfile.write('\n'.join(string_list))
outfile.close()
self.logger.info('Output file '+str(self._options['outfile']))
def CleanUpFiles(self):
for file in self.crysol_files:
os.remove(file)
self.logger.info('Deleted all files made by Crysol for this run')
if __name__ == '__main__':
from optparse import OptionParser
from optparse import OptionGroup
if len(sys.argv) < 2:
sys.argv.append('-h')
'''
parse command line options
'''
parser = OptionParser()
required = OptionGroup(parser, "Required Arguments")
required.add_option("-f", "--file", action="store", type="string", dest="file", help="The pdb file you want to use to simulate data")
optional = OptionGroup(parser, "Optional Arguments")
optional.add_option("-c", "--concentration", action="store", type="float", dest="concentration", default=1.0, help="The concentration in mg/ml of the protein (default 1 mg/ml)")
optional.add_option("-t", "--time", action="store", type="float", dest="time", default=10.0, help="The exposure time for the sample, (default 10 secs)")
optional.add_option("-s", "--sld", action="store", type="float", dest="sld", default=2.67E+10, help="The scattering length density of the protein (default is 2.67E+10)")
optional.add_option("-v", "--vbar", action="store", type="float", dest="vbar", default=0.73, help="The partial specific volume of the protein (default is 0.73)")
optional.add_option("-o", "--outfile", action="store", type="string", dest="outfile", help="The name of an output file, the default name is the rootname of your pdb file with .dat at the end.")
optional.add_option("-n", "--number", action="store", type="int", dest="no_points", default=455, help="The number of points in the output file, (default 455)")
optional.add_option("-l", "--length", action="store", type="float", dest="camera", default=1492, help="The length of the SAXS camera in mm, (default 1492)")
optional.add_option("-p", "--pathlength", action="store", type="float", dest="pathlength", default=1.5, help="The pathlength, ie width of capillary in mm, (default 1.5)")
optional.add_option("-e", "--energy", action="store", type="float", dest="energy", default=11, help="The wavelength in KeV, (default 11)")
optional.add_option("-a", "--attenuation", action="store", type="float", dest="attenuation", default=0.3, help="The fractional percentage that the beam was attenuated (default 0.3)")
optional.add_option("-q", "--quiet", action="store_false", dest="quiet", default=True, help="don't print to st. out or display graphics")
optional.add_option("-d", "--dehydrated", action="store_true", dest="vacuum", default=False, help="Use the in vacuum column instead of the hydrated one. Default is hydrated")
optional.add_option("-b", "--background", action="store", type="float", dest="background", default=0, help="simulate a % error in background, (default is 0). i.e. 1.01 would oversubtract a background and 0.99 would undersubtract by 1%")
parser.add_option_group(required)
parser.add_option_group(optional)
(options, args) = parser.parse_args()
'''
fail if you didn't choose a valid pdb file
'''
if os.path.isfile(options.file):
rootname = os.path.splitext(os.path.basename(options.file))[0]
cwd = os.path.split(os.path.realpath(options.file))[0]
os.chdir(cwd)
if not options.outfile:
options.outfile = rootname+'.dat'
pass
else:
sys.exit('The pdb file you specified does not exist')
options = eval(str(options))
job = Simulate(options)
job.SimulateImage()
job.RunCrysol()
job.ParseIntFile()
job.SFtoPhotons()
job.ScaleIntensities()
job.GenerateErrors2()
job.OutputDatFile()
job.CleanUpFiles()
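# Example invocation (a sketch: the script filename "simulate_saxs.py" is assumed,
# and crysol must be available on the PATH for the Crysol steps to succeed):
#
#   python simulate_saxs.py -f lysozyme.pdb -c 2.0 -t 5 -l 1600 -o lysozyme_sim.dat
#
# This simulates a 2 mg/ml sample exposed for 5 s on a 1600 mm camera and writes
# the noisy, error-weighted profile to lysozyme_sim.dat.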
| 43.969101
| 240
| 0.586788
|
818326d93ccc2f59b76103917c937749d6997e6b
| 4,595
|
py
|
Python
|
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/hrnet_w48_mpii_256x256.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/hrnet_w48_mpii_256x256.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/hrnet_w48_mpii_256x256.py
|
pallgeuer/mmpose
|
d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../../../../_base_/default_runtime.py',
'../../../../_base_/datasets/mpii.py'
]
evaluation = dict(interval=10, metric='PCKh', save_best='PCKh')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50, hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=16,
dataset_joints=16,
dataset_channel=list(range(16)),
inference_channel=list(range(16)))
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w48-8ef0771d.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384))),
),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=48,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
use_gt_bbox=True,
bbox_file=None,
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownGetBboxCenterScale', padding=1.25),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownGetBboxCenterScale', padding=1.25),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
data_root = 'data/mpii'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_train.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_val.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_val.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info={{_base_.dataset_info}}),
)
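# Usage sketch (assumes the standard mmpose checkout layout, where this config sits
# at the path below and tools/train.py is the stock training entry point):
#   python tools/train.py configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/hrnet_w48_mpii_256x256.py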
| 29.267516
| 79
| 0.591513
|
df95beb71c633f86803037122e89db4234027c1f
| 3,718
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_http_listener_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_http_listener_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_http_listener_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayHttpListener(SubResource):
"""Http listener of an application gateway.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: Frontend IP configuration resource of an
application gateway.
:type frontend_ip_configuration:
~azure.mgmt.network.v2017_11_01.models.SubResource
:param frontend_port: Frontend port resource of an application gateway.
:type frontend_port: ~azure.mgmt.network.v2017_11_01.models.SubResource
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayProtocol
:param host_name: Host name of HTTP listener.
:type host_name: str
:param ssl_certificate: SSL certificate resource of an application
gateway.
:type ssl_certificate: ~azure.mgmt.network.v2017_11_01.models.SubResource
:param require_server_name_indication: Applicable only if protocol is
https. Enables SNI for multi-hosting.
:type require_server_name_indication: bool
:param provisioning_state: Provisioning state of the HTTP listener
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, frontend_port=None, protocol=None, host_name: str=None, ssl_certificate=None, require_server_name_indication: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.frontend_port = frontend_port
self.protocol = protocol
self.host_name = host_name
self.ssl_certificate = ssl_certificate
self.require_server_name_indication = require_server_name_indication
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
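# Minimal usage sketch; the resource IDs below are invented placeholders, and the
# SubResource type is the one imported at the top of this module:
#
#   listener = ApplicationGatewayHttpListener(
#       name='https-listener',
#       protocol='Https',
#       frontend_ip_configuration=SubResource(id='<frontend-ip-configuration-id>'),
#       frontend_port=SubResource(id='<frontend-port-id>'),
#       ssl_certificate=SubResource(id='<ssl-certificate-id>'),
#       require_server_name_indication=True,  # only meaningful when protocol is 'Https'
#   )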
| 48.921053
| 289
| 0.673749
|
5589b97ad49d1d4c965638dd8c5ea281caa83e20
| 22,832
|
py
|
Python
|
tools/interop_matrix/client_matrix.py
|
qstanczyk/grpc
|
34338e5798aa6756d66c1f9b64ba8e94a4aefbee
|
[
"Apache-2.0"
] | null | null | null |
tools/interop_matrix/client_matrix.py
|
qstanczyk/grpc
|
34338e5798aa6756d66c1f9b64ba8e94a4aefbee
|
[
"Apache-2.0"
] | null | null | null |
tools/interop_matrix/client_matrix.py
|
qstanczyk/grpc
|
34338e5798aa6756d66c1f9b64ba8e94a4aefbee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Defines languages, runtimes and releases for backward compatibility testing
from collections import OrderedDict
def get_github_repo(lang):
return {
'dart': 'https://github.com/grpc/grpc-dart.git',
'go': 'https://github.com/grpc/grpc-go.git',
'java': 'https://github.com/grpc/grpc-java.git',
'node': 'https://github.com/grpc/grpc-node.git',
# all other languages use the grpc.git repo.
}.get(lang, 'https://github.com/grpc/grpc.git')
def get_release_tags(lang):
"""Returns list of known releases for given language."""
return list(LANG_RELEASE_MATRIX[lang].keys())
def get_runtimes_for_lang_release(lang, release):
"""Get list of valid runtimes for given release of lang."""
runtimes = list(LANG_RUNTIME_MATRIX[lang])
release_info = LANG_RELEASE_MATRIX[lang].get(release)
if release_info and release_info.runtimes:
runtimes = list(release_info.runtimes)
return runtimes
def should_build_docker_interop_image_from_release_tag(lang):
# All dockerfile definitions live in grpc/grpc repository.
    # For languages that have a separate repo, we need to use
# dockerfile definitions from head of grpc/grpc.
if lang in ['go', 'java', 'node']:
return False
return True
# Dictionary of default runtimes per language
LANG_RUNTIME_MATRIX = {
'cxx': ['cxx'], # This is actually debian8.
'go': ['go1.8', 'go1.11', 'go1.16'],
'java': ['java'],
'python': ['python', 'pythonasyncio'],
'node': ['node'],
'ruby': ['ruby'],
'php': ['php7'],
'csharp': ['csharp', 'csharpcoreclr'],
}
class ReleaseInfo:
"""Info about a single release of a language"""
def __init__(self, patch=[], runtimes=[], testcases_file=None):
self.patch = patch
self.runtimes = runtimes
self.testcases_file = testcases_file
# Dictionary of known releases for given language.
LANG_RELEASE_MATRIX = {
'cxx':
OrderedDict([
('v1.0.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.1.4', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.2.5', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.3.9', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.4.2', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.6.6', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.7.2', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.8.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.9.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.10.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.11.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.12.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.13.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.14.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.15.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.16.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.17.1', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.18.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.19.0', ReleaseInfo(testcases_file='cxx__v1.0.1')),
('v1.20.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.21.4', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.22.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.22.1', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.23.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.24.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.25.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.26.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.27.3', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.30.0', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.31.1', ReleaseInfo(testcases_file='cxx__v1.31.1')),
('v1.32.0', ReleaseInfo()),
('v1.33.2', ReleaseInfo()),
('v1.34.0', ReleaseInfo()),
('v1.35.0', ReleaseInfo()),
('v1.36.3', ReleaseInfo()),
('v1.37.0', ReleaseInfo()),
('v1.38.0', ReleaseInfo()),
]),
'go':
OrderedDict([
('v1.0.5',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.2.1',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.3.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.4.2',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.5.2',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.6.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.7.4',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.8.2',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.9.2',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.10.1',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.11.3',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.12.2',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.13.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.14.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.15.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.16.0',
ReleaseInfo(runtimes=['go1.8'], testcases_file='go__v1.0.5')),
('v1.17.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.0.5')),
('v1.18.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.0.5')),
('v1.19.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.0.5')),
('v1.20.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.21.3',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.22.3',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.23.1',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.24.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.25.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.26.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.27.1',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.28.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.29.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.30.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.31.1',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.32.0',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.33.1',
ReleaseInfo(runtimes=['go1.11'], testcases_file='go__v1.20.0')),
('v1.34.0', ReleaseInfo(runtimes=['go1.11'])),
('v1.35.0', ReleaseInfo(runtimes=['go1.11'])),
('v1.36.0', ReleaseInfo(runtimes=['go1.11'])),
('v1.37.0', ReleaseInfo(runtimes=['go1.11'])),
# NOTE: starting from release v1.38.0, use runtimes=['go1.16']
('v1.38.1', ReleaseInfo(runtimes=['go1.16'])),
('v1.39.1', ReleaseInfo(runtimes=['go1.16'])),
('v1.40.0', ReleaseInfo(runtimes=['go1.16'])),
]),
'java':
OrderedDict([
('v1.0.3',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.1.2',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.2.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.3.1',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.4.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.5.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.6.1',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.7.1', ReleaseInfo(testcases_file='java__v1.0.3')),
('v1.8.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.9.1',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.10.1',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.11.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.12.1', ReleaseInfo(testcases_file='java__v1.0.3')),
('v1.13.2', ReleaseInfo(testcases_file='java__v1.0.3')),
('v1.14.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.15.1', ReleaseInfo(testcases_file='java__v1.0.3')),
('v1.16.1',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.17.2', ReleaseInfo(testcases_file='java__v1.0.3')),
('v1.18.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.19.0',
ReleaseInfo(runtimes=['java_oracle8'],
testcases_file='java__v1.0.3')),
('v1.20.0', ReleaseInfo(runtimes=['java_oracle8'])),
('v1.21.1', ReleaseInfo()),
('v1.22.2', ReleaseInfo()),
('v1.23.0', ReleaseInfo()),
('v1.24.0', ReleaseInfo()),
('v1.25.0', ReleaseInfo()),
('v1.26.1', ReleaseInfo()),
('v1.27.2', ReleaseInfo()),
('v1.28.1', ReleaseInfo()),
('v1.29.0', ReleaseInfo()),
('v1.30.2', ReleaseInfo()),
('v1.31.2', ReleaseInfo()),
('v1.32.3', ReleaseInfo()),
('v1.33.1', ReleaseInfo()),
('v1.34.1', ReleaseInfo()),
('v1.35.1', ReleaseInfo()),
('v1.36.1', ReleaseInfo()),
('v1.37.1', ReleaseInfo()),
('v1.38.1', ReleaseInfo()),
('v1.39.0', ReleaseInfo()),
]),
'python':
OrderedDict([
('v1.0.x',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.1.4',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.2.5',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.3.9',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.4.2',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.6.6',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.7.2',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.8.1',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.9.1',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.10.1',
ReleaseInfo(runtimes=['python'], testcases_file='python__v1.0.x')),
('v1.11.1',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.12.0',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.13.0',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.14.1',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.15.0',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.16.0',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.17.1',
ReleaseInfo(runtimes=['python'],
testcases_file='python__v1.11.1')),
('v1.18.0', ReleaseInfo(runtimes=['python'])),
('v1.19.0', ReleaseInfo(runtimes=['python'])),
('v1.20.0', ReleaseInfo(runtimes=['python'])),
('v1.21.4', ReleaseInfo(runtimes=['python'])),
('v1.22.0', ReleaseInfo(runtimes=['python'])),
('v1.22.1', ReleaseInfo(runtimes=['python'])),
('v1.23.0', ReleaseInfo(runtimes=['python'])),
('v1.24.0', ReleaseInfo(runtimes=['python'])),
('v1.25.0', ReleaseInfo(runtimes=['python'])),
('v1.26.0', ReleaseInfo(runtimes=['python'])),
('v1.27.3', ReleaseInfo(runtimes=['python'])),
('v1.30.0', ReleaseInfo(runtimes=['python'])),
('v1.31.1', ReleaseInfo(runtimes=['python'])),
('v1.32.0', ReleaseInfo(runtimes=['python'])),
('v1.33.2', ReleaseInfo(runtimes=['python'])),
('v1.34.0', ReleaseInfo(runtimes=['python'])),
('v1.35.0', ReleaseInfo(runtimes=['python'])),
('v1.36.3', ReleaseInfo(runtimes=['python'])),
('v1.37.0', ReleaseInfo(runtimes=['python'])),
('v1.38.0', ReleaseInfo(runtimes=['python'])),
]),
'node':
OrderedDict([
('v1.0.1', ReleaseInfo(testcases_file='node__v1.0.1')),
('v1.1.4', ReleaseInfo(testcases_file='node__v1.1.4')),
('v1.2.5', ReleaseInfo(testcases_file='node__v1.1.4')),
('v1.3.9', ReleaseInfo(testcases_file='node__v1.1.4')),
('v1.4.2', ReleaseInfo(testcases_file='node__v1.1.4')),
('v1.6.6', ReleaseInfo(testcases_file='node__v1.1.4')),
# TODO: https://github.com/grpc/grpc-node/issues/235.
# ('v1.7.2', ReleaseInfo()),
('v1.8.4', ReleaseInfo()),
('v1.9.1', ReleaseInfo()),
('v1.10.0', ReleaseInfo()),
('v1.11.3', ReleaseInfo()),
('v1.12.4', ReleaseInfo()),
]),
'ruby':
OrderedDict([
('v1.0.1',
ReleaseInfo(patch=[
'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
],
testcases_file='ruby__v1.0.1')),
('v1.1.4', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.2.5', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.3.9', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.4.2', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.6.6', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.7.2', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.8.0', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.9.1', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.10.1', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.11.1', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.12.0', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.13.0', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.14.1', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.15.0', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.16.0', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.17.1', ReleaseInfo(testcases_file='ruby__v1.1.4')),
('v1.18.0',
ReleaseInfo(patch=[
'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
])),
('v1.19.0', ReleaseInfo()),
('v1.20.0', ReleaseInfo()),
('v1.21.4', ReleaseInfo()),
('v1.22.0', ReleaseInfo()),
('v1.22.1', ReleaseInfo()),
('v1.23.0', ReleaseInfo()),
('v1.24.0', ReleaseInfo()),
('v1.25.0', ReleaseInfo()),
# TODO: https://github.com/grpc/grpc/issues/18262.
# If you are not encountering the error in above issue
# go ahead and upload the docker image for new releases.
('v1.26.0', ReleaseInfo()),
('v1.27.3', ReleaseInfo()),
('v1.30.0', ReleaseInfo()),
('v1.31.1', ReleaseInfo()),
('v1.32.0', ReleaseInfo()),
('v1.33.2', ReleaseInfo()),
('v1.34.0', ReleaseInfo()),
('v1.35.0', ReleaseInfo()),
('v1.36.3', ReleaseInfo()),
('v1.37.0', ReleaseInfo()),
('v1.38.0', ReleaseInfo()),
]),
'php':
OrderedDict([
('v1.0.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.1.4', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.2.5', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.3.9', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.4.2', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.6.6', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.7.2', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.8.0', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.9.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.10.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.11.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.12.0', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.13.0', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.14.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.15.0', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.16.0', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.17.1', ReleaseInfo(testcases_file='php__v1.0.1')),
('v1.18.0', ReleaseInfo()),
# v1.19 and v1.20 were deliberately omitted here because of an issue.
# See https://github.com/grpc/grpc/issues/18264
('v1.21.4', ReleaseInfo()),
('v1.22.0', ReleaseInfo()),
('v1.22.1', ReleaseInfo()),
('v1.23.0', ReleaseInfo()),
('v1.24.0', ReleaseInfo()),
('v1.25.0', ReleaseInfo()),
('v1.26.0', ReleaseInfo()),
('v1.27.3', ReleaseInfo()),
('v1.30.0', ReleaseInfo()),
('v1.31.1', ReleaseInfo()),
('v1.32.0', ReleaseInfo()),
('v1.33.2', ReleaseInfo()),
('v1.34.0', ReleaseInfo()),
('v1.35.0', ReleaseInfo()),
('v1.36.3', ReleaseInfo()),
('v1.37.0', ReleaseInfo()),
('v1.38.0', ReleaseInfo()),
]),
'csharp':
OrderedDict([
('v1.0.1',
ReleaseInfo(patch=[
'tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile',
'tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile',
],
testcases_file='csharp__v1.1.4')),
('v1.1.4', ReleaseInfo(testcases_file='csharp__v1.1.4')),
('v1.2.5', ReleaseInfo(testcases_file='csharp__v1.1.4')),
('v1.3.9', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.4.2', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.6.6', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.7.2', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.8.0', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.9.1', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.10.1', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.11.1', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.12.0', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.13.0', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.14.1', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.15.0', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.16.0', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.17.1', ReleaseInfo(testcases_file='csharp__v1.3.9')),
('v1.18.0', ReleaseInfo(testcases_file='csharp__v1.18.0')),
('v1.19.0', ReleaseInfo(testcases_file='csharp__v1.18.0')),
('v1.20.0', ReleaseInfo()),
('v1.21.4', ReleaseInfo()),
('v1.22.0', ReleaseInfo()),
('v1.22.1', ReleaseInfo()),
('v1.23.0', ReleaseInfo()),
('v1.24.0', ReleaseInfo()),
('v1.25.0', ReleaseInfo()),
('v1.26.0', ReleaseInfo()),
('v1.27.3', ReleaseInfo()),
('v1.30.0', ReleaseInfo()),
('v1.31.1', ReleaseInfo()),
('v1.32.0', ReleaseInfo()),
('v1.33.2', ReleaseInfo()),
('v1.34.0', ReleaseInfo()),
('v1.35.0', ReleaseInfo()),
('v1.36.3', ReleaseInfo()),
('v1.37.0', ReleaseInfo()),
]),
}
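# Minimal usage sketch of the helpers above; the expected values follow directly
# from the matrices defined in this module.
if __name__ == '__main__':
    # grpc-go lives in its own repository ...
    assert get_github_repo('go') == 'https://github.com/grpc/grpc-go.git'
    # ... so its interop docker images are built from grpc/grpc head, not the tag.
    assert not should_build_docker_interop_image_from_release_tag('go')
    # A release that pins runtimes overrides the per-language default ...
    assert get_runtimes_for_lang_release('go', 'v1.17.0') == ['go1.11']
    # ... while a release with no runtimes falls back to LANG_RUNTIME_MATRIX.
    assert get_runtimes_for_lang_release('cxx', 'v1.38.0') == ['cxx']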
| 47.173554
| 86
| 0.522819
|
ddf241f76afbd61aba466a59fd0facef6be69598
| 8,857
|
py
|
Python
|
controls.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 3
|
2019-05-08T14:49:10.000Z
|
2021-01-20T13:22:45.000Z
|
controls.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | null | null | null |
controls.py
|
zhangenter/tetris
|
300c668d9732cd037bfc6f47c289bd5ee4a009b2
|
[
"Apache-2.0"
] | 2
|
2020-01-28T14:37:06.000Z
|
2020-04-03T13:37:14.000Z
|
# -*- coding=utf-8 -*-
import random
import os,pickle
import pygame
from globals import *
from matrix import Matrix
class VirtualHintBox(object):
    pid = 0
    block_manage = None
    next_block = None
def __init__(self, pid, block_manage):
#print pid
self.pid = pid
self.block_manage = block_manage
def take_block(self):
block = self.next_block
if block is None: # make first block
block = self.block_manage.get_block(self.pid)
self.next_block = self.block_manage.get_block(self.pid)
return block
def paint(self):
pass
class HintBox(VirtualHintBox):
def __init__(self, bg, block_size, position, block_manage):
super(HintBox, self).__init__(0, block_manage)
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
self.block_manage = block_manage
def paint(self):
mid_x = self._x + self._width / 2
pygame.draw.line(self._bg, self._bgcolor, [mid_x, self._y], [mid_x, self._y + self._height], self._width)
bz = self._block_size
if self.next_block:
arr = self.next_block.get_rect_arr()
minx, miny = arr[0]
maxx, maxy = arr[0]
for x, y in arr:
if x < minx: minx = x
if x > maxx: maxx = x
if y < miny: miny = y
if y > maxy: maxy = y
w = (maxx - minx) * bz
h = (maxy - miny) * bz
cx = self._width / 2 - w / 2 - minx * bz - bz / 2
cy = self._height / 2 - h / 2 - miny * bz - bz / 2
for rect in arr:
x, y = rect
pygame.draw.line(self._bg, self.next_block.color,
[self._x + x * bz + cx + bz / 2, self._y + cy + y * bz],
[self._x + x * bz + cx + bz / 2, self._y + cy + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255],
[self._x + x * bz + cx, self._y + y * bz + cy, bz + 1, bz + 1], 1)
class ScoreBox(object):
total_score = 0
high_score = 0
db_file = 'tetris.db'
def __init__(self, bg, block_size, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
if os.path.exists(self.db_file): self.high_score = pickle.load(open(self.db_file, 'rb'))
def paint(self):
myfont = get_user_font(24)
white = 255, 255, 255
textImage = myfont.render(LanguageLib.instance().get_text('high') + ': %06d' % (self.high_score), True, white)
self._bg.blit(textImage, (self._x, self._y - 10))
textImage = myfont.render(LanguageLib.instance().get_text('score') + ':%06d' % (self.total_score), True, white)
self._bg.blit(textImage, (self._x, self._y + 20))
def add_score(self, score):
self.total_score += score
if self.total_score > self.high_score:
self.high_score = self.total_score
pickle.dump(self.high_score, open(self.db_file, 'wb+'))
class VirtualScoreBox(object):
total_score = 0
def __init__(self, bg, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._bgcolor = [0, 0, 0]
def paint(self):
myfont = get_user_font(16)
white = 255, 255, 255
textImage = myfont.render(LanguageLib.instance().get_text('player2 score') + ':%06d' % (self.total_score), True, white)
self._bg.blit(textImage, (self._x, self._y))
def add_score(self, score):
self.total_score += score
class Panel(object):
attack_num = 0
block_id = 0
rect_arr = []
moving_block = None
hint_box = None
score_box = None
def __init__(self, bg, block_size, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
self.block_id = 0
self.rect_arr = []
self.moving_block = None
def get_rect_matrix(self):
matrix = Matrix(ROW_COUNT, COL_COUNT)
for rect_info in self.rect_arr:
matrix.set_val(rect_info.x, rect_info.y, 1)
return matrix
def add_block(self, block):
#print block.get_rect_arr()
for x, y in block.get_rect_arr():
self.rect_arr.append(RectInfo(x, y, block.color))
#print len(self.rect_arr)
def create_move_block(self):
self.block_id += 1
block = self.hint_box.take_block()
# block = create_block()
block.move(COL_COUNT / 2 - 2, -2) # move block to top center
self.moving_block = block
def check_overlap(self, diffx, diffy, check_arr=None):
if check_arr is None: check_arr = self.moving_block.get_rect_arr()
for x, y in check_arr:
for rect_info in self.rect_arr:
if x + diffx == rect_info.x and y + diffy == rect_info.y:
return True
return False
def control_block(self, diffx, diffy):
if self.moving_block.can_move(diffx, diffy) and not self.check_overlap(diffx, diffy):
self.moving_block.move(diffx, diffy)
def change_block(self):
if self.moving_block:
new_arr = self.moving_block.change()
if new_arr and not self.check_overlap(0, 0, check_arr=new_arr):
self.moving_block.rect_arr = new_arr
def move_block(self):
if self.moving_block is None: self.create_move_block()
if self.moving_block.can_move(0, 1) and not self.check_overlap(0, 1):
self.moving_block.move(0, 1)
return 1
else:
self.add_block(self.moving_block)
self.check_clear()
for rect_info in self.rect_arr:
if rect_info.y < 0: return 9 # gameover
self.create_move_block()
return 2
def check_clear(self):
tmp_arr = [[] for i in range(20)]
for rect_info in self.rect_arr:
if rect_info.y < 0: return
tmp_arr[rect_info.y].append(rect_info)
clear_num = 0
clear_lines = set([])
y_clear_diff_arr = [[] for i in range(20)]
for y in range(19, -1, -1):
if len(tmp_arr[y]) == 10:
clear_lines.add(y)
clear_num += 1
y_clear_diff_arr[y] = clear_num
if clear_num > 0:
new_arr = []
for y in range(19, -1, -1):
if y in clear_lines: continue
tmp_row = tmp_arr[y]
y_clear_diff = y_clear_diff_arr[y]
for rect_info in tmp_row:
# new_arr.append([x,y+y_clear_diff])
new_arr.append(RectInfo(rect_info.x, rect_info.y + y_clear_diff, rect_info.color))
self.rect_arr = new_arr
score = SCORE_MAP[clear_num - 1]
self.score_box.add_score(score)
def get_attach_num(self):
if self.score_box.total_score / 1000 > self.attack_num:
self.attack_num += 1
return 1
else:
return 0
def add_hinder(self):
hinder_lines = 2
for tmp in self.rect_arr:
tmp.y -= hinder_lines
for y in range(hinder_lines):
arr = range(10)
for i in range(5):
n = random.randint(0, len(arr) - 1)
arr.pop(n)
for x in arr:
self.rect_arr.append(RectInfo(x, 19 - y, [0, 0, 255]))
def paint(self):
mid_x = self._x + self._width / 2
pygame.draw.line(self._bg, self._bgcolor, [mid_x, self._y], [mid_x, self._y + self._height],
self._width)
bz = self._block_size
for rect_info in self.rect_arr:
x = rect_info.x
y = rect_info.y
pygame.draw.line(self._bg, rect_info.color, [self._x + x * bz + bz / 2, self._y + y * bz],
[self._x + x * bz + bz / 2, self._y + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255], [self._x + x * bz, self._y + y * bz, bz + 1, bz + 1], 1)
if self.moving_block:
for rect in self.moving_block.get_rect_arr():
x, y = rect
pygame.draw.line(self._bg, self.moving_block.color, [self._x + x * bz + bz / 2, self._y + y * bz],
[self._x + x * bz + bz / 2, self._y + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255], [self._x + x * bz, self._y + y * bz, bz + 1, bz + 1], 1)
self.score_box.paint()
self.hint_box.paint()
| 35.007905
| 127
| 0.550525
|
aeae930842d50a4f37ab6f347a0debb42629a5d2
| 1,594
|
py
|
Python
|
Two Sum II.py
|
H-isaac23/LeetCode-Challenges
|
dcce6c2c45cccd39a45d4b9874561c12f2990cc6
|
[
"MIT"
] | null | null | null |
Two Sum II.py
|
H-isaac23/LeetCode-Challenges
|
dcce6c2c45cccd39a45d4b9874561c12f2990cc6
|
[
"MIT"
] | null | null | null |
Two Sum II.py
|
H-isaac23/LeetCode-Challenges
|
dcce6c2c45cccd39a45d4b9874561c12f2990cc6
|
[
"MIT"
] | null | null | null |
# Given a 1-indexed array of integers numbers that is already sorted in non-decreasing order, find two numbers such that
# they add up to a specific target number. Let these two numbers be numbers[index1] and numbers[index2] where
# 1 <= index1 < index2 <= numbers.length.
#
# Return the indices of the two numbers, index1 and index2, added by one as an integer array [index1, index2] of length 2.
# The tests are generated such that there is exactly one solution. You may not use the same element twice.
# Your solution must use only constant extra space.
#
# Example 1:
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore, index1 = 1, index2 = 2. We return [1, 2].
#
# Example 2:
# Input: numbers = [2,3,4], target = 6
# Output: [1,3]
# Explanation: The sum of 2 and 4 is 6. Therefore index1 = 1, index2 = 3. We return [1, 3].
#
# Example 3:
# Input: numbers = [-1,0], target = -1
# Output: [1,2]
# Explanation: The sum of -1 and 0 is -1. Therefore index1 = 1, index2 = 2. We return [1, 2].
#
# Constraints:
#
# 2 <= numbers.length <= 3 * 10^4
# -1000 <= numbers[i] <= 1000
# numbers is sorted in non-decreasing order.
# -1000 <= target <= 1000
# The tests are generated such that there is exactly one solution.
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
l, r = 0, len(nums) - 1
while l < r:
if nums[l] + nums[r] == target:
return [l + 1, r + 1]
if nums[l] + nums[r] > target:
r -= 1
else:
l += 1
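# Quick self-check of the two-pointer solution against the three examples above:
if __name__ == "__main__":
    solver = Solution()
    # [2,7,11,15], 9: 2+15>9 -> move r left, 2+11>9 -> move r left, 2+7==9 -> [1, 2]
    assert solver.twoSum([2, 7, 11, 15], 9) == [1, 2]
    assert solver.twoSum([2, 3, 4], 6) == [1, 3]
    assert solver.twoSum([-1, 0], -1) == [1, 2]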
| 37.952381
| 122
| 0.624216
|
7292b0bfcd898e03486646fdada6580a8c41d0d8
| 3,093
|
py
|
Python
|
mmdet/models/roi_extractors/single_level.py
|
shaoqb/multi_scale_booster
|
8e5155c1ff3ba907966aa3971ce2057deafc27e7
|
[
"Apache-2.0"
] | 24
|
2019-07-06T07:50:54.000Z
|
2021-12-12T03:38:24.000Z
|
mmdet/models/roi_extractors/single_level.py
|
shaoqb/multi_scale_booster
|
8e5155c1ff3ba907966aa3971ce2057deafc27e7
|
[
"Apache-2.0"
] | 3
|
2019-07-29T11:38:30.000Z
|
2021-11-30T16:11:33.000Z
|
mmdet/models/roi_extractors/single_level.py
|
shaoqb/multi_scale_booster
|
8e5155c1ff3ba907966aa3971ce2057deafc27e7
|
[
"Apache-2.0"
] | 7
|
2019-07-22T03:14:57.000Z
|
2021-06-26T01:11:50.000Z
|
from __future__ import division
import torch
import torch.nn as nn
from mmdet import ops
from ..registry import ROI_EXTRACTORS
@ROI_EXTRACTORS.register_module
class SingleRoIExtractor(nn.Module):
"""Extract RoI features from a single level feature map.
    If there are multiple input feature levels, each RoI is mapped to a level
according to its scale.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
):
super(SingleRoIExtractor, self).__init__()
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.finest_scale = finest_scale
@property
def num_inputs(self):
"""int: Input feature map levels."""
return len(self.featmap_strides)
def init_weights(self):
pass
def build_roi_layers(self, layer_cfg, featmap_strides):
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale: level 0
- finest_scale <= scale < finest_scale * 2: level 1
- finest_scale * 2 <= scale < finest_scale * 4: level 2
- scale >= finest_scale * 4: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def forward(self, feats, rois):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
target_lvls = self.map_roi_levels(rois, num_levels)
roi_feats = torch.cuda.FloatTensor(rois.size()[0], self.out_channels,
out_size, out_size).fill_(0)
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] += roi_feats_t
return roi_feats
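# Worked example of the scale-to-level mapping above (finest_scale=56, four
# feature levels), mirroring map_roi_levels without needing any RoI layer:
#
#   rois = torch.tensor([[0., 0., 0., 55., 55.],     # scale 56  -> level 0
#                        [0., 0., 0., 111., 111.],   # scale 112 -> level 1
#                        [0., 0., 0., 447., 447.]])  # scale 448 -> level 3
#   scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
#   lvls = torch.floor(torch.log2(scale / 56 + 1e-6)).clamp(min=0, max=3).long()
#   # lvls -> tensor([0, 1, 3])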
| 34.366667
| 79
| 0.602005
|
2db38a2ef06e18adfced4fea42907f2bd434180a
| 1,400
|
py
|
Python
|
tests/token/conftest.py
|
isaac-philip/xyz-spaces-python
|
67b06efdc4a76934c54c1a828087a27cad26aa5d
|
[
"Apache-2.0"
] | 1
|
2021-02-20T10:14:36.000Z
|
2021-02-20T10:14:36.000Z
|
tests/token/conftest.py
|
GhostUser/xyz-spaces-python
|
646aaa74a180871318f3e9aa12acc8e25a1f3b33
|
[
"Apache-2.0"
] | null | null | null |
tests/token/conftest.py
|
GhostUser/xyz-spaces-python
|
646aaa74a180871318f3e9aa12acc8e25a1f3b33
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019-2020 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""Module for providing test fixtures for the Token API tests."""
import os
import warnings
import pytest
from xyzspaces.apis import TokenApi
from xyzspaces.exceptions import AuthenticationError
HERE_USER = os.environ.get("HERE_USER")
HERE_PASSWORD = os.environ.get("HERE_PASSWORD")
@pytest.fixture()
def api():
"""Create shared XYZ Token API instance as a pytest fixture."""
credentials = {"username": HERE_USER, "password": HERE_PASSWORD}
try:
api = TokenApi(credentials=credentials)
except AuthenticationError:
api = TokenApi()
warnings.warn(
"Ignoring invalid credentials, creating TokenApi "
"instance without. Access limitations may apply."
)
return api
| 31.111111
| 74
| 0.726429
|
8dce6fde79d0090a247e0e9e6d6b896f2c57033c
| 3,643
|
py
|
Python
|
pajbot/modules/maxmsglength.py
|
jardg/pajbot
|
e1fca604fe25e12dd4761cb0bfc15c140e7cf012
|
[
"MIT"
] | null | null | null |
pajbot/modules/maxmsglength.py
|
jardg/pajbot
|
e1fca604fe25e12dd4761cb0bfc15c140e7cf012
|
[
"MIT"
] | null | null | null |
pajbot/modules/maxmsglength.py
|
jardg/pajbot
|
e1fca604fe25e12dd4761cb0bfc15c140e7cf012
|
[
"MIT"
] | null | null | null |
import logging
from pajbot.managers.handler import HandlerManager
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class MaxMsgLengthModule(BaseModule):
ID = __name__.split('.')[-1]
NAME = 'Maximum message length'
DESCRIPTION = 'Times out users who post messages that contain too many characters.'
CATEGORY = 'Filter'
SETTINGS = [
ModuleSetting(
key='max_msg_length',
label='Max message length (Online chat)',
type='number',
required=True,
placeholder='',
default=400,
constraints={
'min_value': 40,
'max_value': 1000,
}),
ModuleSetting(
key='max_msg_length_offline',
label='Max message length (Offline chat)',
type='number',
required=True,
placeholder='',
default=400,
constraints={
'min_value': 40,
'max_value': 1000,
}),
ModuleSetting(
key='timeout_length',
label='Timeout length',
type='number',
required=True,
placeholder='Timeout length in seconds',
default=120,
constraints={
'min_value': 30,
'max_value': 3600,
}),
ModuleSetting(
key='bypass_level',
label='Level to bypass module',
type='number',
required=True,
placeholder='',
default=500,
constraints={
'min_value': 100,
'max_value': 1000,
})
]
def __init__(self):
super().__init__()
self.bot = None
def on_pubmsg(self, source, message):
if self.bot.is_online:
if len(message) > self.settings['max_msg_length'] and source.level < self.settings['bypass_level'] and source.moderator is False:
duration, punishment = self.bot.timeout_warn(source, self.settings['timeout_length'], reason='Message too long')
""" We only send a notification to the user if he has spent more than
one hour watching the stream. """
if duration > 0 and source.minutes_in_chat_online > 60:
self.bot.whisper(source.username, 'You have been {punishment} because your message was too long.'.format(punishment=punishment))
return False
else:
if len(message) > self.settings['max_msg_length_offline'] and source.level < self.settings['bypass_level'] and source.moderator is False:
duration, punishment = self.bot.timeout_warn(source, self.settings['timeout_length'], reason='Message too long')
""" We only send a notification to the user if he has spent more than
one hour watching the stream. """
if duration > 0 and source.minutes_in_chat_online > 60:
self.bot.whisper(source.username, 'You have been {punishment} because your message was too long.'.format(punishment=punishment))
return False
def enable(self, bot):
HandlerManager.add_handler('on_pubmsg', self.on_pubmsg)
self.bot = bot
def disable(self, bot):
HandlerManager.remove_handler('on_pubmsg', self.on_pubmsg)
| 40.032967
| 149
| 0.540214
|
14dcefc12311b0db733e8d8426f4f41a2e91f475
| 532
|
py
|
Python
|
tests/fixtures/exceptions/solution.py
|
danieleades/cleo
|
76a4e64668670b4cbfe68ec3ec0ec592a3eadbbd
|
[
"MIT"
] | null | null | null |
tests/fixtures/exceptions/solution.py
|
danieleades/cleo
|
76a4e64668670b4cbfe68ec3ec0ec592a3eadbbd
|
[
"MIT"
] | null | null | null |
tests/fixtures/exceptions/solution.py
|
danieleades/cleo
|
76a4e64668670b4cbfe68ec3ec0ec592a3eadbbd
|
[
"MIT"
] | null | null | null |
from crashtest.contracts.base_solution import BaseSolution
from crashtest.contracts.provides_solution import ProvidesSolution
class CustomError(ProvidesSolution, Exception):
@property
def solution(self) -> BaseSolution:
solution = BaseSolution("Solution Title.", "Solution Description")
solution.documentation_links.append("https://example.com")
solution.documentation_links.append("https://example2.com")
return solution
def call() -> None:
raise CustomError("Error with solution")
| 31.294118
| 74
| 0.746241
|
bcc57e73b9df081a4e6bb1a91731a0db8c3b1a56
| 1,169
|
py
|
Python
|
journyio/user_identified.py
|
journy-io/python-sdk
|
ef5a216680b754ec2ea184d03431cdc754462656
|
[
"MIT"
] | null | null | null |
journyio/user_identified.py
|
journy-io/python-sdk
|
ef5a216680b754ec2ea184d03431cdc754462656
|
[
"MIT"
] | 2
|
2021-04-09T10:38:09.000Z
|
2021-06-29T09:43:08.000Z
|
journyio/user_identified.py
|
journy-io/python-sdk
|
ef5a216680b754ec2ea184d03431cdc754462656
|
[
"MIT"
] | null | null | null |
from typing import Optional
from .utils import assert_journy
class UserIdentified(object):
    def __init__(self, user_id: Optional[str], email: Optional[str]):
assert_journy(user_id or email, "User id and email can not both be empty")
if user_id:
assert_journy(isinstance(user_id, str), "The user id is not a string.")
if email:
assert_journy(isinstance(email, str), "The email is not a string.")
self.user_id = user_id
self.email = email
def format_identification(self):
identification = {}
if self.email:
identification["email"] = self.email
if self.user_id:
identification["userId"] = self.user_id
return identification
@staticmethod
def by_user_id(user_id: str):
assert_journy(user_id, "User id can not be empty!")
return UserIdentified(user_id, None)
@staticmethod
def by_email(email: str):
assert_journy(email, "Email can not be empty!")
return UserIdentified(None, email)
def __str__(self):
return f"UserIdentified({self.user_id}, {self.email})"
def __repr__(self):
return self.__str__()
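# Minimal usage sketch of the factory helpers above (the id and email values are
# placeholder examples):
#
#   user = UserIdentified.by_user_id("user-42")
#   user.format_identification()      # -> {"userId": "user-42"}
#
#   visitor = UserIdentified.by_email("hello@example.com")
#   visitor.format_identification()   # -> {"email": "hello@example.com"}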
| 29.974359
| 83
| 0.634731
|
34fbe440d4d41d09e19d3870247859f50a0ef4ea
| 3,349
|
py
|
Python
|
nexus_constructor/json/filewriter_json_reader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
nexus_constructor/json/filewriter_json_reader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
nexus_constructor/json/filewriter_json_reader.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
import json
import h5py
import numpy as np
import uuid
from typing import Union, List
from nexus_constructor.common_attrs import CommonAttrs
"""
Read the JSON and construct an in-memory NeXus file from the nexus_structure field
"""
NexusObject = Union[h5py.Group, h5py.Dataset]
_json_type_to_numpy = {
"string": h5py.special_dtype(vlen=str),
"float": np.float32,
"double": np.float64,
"int32": np.int32,
"int64": np.int64,
"uint32": np.uint32,
"uint64": np.uint64,
}
TYPE = "type"
def _add_to_nexus(children: List[dict], current_group: h5py.Group):
"""
Go top down through JSON description constructing NeXus file
"""
for child in children:
if child[TYPE] == "group":
_add_group(child, current_group)
elif child[TYPE] == "dataset":
_add_dataset(child, current_group)
elif child[TYPE] == "stream":
_add_stream(child, current_group)
elif child[TYPE] == "link":
_add_link(child, current_group)
def _add_stream(json_object: dict, current_group: h5py.Group):
current_group.attrs[CommonAttrs.NX_CLASS] = CommonAttrs.NC_STREAM
add_datasets(json_object["stream"], current_group)
def add_datasets(json_object: dict, stream_group: h5py.Group):
for field_name, field_value in json_object.items():
if isinstance(field_value, dict):
new_group = stream_group.create_group(field_name)
add_datasets(field_value, new_group)
else:
stream_group.create_dataset(name=field_name, data=field_value)
def _add_link(json_object: dict, current_group: h5py.Group):
current_group[json_object["name"]] = h5py.SoftLink(json_object["target"])
def _add_dataset(json_object: dict, current_group: h5py.Group):
numpy_type = _json_type_to_numpy[json_object["dataset"][TYPE]]
values = json_object["values"]
if json_object["dataset"][TYPE] == "string" and isinstance(
json_object["values"], list
):
values = [value.encode("utf8") for value in json_object["values"]]
new_dataset = current_group.create_dataset(
json_object["name"], dtype=numpy_type, data=values
)
_add_attributes(json_object, new_dataset)
def _add_group(json_object: dict, current_group: h5py.Group):
new_group = current_group.create_group(json_object["name"])
_add_attributes(json_object, new_group)
_add_to_nexus(json_object["children"], new_group)
def _add_attributes(json_object: dict, nexus_object: NexusObject):
if "attributes" in json_object:
for attribute in json_object["attributes"]:
nexus_object.attrs[attribute["name"]] = attribute["values"]
def _create_in_memory_file(filename: str) -> h5py.File:
return h5py.File(filename, mode="x", driver="core", backing_store=False)
def json_to_nexus(json_input: str) -> h5py.File:
"""
Convert JSON to in-memory NeXus file
:param json_input:
:return: NeXus file and any warning messages produced from validating the JSON
"""
if not json_input:
raise ValueError("Empty json string, nothing to load!")
json_data = json.loads(json_input)
nexus_file = _create_in_memory_file(str(uuid.uuid4()))
nexus_structure = json_data["nexus_structure"]
_add_to_nexus(nexus_structure["children"], nexus_file)
return nexus_file
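# Minimal usage sketch; the JSON below is an invented example shaped like the
# nexus_structure field this reader expects (one group with a string dataset):
#
#   example = '''{"nexus_structure": {"children": [
#       {"type": "group", "name": "entry",
#        "attributes": [{"name": "NX_class", "values": "NXentry"}],
#        "children": [
#            {"type": "dataset", "name": "title",
#             "dataset": {"type": "string"}, "values": "my experiment"}
#        ]}
#   ]}}'''
#   nexus_file = json_to_nexus(example)
#   # nexus_file["entry"] carries the NX_class attribute "NXentry", and
#   # nexus_file["entry/title"] holds the string "my experiment".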
| 31.59434
| 82
| 0.701403
|
ee0f7a11047fe64fb853aa10eab43f5205523e36
| 3,785
|
py
|
Python
|
openstackclient/common/exceptions.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
openstackclient/common/exceptions.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
openstackclient/common/exceptions.py
|
ankur-gupta91/osc-ip-cap
|
9a64bbc31fcc0872f52ad2d92c550945eea5cc97
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Exception definitions."""
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoTokenLookupException(Exception):
"""This does not support looking up endpoints from an existing token."""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class UnsupportedVersion(Exception):
"""The user is trying to use an unsupported version of the API"""
pass
class ClientException(Exception):
"""The base exception class for all exceptions this library raises."""
def __init__(self, code, message=None, details=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
def __str__(self):
return "%s (HTTP %s)" % (self.message, self.code)
class BadRequest(ClientException):
"""HTTP 400 - Bad request: you sent some malformed data."""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""HTTP 401 - Unauthorized: bad credentials."""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""HTTP 403 - Forbidden: not authorized to access to this resource."""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""HTTP 404 - Not found"""
http_status = 404
message = "Not found"
class Conflict(ClientException):
"""HTTP 409 - Conflict"""
http_status = 409
message = "Conflict"
class OverLimit(ClientException):
"""HTTP 413 - Over limit: reached the API limits for this time period."""
http_status = 413
message = "Over limit"
# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
"""HTTP 501 - Not Implemented: server does not support this operation."""
http_status = 501
message = "Not Implemented"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict((c.http_status, c) for c in [
BadRequest,
Unauthorized,
Forbidden,
NotFound,
OverLimit,
HTTPNotImplemented
])
def from_response(response, body):
"""Return an instance of a ClientException based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status != 200:
            raise from_response(resp, body)
"""
cls = _code_map.get(response.status, ClientException)
if body:
if hasattr(body, 'keys'):
error = body[list(body.keys())[0]]
message = error.get('message')
details = error.get('details')
else:
# If we didn't get back a properly formed error message we
# probably couldn't communicate with Keystone at all.
message = "Unable to communicate with image service: %s." % body
details = None
return cls(code=response.status, message=message, details=details)
else:
return cls(code=response.status)
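# --- Usage sketch (added for illustration; not part of the original module) ---
# from_response expects an httplib2-style response object exposing a ``status``
# attribute; the namedtuple below is a stand-in for it, and the body layout
# mirrors the nested error dicts the parser above expects.
from collections import namedtuple

_FakeResponse = namedtuple('_FakeResponse', ['status'])  # stand-in, not a real API

_body = {'itemNotFound': {'message': 'No server found', 'details': None}}
_exc = from_response(_FakeResponse(status=404), _body)
assert isinstance(_exc, NotFound)
assert str(_exc) == 'No server found (HTTP 404)'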
| 27.830882
| 79
| 0.670806
|
7519e5cf7d1cebc826756df3cab39c1c02663ffb
| 2,765
|
py
|
Python
|
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/typedef.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | 3
|
2018-10-01T20:46:17.000Z
|
2019-12-17T19:39:50.000Z
|
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/typedef.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | null | null | null |
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/typedef.py
|
nalinimsingh/ITK_4D
|
95a2eacaeaffe572889832ef0894239f89e3f303
|
[
"Apache-2.0"
] | 4
|
2018-05-17T16:34:54.000Z
|
2020-09-24T02:12:40.000Z
|
# Copyright 2014-2016 Insight Software Consortium.
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0.
# See http://www.boost.org/LICENSE_1_0.txt
"""
defines class that describes C++ typedef declaration
"""
import warnings
from . import declaration
from . import class_declaration
class typedef_t(declaration.declaration_t):
"""describes C++ typedef declaration"""
def __init__(self, name='', type=None, decl_type=None):
"""creates class that describes C++ typedef"""
if type is not None:
warnings.warn(
"The type argument is deprecated. \n" +
"Please use the decl_type argument instead.",
DeprecationWarning)
if decl_type is not None:
                raise ValueError(
                    "Please use only either the type or " +
                    "decl_type argument.")
# Still allow to use the old type for the moment.
decl_type = type
declaration.declaration_t.__init__(self, name)
self._decl_type = decl_type
def _get__cmp__items(self):
"""implementation details"""
return [self.decl_type]
def __eq__(self, other):
if not declaration.declaration_t.__eq__(self, other):
return False
return self.decl_type == other.decl_type
def __hash__(self):
return super.__hash__(self)
@property
def type(self):
"""
Deprecated since v1.8.0. Will be removed in v1.9.0
"""
warnings.warn(
"typedef_t.type is deprecated.\n" +
"Please use typedef_t.decl_type instead.", DeprecationWarning)
return self._decl_type
@type.setter
def type(self, _decl_type):
"""
Deprecated since v1.8.0. Will be removed in v1.9.0
"""
warnings.warn(
"typedef_t.type is deprecated.\n" +
"Please use typedef_t.decl_type instead.", DeprecationWarning)
self._decl_type = _decl_type
@property
def decl_type(self):
"""reference to the original :class:`decl_type <type_t>`"""
return self._decl_type
@decl_type.setter
def decl_type(self, decl_type):
self._decl_type = decl_type
def i_depend_on_them(self, recursive=True):
return [class_declaration.dependency_info_t(self, self.decl_type)]
@property
def byte_size(self):
"""Size of this type in bytes @type: int"""
return self._decl_type.byte_size
@property
def byte_align(self):
"""alignment of this type in bytes @type: int"""
return self._decl_type.byte_align
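# --- Usage sketch (added for illustration; not part of the original module) ---
# Constructing typedef_t with the legacy ``type=`` keyword emits a
# DeprecationWarning and forwards the value to decl_type. _FakeType is a
# stand-in object of ours; real callers pass a pygccxml type_t instance. The
# module-level ``import warnings`` above is reused here.
class _FakeType(object):
    byte_size = 4
    byte_align = 4


with warnings.catch_warnings(record=True) as _caught:
    warnings.simplefilter("always")
    _td = typedef_t(name="byte", type=_FakeType())
    assert isinstance(_td.decl_type, _FakeType)
    assert _td.byte_size == 4
    assert any(issubclass(w.category, DeprecationWarning) for w in _caught)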
| 29.414894
| 75
| 0.599638
|
0dc8d48e84f92512e56f3ad585afba37fa1b553e
| 14,659
|
py
|
Python
|
api/tests/test_organizations.py
|
amcquistan/project-time-tracker-api-django
|
da8a4129964fa4e330939178f12f24097527e77d
|
[
"MIT"
] | null | null | null |
api/tests/test_organizations.py
|
amcquistan/project-time-tracker-api-django
|
da8a4129964fa4e330939178f12f24097527e77d
|
[
"MIT"
] | null | null | null |
api/tests/test_organizations.py
|
amcquistan/project-time-tracker-api-django
|
da8a4129964fa4e330939178f12f24097527e77d
|
[
"MIT"
] | 1
|
2021-01-01T14:58:11.000Z
|
2021-01-01T14:58:11.000Z
|
from copy import deepcopy
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from api.tests.testing_utils import (
create_user,
authenticate_jwt,
admin_creds,
johndoe_creds,
janedoe_creds,
batman_creds,
)
from core.models import Organization
class TestOrganizationAPI(TestCase):
create_list_view_name = 'organization-list-create'
detail_view_name = 'organization-detail'
@classmethod
def setUpTestData(cls):
cls.admin_user = admin_creds.create_user(
is_active=True,
is_staff=True
)
cls.johndoe_user = johndoe_creds.create_user(is_active=True)
cls.janedoe_user = janedoe_creds.create_user(is_active=True)
cls.batman_user = batman_creds.create_user(is_active=True)
def test_create_organization_with_admin_user_succeeds(self):
'''Tests that admin user (is_staff = True) can create organization'''
client = APIClient()
authenticate_jwt(admin_creds, client)
url = reverse(self.create_list_view_name)
payload = {
'name': 'Org1',
'contact': self.johndoe_user.id
}
response = client.post(url, payload, format='json')
self.assertEqual(status.HTTP_201_CREATED, response.status_code)
data = response.data
self.assertIn('name', data)
self.assertIn('slug', data)
self.assertIn('created_at', data)
self.assertIn('updated_at', data)
self.assertIn('contact', data)
self.assertEqual(payload['name'], data['name'])
self.assertEqual(payload['contact'], data['contact'])
self.assertEqual(1, len(data['members']))
def test_create_organization_with_non_admin_user_fails(self):
'''Tests that non admin user (is_staff = False) cannot create organization'''
client = APIClient()
authenticate_jwt(johndoe_creds, client)
url = reverse(self.create_list_view_name)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
response = client.post(url, payload, format='json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
data = response.data
self.assertNotIn('name', data)
self.assertNotIn('slug', data)
self.assertNotIn('created_at', data)
self.assertNotIn('updated_at', data)
self.assertNotIn('contact', data)
def test_list_organization_with_admin_user_succeeds(self):
'''Tests that admin user (is_staff = True) can view organization listing'''
client = APIClient()
authenticate_jwt(admin_creds, client)
url = reverse(self.create_list_view_name)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
client.post(url, payload, format='json')
payload2 = { 'name': 'Org2', 'contact': self.janedoe_user.id }
client.post(url, payload2, format='json')
response = client.get(url, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIsInstance(data, list)
self.assertEqual(2, len(data))
def test_list_organization_with_org_contact_succeeds(self):
        '''Tests that an organization contact can view the listing API, but
        only their own orgs come back'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
url = reverse(self.create_list_view_name)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
admin_client.post(url, payload, format='json')
payload2 = { 'name': 'Org2', 'contact': self.janedoe_user.id }
admin_client.post(url, payload2, format='json')
johndoe_client = APIClient()
authenticate_jwt(johndoe_creds, johndoe_client)
response = johndoe_client.get(url, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIsInstance(data, list)
self.assertEqual(1, len(data))
def test_list_organization_with_non_admin_user_fails(self):
'''Tests that non admin user (is_staff = False) cannot view organization listing'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
url = reverse(self.create_list_view_name)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
admin_client.post(url, payload, format='json')
payload2 = { 'name': 'Org2', 'contact': self.johndoe_user.id }
admin_client.post(url, payload2, format='json')
janedoe_client = APIClient()
authenticate_jwt(janedoe_creds, janedoe_client)
response = janedoe_client.get(url, format='json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
data = response.data
self.assertFalse(isinstance(data, list))
def test_view_organization_with_admin_user_succeeds(self):
        '''Tests that admin user (is_staff = True) can view organization details'''
client = APIClient()
authenticate_jwt(admin_creds, client)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
create_response = client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
response = client.get(url, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIn('name', data)
self.assertIn('slug', data)
self.assertIn('created_at', data)
self.assertIn('updated_at', data)
self.assertIn('contact', data)
self.assertEqual(payload['name'], data['name'])
self.assertEqual(payload['contact'], data['contact'])
def test_view_organization_with_org_contact_succeeds(self):
'''Tests that organization contact can view the details of organization'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
johndoe_client = APIClient()
authenticate_jwt(johndoe_creds, johndoe_client)
response = johndoe_client.get(url, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIn('name', data)
self.assertIn('slug', data)
self.assertIn('created_at', data)
self.assertIn('updated_at', data)
self.assertIn('contact', data)
self.assertEqual(payload['name'], data['name'])
self.assertEqual(payload['contact'], data['contact'])
def test_view_organization_with_non_admin_user_fails(self):
'''Tests that non admin user (is_staff = False) cannot view organization details'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
janedoe_client = APIClient()
authenticate_jwt(janedoe_creds, janedoe_client)
response = janedoe_client.get(url, format='json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
data = response.data
self.assertNotIn('name', data)
self.assertNotIn('slug', data)
self.assertNotIn('created_at', data)
self.assertNotIn('updated_at', data)
self.assertNotIn('contact', data)
def test_update_organization_with_admin_succeeds(self):
'''Tests that admin (is_staff = True) can update organization'''
client = APIClient()
authenticate_jwt(admin_creds, client)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
create_response = client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
updated_payload = deepcopy(payload)
updated_payload['contact'] = self.janedoe_user.id
updated_payload['name'] = 'Updated Org'
response = client.put(url, updated_payload, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIn('name', data)
self.assertIn('slug', data)
self.assertIn('created_at', data)
self.assertIn('updated_at', data)
self.assertIn('contact', data)
self.assertEqual(updated_payload['name'], data['name'])
self.assertEqual(updated_payload['contact'], data['contact'])
def test_update_organization_with_organization_contact_succeeds(self):
'''Tests that an org contact can update their organization'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
johndoe_client = APIClient()
authenticate_jwt(johndoe_creds, johndoe_client)
updated_payload = deepcopy(payload)
updated_payload['name'] = 'Updated Org'
response = johndoe_client.put(url, updated_payload, format='json')
self.assertEqual(status.HTTP_200_OK, response.status_code)
data = response.data
self.assertIn('name', data)
self.assertIn('slug', data)
self.assertIn('created_at', data)
self.assertIn('updated_at', data)
self.assertIn('contact', data)
self.assertEqual(updated_payload['name'], data['name'])
self.assertEqual(updated_payload['contact'], data['contact'])
# def test_update_organization_with_org_contact_change_contact_fails(self):
    #     '''Tests that an organization contact cannot update org's contact to someone else'''
# admin_client = APIClient()
# authenticate_jwt(admin_creds, admin_client)
# payload = { 'name': 'Org1', 'contact': self.johndoe_user.id }
# create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
# slug = create_response.data['slug']
# url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
# johndoe_client = APIClient()
# authenticate_jwt(johndoe_creds, johndoe_client)
# updated_payload = deepcopy(payload)
# updated_payload['name'] = 'Updated Org'
# updated_payload['contact'] = self.janedoe_user.id
# response = johndoe_client.put(url, updated_payload, format='json')
# self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
# data = response.data
# self.assertNotIn('name', data)
# self.assertNotIn('slug', data)
# self.assertNotIn('created_at', data)
# self.assertNotIn('updated_at', data)
# self.assertNotIn('contact', data)
def test_update_organization_with_non_admin_non_contact_fails(self):
        '''Tests that a user who is not an admin (is_staff = False) and is also
        not the organization contact cannot update an organization'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.janedoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
johndoe_client = APIClient()
authenticate_jwt(johndoe_creds, johndoe_client)
updated_payload = deepcopy(payload)
updated_payload['name'] = 'Updated Org'
updated_payload['contact'] = self.janedoe_user.id
response = johndoe_client.put(url, updated_payload, format='json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
data = response.data
self.assertNotIn('name', data)
self.assertNotIn('slug', data)
self.assertNotIn('created_at', data)
self.assertNotIn('updated_at', data)
self.assertNotIn('contact', data)
def test_delete_organization_with_admin_succeeds(self):
        '''Tests that an admin (is_staff = True) can delete an organization'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.janedoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
response = admin_client.delete(url, format='json')
self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
with self.assertRaises(ObjectDoesNotExist):
Organization.objects.get(slug=slug)
def test_delete_organization_with_non_admin_fails(self):
        '''Tests that a non-admin (is_staff = False) cannot delete an organization;
        note that this differs from the other mutations, which allow the organization
        contact to make changes'''
admin_client = APIClient()
authenticate_jwt(admin_creds, admin_client)
payload = { 'name': 'Org1', 'contact': self.janedoe_user.id }
create_response = admin_client.post(reverse(self.create_list_view_name), payload, format='json')
slug = create_response.data['slug']
url = reverse(self.detail_view_name, kwargs={'org_slug': slug})
janedoe_client = APIClient()
authenticate_jwt(janedoe_creds, janedoe_client)
response = janedoe_client.delete(url, format='json')
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
self.assertIsNotNone(Organization.objects.get(slug=slug))
| 40.832869
| 106
| 0.670987
|
5c343d13b7ec3463fbff70b8c8d18fe128ca81d3
| 260
|
py
|
Python
|
src/053. Combinatoric selections/053.py
|
yuhao600/project-euler
|
201fc68aa9cca63b751036bb61623c12939dcac4
|
[
"MIT"
] | 15
|
2015-02-04T13:47:04.000Z
|
2021-12-22T08:40:13.000Z
|
src/053. Combinatoric selections/053.py
|
yuhao600/project-euler
|
201fc68aa9cca63b751036bb61623c12939dcac4
|
[
"MIT"
] | null | null | null |
src/053. Combinatoric selections/053.py
|
yuhao600/project-euler
|
201fc68aa9cca63b751036bb61623c12939dcac4
|
[
"MIT"
] | 3
|
2016-02-19T10:47:31.000Z
|
2017-03-03T16:44:15.000Z
|
factorials = [1]
for n in range(1, 101):
factorials.append(factorials[n - 1] * n)
count = 0
for n in range(1, 101):
for r in range(1, n + 1):
if factorials[n] / factorials[r] / factorials[n - r] > 1000000:
count += 1
print(count)
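# Cross-check (added for illustration, not part of the original solution):
# math.comb (Python 3.8+) computes C(n, r) = n! / (r! * (n - r)!) with exact
# integer arithmetic and yields the same count as the float division above.
from math import comb

assert count == sum(1 for n in range(1, 101)
                    for r in range(1, n + 1)
                    if comb(n, r) > 1000000)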
| 21.666667
| 71
| 0.573077
|
4cfbde17d4d4b6ea90e3fa11422b315b87c2ef86
| 45,119
|
py
|
Python
|
simple_demux_map/_bcs.py
|
Uditgulati/grocsvs
|
e7225b0e65e40138053a214130ebaeec1e1448d8
|
[
"MIT"
] | 39
|
2016-09-11T03:11:09.000Z
|
2021-04-27T17:08:05.000Z
|
simple_demux_map/_bcs.py
|
Uditgulati/grocsvs
|
e7225b0e65e40138053a214130ebaeec1e1448d8
|
[
"MIT"
] | 34
|
2016-10-24T22:24:49.000Z
|
2021-04-12T14:08:54.000Z
|
simple_demux_map/_bcs.py
|
Uditgulati/grocsvs
|
e7225b0e65e40138053a214130ebaeec1e1448d8
|
[
"MIT"
] | 7
|
2017-10-26T00:55:47.000Z
|
2020-07-28T10:57:28.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2014 10X Genomics, Inc. All rights reserved.
#
# What is considered a high confidence mapped read pair
HIGH_CONF_MAPQ = 60
# Distance to mate to ensure the
MIN_MATE_OFFSET_DUP_FILTER = 20
# Sequencing settings
ILLUMINA_MAX_QUAL = 50 # higher than possible right now
ILLUMINA_QUAL_OFFSET = 33
DEFAULT_HIGH_MAPQ = 60
# Demultiplex settings
DEMULTIPLEX_DEFAULT_SAMPLE_INDEX_LENGTH = 8
DEMULTIPLEX_BARCODE_LENGTH = 14
# Tail trim fraction when computing depth CV
COVERAGE_TRIM_TAIL = 0.01
# Subsampling coverages for dup rate
DUPLICATE_SUBSAMPLE_COVERAGES = [16.0, 8.0, 4.0, 2.0, 1.0, 0.5, 0.25, 0.125]
# Phasing confidence required to call a fragment as phased
FRAGMENT_PHASING_THRESHOLD = 0.995
# TAG names
RAW_BARCODE_TAG = 'RX'
PROCESSED_BARCODE_TAG = 'BX'
RAW_BARCODE_QUAL_TAG = 'QX'
SAMPLE_INDEX_TAG = 'BC'
SAMPLE_INDEX_QUAL_TAG = 'QT'
PHASE_SET_BAM_TAG = 'PS'
HAPLOTYPE_BAM_TAG = 'HP'
PHASING_CONF_BAM_TAG = 'PC'
TRIM_TAG = 'TR'
TRIM_QUAL_TAG = 'TQ'
MOLECULE_ID_BAM_TAG = 'MI'
# Parallelization settings
PARALLEL_LOCUS_SIZE = int(4E7)
PARALLEL_NUM_READS_SIZE = 1000000
# Single partition fragment calling
FRAGMENT_LINK_DISTANCE = 30000
#
# Settings for metrics computation
#
# Quality cutoffs for bcs
BC_QUAL_CUTOFFS = [15, 20, 25, 30]
# Map quality cutoffs for insert sizes
INSERT_MAPQ_CUTOFFS = [0, 30, 60]
# Map quality cutoffs for target distances
TARGET_MAPQ_CUTOFFS = [0, 30, 60]
# What is considered a high confidence mapped single read
MODERATE_CONF_MAPQ = 29
# Longest insert size to tabulate
MAX_INSERT_SIZE = 10000
# Longest target distance to tabulate
MAX_TARGET_DIST = 10000
# Distance to consider reads far away for far chimeras
READ_MATE_FAR_DIST = 5000
# Distance to exclude directions of reads as untrustworthy for outer and same-dir chimeras
READ_MATE_CHIM_TOO_CLOSE_DIST = 20
# left coverage tail cutoff for customers
CUSTOMER_LEFT_TAIL_COVERAGE = 5
# Bin size for fragment length histogram passed to loupe
FRAG_LEN_HIST_BIN_SIZE = 100
# Variant Calling filter string
QUAL_FILTER = '(%QUAL <= 15 || (AF[0] > 0.5 && %QUAL < 50))'
LARIAT_RESCUE_FILTER = '(((RESCUED+NOT_RESCUED) > 0 & RESCUED/(RESCUED+NOT_RESCUED) > 0.1) & (MMD == -1 | MMD >= 3.0)) '
ALLELE_FRACTION_FILTER = '(AO[0] < 2 || AO[0]/(AO[0] + RO) < 0.15)'
VARIANT_CALL_FILTER = {'10X_RESCUED_MOLECULE_HIGH_DIVERSITY':LARIAT_RESCUE_FILTER,'10X_QUAL_FILTER':QUAL_FILTER, '10X_ALLELE_FRACTION_FILTER': ALLELE_FRACTION_FILTER}
VCF_WHITE_LIST_INFO_FIELDS = {'AA','AN','CIGAR','END','DB','H2', 'H3', '1000G','SOMATIC','VALIDATED', 'TENX'}
# Preflight constants
MIN_PROCESS_NOFILE = 1024
MIN_GLOBAL_NOFILE = 2**17
REQUIRED_NUM_READS = 3
GLOBAL_NOFILE_PATH = '/proc/sys/fs/file-max'
BCL_PROCESSOR_FASTQ_MODE = 'BCL_PROCESSOR'
ILMN_BCL2FASTQ_FASTQ_MODE = 'ILMN_BCL2FASTQ'
PACKAGE_VERSION_CMDS = [
{
'name': 'mrc',
'cmd' : 'mrc --version',
},
{
'name': 'mrp',
'cmd' : 'mrp --version',
},
{
'name': 'Anaconda',
'cmd' : 'python --version 2>&1 | cat ',
},
{
'name': 'numpy',
'cmd' : 'python -c "import numpy; print numpy.__version__"'
},
{
'name': 'scipy',
'cmd' : 'python -c "import scipy; print scipy.__version__"'
},
{
'name': 'pysam',
'cmd' : 'python -c "import pysam; print pysam.__version__"'
},
{
'name': 'PyVCF',
'cmd' : 'python -c "import vcf; print vcf.VERSION"'
},
{
'name': 'h5py',
'cmd' : 'python -c "import h5py; print h5py.__version__"'
},
{
'name': 'pandas',
'cmd' : 'python -c "import pandas; print pandas.__version__"'
},
{
'name': 'bwa',
'cmd' : 'bwa 2>&1 | grep "^ *Version"'
},
{
'name': 'samtools',
'cmd' : 'samtools 2>&1 | grep "^ *Version"'
},
{
'name': 'freebayes',
'cmd' : 'freebayes -h | grep ^version',
},
]
# Product microfluidics settings
FLUIDICS_PARAMS = {
'GemCode': {
'z2_vol_per_gem': 144e-12, # 144pL
'total_z2_vol_input': 65e-6, # 65uL
},
'Chromium': {
'z2_vol_per_gem': 24.5e-12, # 24.5pL
'total_z2_vol_input': 90e-6, # 90uL
}
}
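# Worked example (added for illustration; the helper below is ours, not part of
# the original module): the two fields imply an upper bound on the number of
# GEMs per run, namely total input volume divided by per-GEM volume.
def _approx_max_gems(product):
    params = FLUIDICS_PARAMS[product]
    return params['total_z2_vol_input'] / params['z2_vol_per_gem']

# _approx_max_gems('GemCode')  ~= 4.5e5 GEMs
# _approx_max_gems('Chromium') ~= 3.7e6 GEMs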
# Sample index map
# GemCode Tubes
SI_001 = ['TCGCCATA', 'GTATACAC', 'AATGGTGG', 'CGCATGCT']
SI_002 = ['TATCCTCG', 'GCGAGGTC', 'CGCTTCAA', 'ATAGAAGT']
SI_003 = ['TGACGTCG', 'CTTGTGTA', 'ACGACCGT', 'GACTAAAC']
SI_004 = ['ATCTAGCT', 'GAGCGTAC', 'TCAGCCTG', 'CGTATAGA']
SI_005 = ['CCGTTCCC', 'ATACAGTT', 'TGTAGTAA', 'GACGCAGG']
SI_006 = ['TCAATTGG', 'AGTTAGAA', 'GAGCGCTT', 'CTCGCACC']
SI_007 = ['CTGCCTTG', 'ACTAGCCC', 'GGCTAGAT', 'TAAGTAGA']
SI_008 = ['GGCAGAAA', 'ACGGTTCT', 'CATTCGTC', 'TTACACGG']
# GemCode Plate
SI_P01_A1 = ['TTGTAAGA', 'GGCGTTTC', 'CCTACCAT', 'AAACGGCG']
SI_P01_B1 = ['CTAGCTGT', 'GCCAACAA', 'AGGCTACC', 'TATTGGTG']
SI_P01_C1 = ['GATGCAGT', 'AGACTTTC', 'TTCTACAG', 'CCGAGGCA']
SI_P01_D1 = ['AGCTGCGT', 'GTGGAGCA', 'TCTATTAG', 'CAACCATC']
SI_P01_E1 = ['CGCCCGTA', 'GTTTGCCT', 'TAAGTTAG', 'ACGAAAGC']
SI_P01_F1 = ['TGACTAGT', 'GATAGGTA', 'CCGTACAG', 'ATCGCTCC']
SI_P01_G1 = ['CATATGCG', 'ATGCGATT', 'TCCGCTAC', 'GGATACGA']
SI_P01_H1 = ['TCTTGTCC', 'CGGGAGTA', 'GTCACAGG', 'AAACTCAT']
SI_P01_A2 = ['AGCCCTTT', 'TCTTAGGC', 'GTGAGAAG', 'CAAGTCCA']
SI_P01_B2 = ['GGTCGAGC', 'TTAGATTG', 'CCCACCCA', 'AAGTTGAT']
SI_P01_C2 = ['CCGAGAAC', 'TGCTCTGT', 'GTAGTGCG', 'AATCACTA']
SI_P01_D2 = ['ACATTCCG', 'GACACAAT', 'CTGCGGTA', 'TGTGATGC']
SI_P01_E2 = ['TCATCAAG', 'GTTGGTCC', 'AGGCTGGT', 'CACAACTA']
SI_P01_F2 = ['TGCCCGCT', 'GCAAACGC', 'CATTGATA', 'ATGGTTAG']
SI_P01_G2 = ['CGGTGAGC', 'ATAACCTA', 'TCCGAGCG', 'GATCTTAT']
SI_P01_H2 = ['CCGAACTC', 'AACGGTCA', 'TTATTGGT', 'GGTCCAAG']
SI_P01_A3 = ['AAAGCATA', 'GCCTTTAT', 'CTGCAGCC', 'TGTAGCGG']
SI_P01_B3 = ['TCATCCTT', 'ATTGGACG', 'CAGCTTAC', 'GGCAAGGA']
SI_P01_C3 = ['ACGTTACA', 'TTACCTAC', 'GACGACGG', 'CGTAGGTT']
SI_P01_D3 = ['GAGCACGC', 'CGAAGTTG', 'TTCGTGAA', 'ACTTCACT']
SI_P01_E3 = ['TCTGCAGG', 'CGGCTCCA', 'AACAAGTC', 'GTATGTAT']
SI_P01_F3 = ['TTAGGACC', 'AGTCTGTA', 'GCCTCCGT', 'CAGAATAG']
SI_P01_G3 = ['TACGAGTT', 'ATGTCCAG', 'GCTATAGC', 'CGACGTCA']
SI_P01_H3 = ['TTGGGCTT', 'GACAAACC', 'ACACCTAA', 'CGTTTGGG']
SI_P01_A4 = ['CATGGCAG', 'AGAACGCC', 'GTCTTTGA', 'TCGCAATT']
SI_P01_B4 = ['GACAGGCT', 'CCTCTAAC', 'AGGGACTG', 'TTATCTGA']
SI_P01_C4 = ['ACATTGGC', 'GAGCCCAT', 'CTTAGTCA', 'TGCGAATG']
SI_P01_D4 = ['AAATCGTC', 'GCGATCGG', 'CTTCGAAT', 'TGCGATCA']
SI_P01_E4 = ['GTAAACAT', 'TATCCTGA', 'AGCTGACG', 'CCGGTGTC']
SI_P01_F4 = ['GCATGATA', 'CGTCCTCT', 'AACGACAC', 'TTGATGGG']
SI_P01_G4 = ['CCTGCGGT', 'GTACAACG', 'AGCTTCTC', 'TAGAGTAA']
SI_P01_H4 = ['TTCCATCT', 'ACTGGAGC', 'CGGTCGTG', 'GAAATCAA']
SI_P01_A5 = ['CAGTCTGG', 'TCACACTC', 'ATTGGGAA', 'GGCATACT']
SI_P01_B5 = ['TCATGCGA', 'ATCGTACT', 'CATCAGTG', 'GGGACTAC']
SI_P01_C5 = ['TCAGTCAA', 'CACTGACT', 'ATGCATTC', 'GGTACGGG']
SI_P01_D5 = ['GTCGACTC', 'AGACGGAT', 'CCTTTAGA', 'TAGACTCG']
SI_P01_E5 = ['CCGTTGAA', 'TATGCTCT', 'ATCCAAGG', 'GGAAGCTC']
SI_P01_F5 = ['TCTGACTA', 'GTACGGCT', 'CGGTTTAG', 'AACACAGC']
SI_P01_G5 = ['ATGAAGTA', 'GAAGCTCG', 'TCTTTCGT', 'CGCCGAAC']
SI_P01_H5 = ['ATAGTATG', 'TATAAGGA', 'GGCTCCAC', 'CCGCGTCT']
SI_P01_A6 = ['CTTTCGAC', 'ACGGGACT', 'TGCATCTG', 'GAACATGA']
SI_P01_B6 = ['GCGCACCT', 'AACGCGAA', 'CTATTTGG', 'TGTAGATC']
SI_P01_C6 = ['CGCTCAGG', 'GAGGTTTA', 'ACTCAGAC', 'TTAAGCCT']
SI_P01_D6 = ['GAAGTCTT', 'TGCAGGGC', 'ATGCCAAA', 'CCTTATCG']
SI_P01_E6 = ['TGATGGCT', 'GCCATTTG', 'ATTGAAAC', 'CAGCCCGA']
SI_P01_F6 = ['GACTTCCT', 'TGAGGAAG', 'ATGCCGGC', 'CCTAATTA']
SI_P01_G6 = ['GTGCGACA', 'TCAGTGTT', 'AGCACTGG', 'CATTACAC']
SI_P01_H6 = ['AAGCATAA', 'CCCATCGC', 'TTAGCGCT', 'GGTTGATG']
SI_P01_A7 = ['CTCATCAT', 'TAACGTCC', 'AGGTCATA', 'GCTGAGGG']
SI_P01_B7 = ['TCCACACG', 'CTTCTGTT', 'GAATGCAC', 'AGGGATGA']
SI_P01_C7 = ['GGCGGAAT', 'ACACCGGG', 'CATAATCC', 'TTGTTCTA']
SI_P01_D7 = ['CCGGATCC', 'GGTCGCAT', 'TTAACGTG', 'AACTTAGA']
SI_P01_E7 = ['AAGACGTG', 'CCATGTGT', 'GTTCACAA', 'TGCGTACC']
SI_P01_F7 = ['GGTTAGAC', 'CAAACTTT', 'ACCCGAGA', 'TTGGTCCG']
SI_P01_G7 = ['GCCGGTAA', 'TGACTGCC', 'ATTACCGG', 'CAGTAATT']
SI_P01_H7 = ['TGGCACGA', 'AACGGGTG', 'CTAATTCT', 'GCTTCAAC']
SI_P01_A8 = ['GACTGTTC', 'ATGATACG', 'CCACAGAA', 'TGTGCCGT']
SI_P01_B8 = ['ACGTTCAC', 'TGCCCAGA', 'CAAGGTCT', 'GTTAAGTG']
SI_P01_C8 = ['TTTATCCC', 'GCACGTTT', 'CAGGAAGA', 'AGCTCGAG']
SI_P01_D8 = ['AATCTTTG', 'GGATGAGT', 'CTCAAGAC', 'TCGGCCCA']
SI_P01_E8 = ['GCTTACAT', 'TAGGGTGC', 'AGCCTATG', 'CTAACGCA']
SI_P01_F8 = ['AGTTGGGA', 'TACATTCT', 'CCAGAAAG', 'GTGCCCTC']
SI_P01_G8 = ['AAGTACTC', 'GGAACTCT', 'TCCCTGAG', 'CTTGGAGA']
SI_P01_H8 = ['AAGAGCGG', 'TCATAGCA', 'GGCCCATC', 'CTTGTTAT']
SI_P01_A9 = ['GAGTGCGT', 'CTCCAACA', 'ACAACTTG', 'TGTGTGAC']
SI_P01_B9 = ['AAGCGTGT', 'CTTGACCG', 'TGATTAAC', 'GCCACGTA']
SI_P01_C9 = ['AGATCGGT', 'CATCGTCG', 'GTCATATA', 'TCGGACAC']
SI_P01_D9 = ['CAAGGGAC', 'ACCTACTG', 'GGGACACA', 'TTTCTTGT']
SI_P01_E9 = ['AGTAAGCA', 'TACCGCGG', 'CCGGTAAT', 'GTATCTTC']
SI_P01_F9 = ['AGTTAGTT', 'GTACTTAA', 'CACGCACG', 'TCGAGCGC']
SI_P01_G9 = ['TTGACTTC', 'GCCGAAGT', 'CAATGGCA', 'AGTCTCAG']
SI_P01_H9 = ['GGAATATG', 'ACCTGCCA', 'CTTCATAC', 'TAGGCGGT']
SI_P01_A10 = ['ACAGCAAC', 'TTTCGCGA', 'CGCAATTT', 'GAGTTGCG']
SI_P01_B10 = ['ACCATTAA', 'CTGGACGT', 'GAACGGTC', 'TGTTCACG']
SI_P01_C10 = ['CGTGCTAA', 'TCACTCCT', 'ATCTGATC', 'GAGAAGGG']
SI_P01_D10 = ['CTTATTTG', 'GCGGGCAT', 'AGATAACA', 'TACCCGGC']
SI_P01_E10 = ['GCACCAGT', 'CGCAGGAG', 'TAGTACCA', 'ATTGTTTC']
SI_P01_F10 = ['TCGACAAT', 'GAATACTG', 'ATTCGTGC', 'CGCGTGCA']
SI_P01_G10 = ['CGGAGACT', 'TCCTATGA', 'ATACTGAG', 'GATGCCTC']
SI_P01_H10 = ['GACCGCCA', 'TCGAATTG', 'ATTTCAGC', 'CGAGTGAT']
SI_P01_A11 = ['CTTTCCTT', 'TAGGTAAA', 'ACCAGTCC', 'GGACAGGG']
SI_P01_B11 = ['TCCAGATA', 'GATTCGCT', 'CGACATAG', 'ATGGTCGC']
SI_P01_C11 = ['GTTTGTGG', 'ACCGAACA', 'TAGACGAC', 'CGACTCTT']
SI_P01_D11 = ['GCTACTTC', 'CACCTCAG', 'ATATGAGA', 'TGGGAGCT']
SI_P01_E11 = ['ATCGCCAT', 'TCACGGTA', 'GGGTTTCC', 'CATAAAGG']
SI_P01_F11 = ['GAACCCGG', 'AGCAGTTA', 'TCGTAGAT', 'CTTGTACC']
SI_P01_G11 = ['AGGGCGTT', 'CTATACGC', 'TACATAAG', 'GCTCGTCA']
SI_P01_H11 = ['TCTCGACT', 'AGGATCGA', 'CACGATTC', 'GTATCGAG']
SI_P01_A12 = ['TTATGGAA', 'ACTACTGT', 'CGGGAACG', 'GACCTCTC']
SI_P01_B12 = ['GAAAGACA', 'CGCTACAT', 'ACGCTTGG', 'TTTGCGTC']
SI_P01_C12 = ['TAAGCCAC', 'CCGTTATG', 'GGTAATGT', 'ATCCGGCA']
SI_P01_D12 = ['GCTGTGTA', 'AGAAACGT', 'CACTCAAC', 'TTGCGTCG']
SI_P01_E12 = ['CGCTATCC', 'ACGCGGAA', 'TAAATCGT', 'GTTGCATG']
SI_P01_F12 = ['AATTGAAC', 'TGGACCCT', 'CCAGTGGA', 'GTCCATTG']
SI_P01_G12 = ['CATGCGTA', 'ACCCGCAC', 'TGATATCG', 'GTGATAGT']
SI_P01_H12 = ['TGTGTATA', 'GTCCAGGC', 'CAATCCCT', 'ACGAGTAG']
SI_3A_A1 = SI_P03_A1 = ['AAACGGCG', 'CCTACCAT', 'GGCGTTTC', 'TTGTAAGA']
SI_3A_B1 = SI_P03_B1 = ['AGGCTACC', 'CTAGCTGT', 'GCCAACAA', 'TATTGGTG']
SI_3A_C1 = SI_P03_C1 = ['AGACTTTC', 'CCGAGGCA', 'GATGCAGT', 'TTCTACAG']
SI_3A_D1 = SI_P03_D1 = ['AGCTGCGT', 'CAACCATC', 'GTGGAGCA', 'TCTATTAG']
SI_3A_E1 = SI_P03_E1 = ['ACGAAAGC', 'CGCCCGTA', 'GTTTGCCT', 'TAAGTTAG']
SI_3A_F1 = SI_P03_F1 = ['ATCGCTCC', 'CCGTACAG', 'GATAGGTA', 'TGACTAGT']
SI_3A_G1 = SI_P03_G1 = ['ATGCGATT', 'CATATGCG', 'GGATACGA', 'TCCGCTAC']
SI_3A_H1 = SI_P03_H1 = ['AAACTCAT', 'CGGGAGTA', 'GTCACAGG', 'TCTTGTCC']
SI_3A_A2 = SI_P03_A2 = ['AGCCCTTT', 'CAAGTCCA', 'GTGAGAAG', 'TCTTAGGC']
SI_3A_B2 = SI_P03_B2 = ['AAGTTGAT', 'CCCACCCA', 'GGTCGAGC', 'TTAGATTG']
SI_3A_C2 = SI_P03_C2 = ['AATCACTA', 'CCGAGAAC', 'GTAGTGCG', 'TGCTCTGT']
SI_3A_D2 = SI_P03_D2 = ['ACATTCCG', 'CTGCGGTA', 'GACACAAT', 'TGTGATGC']
SI_3A_E2 = SI_P03_E2 = ['AGGCTGGT', 'CACAACTA', 'GTTGGTCC', 'TCATCAAG']
SI_3A_F2 = SI_P03_F2 = ['ATGGTTAG', 'CATTGATA', 'GCAAACGC', 'TGCCCGCT']
SI_3A_G2 = SI_P03_G2 = ['ATAACCTA', 'CGGTGAGC', 'GATCTTAT', 'TCCGAGCG']
SI_3A_H2 = SI_P03_H2 = ['AACGGTCA', 'CCGAACTC', 'GGTCCAAG', 'TTATTGGT']
SI_3A_A3 = SI_P03_A3 = ['AAAGCATA', 'CTGCAGCC', 'GCCTTTAT', 'TGTAGCGG']
SI_3A_B3 = SI_P03_B3 = ['ATTGGACG', 'CAGCTTAC', 'GGCAAGGA', 'TCATCCTT']
SI_3A_C3 = SI_P03_C3 = ['ACGTTACA', 'CGTAGGTT', 'GACGACGG', 'TTACCTAC']
SI_3A_D3 = SI_P03_D3 = ['ACTTCACT', 'CGAAGTTG', 'GAGCACGC', 'TTCGTGAA']
SI_3A_E3 = SI_P03_E3 = ['AACAAGTC', 'CGGCTCCA', 'GTATGTAT', 'TCTGCAGG']
SI_3A_F3 = SI_P03_F3 = ['AGTCTGTA', 'CAGAATAG', 'GCCTCCGT', 'TTAGGACC']
SI_3A_G3 = SI_P03_G3 = ['ATGTCCAG', 'CGACGTCA', 'GCTATAGC', 'TACGAGTT']
SI_3A_H3 = SI_P03_H3 = ['ACACCTAA', 'CGTTTGGG', 'GACAAACC', 'TTGGGCTT']
SI_3A_A4 = SI_P03_A4 = ['AGAACGCC', 'CATGGCAG', 'GTCTTTGA', 'TCGCAATT']
SI_3A_B4 = SI_P03_B4 = ['AGGGACTG', 'CCTCTAAC', 'GACAGGCT', 'TTATCTGA']
SI_3A_C4 = SI_P03_C4 = ['ACATTGGC', 'CTTAGTCA', 'GAGCCCAT', 'TGCGAATG']
SI_3A_D4 = SI_P03_D4 = ['AAATCGTC', 'CTTCGAAT', 'GCGATCGG', 'TGCGATCA']
SI_3A_E4 = SI_P03_E4 = ['AGCTGACG', 'CCGGTGTC', 'GTAAACAT', 'TATCCTGA']
SI_3A_F4 = SI_P03_F4 = ['AACGACAC', 'CGTCCTCT', 'GCATGATA', 'TTGATGGG']
SI_3A_G4 = SI_P03_G4 = ['AGCTTCTC', 'CCTGCGGT', 'GTACAACG', 'TAGAGTAA']
SI_3A_H4 = SI_P03_H4 = ['ACTGGAGC', 'CGGTCGTG', 'GAAATCAA', 'TTCCATCT']
SI_3A_A5 = SI_P03_A5 = ['ATTGGGAA', 'CAGTCTGG', 'GGCATACT', 'TCACACTC']
SI_3A_B5 = SI_P03_B5 = ['ATCGTACT', 'CATCAGTG', 'GGGACTAC', 'TCATGCGA']
SI_3A_C5 = SI_P03_C5 = ['ATGCATTC', 'CACTGACT', 'GGTACGGG', 'TCAGTCAA']
SI_3A_D5 = SI_P03_D5 = ['AGACGGAT', 'CCTTTAGA', 'GTCGACTC', 'TAGACTCG']
SI_3A_E5 = SI_P03_E5 = ['ATCCAAGG', 'CCGTTGAA', 'GGAAGCTC', 'TATGCTCT']
SI_3A_F5 = SI_P03_F5 = ['AACACAGC', 'CGGTTTAG', 'GTACGGCT', 'TCTGACTA']
SI_3A_G5 = SI_P03_G5 = ['ATGAAGTA', 'CGCCGAAC', 'GAAGCTCG', 'TCTTTCGT']
SI_3A_H5 = SI_P03_H5 = ['ATAGTATG', 'CCGCGTCT', 'GGCTCCAC', 'TATAAGGA']
SI_3A_A6 = SI_P03_A6 = ['ACGGGACT', 'CTTTCGAC', 'GAACATGA', 'TGCATCTG']
SI_3A_B6 = SI_P03_B6 = ['AACGCGAA', 'CTATTTGG', 'GCGCACCT', 'TGTAGATC']
SI_3A_C6 = SI_P03_C6 = ['ACTCAGAC', 'CGCTCAGG', 'GAGGTTTA', 'TTAAGCCT']
SI_3A_D6 = SI_P03_D6 = ['ATGCCAAA', 'CCTTATCG', 'GAAGTCTT', 'TGCAGGGC']
SI_3A_E6 = SI_P03_E6 = ['ATTGAAAC', 'CAGCCCGA', 'GCCATTTG', 'TGATGGCT']
SI_3A_F6 = SI_P03_F6 = ['ATGCCGGC', 'CCTAATTA', 'GACTTCCT', 'TGAGGAAG']
SI_3A_G6 = SI_P03_G6 = ['AGCACTGG', 'CATTACAC', 'GTGCGACA', 'TCAGTGTT']
SI_3A_H6 = SI_P03_H6 = ['AAGCATAA', 'CCCATCGC', 'GGTTGATG', 'TTAGCGCT']
SI_3A_A7 = SI_P03_A7 = ['AGGTCATA', 'CTCATCAT', 'GCTGAGGG', 'TAACGTCC']
SI_3A_B7 = SI_P03_B7 = ['AGGGATGA', 'CTTCTGTT', 'GAATGCAC', 'TCCACACG']
SI_3A_C7 = SI_P03_C7 = ['ACACCGGG', 'CATAATCC', 'GGCGGAAT', 'TTGTTCTA']
SI_3A_D7 = SI_P03_D7 = ['AACTTAGA', 'CCGGATCC', 'GGTCGCAT', 'TTAACGTG']
SI_3A_E7 = SI_P03_E7 = ['AAGACGTG', 'CCATGTGT', 'GTTCACAA', 'TGCGTACC']
SI_3A_F7 = SI_P03_F7 = ['ACCCGAGA', 'CAAACTTT', 'GGTTAGAC', 'TTGGTCCG']
SI_3A_G7 = SI_P03_G7 = ['ATTACCGG', 'CAGTAATT', 'GCCGGTAA', 'TGACTGCC']
SI_3A_H7 = SI_P03_H7 = ['AACGGGTG', 'CTAATTCT', 'GCTTCAAC', 'TGGCACGA']
SI_3A_A8 = SI_P03_A8 = ['ATGATACG', 'CCACAGAA', 'GACTGTTC', 'TGTGCCGT']
SI_3A_B8 = SI_P03_B8 = ['ACGTTCAC', 'CAAGGTCT', 'GTTAAGTG', 'TGCCCAGA']
SI_3A_C8 = SI_P03_C8 = ['AGCTCGAG', 'CAGGAAGA', 'GCACGTTT', 'TTTATCCC']
SI_3A_D8 = SI_P03_D8 = ['AATCTTTG', 'CTCAAGAC', 'GGATGAGT', 'TCGGCCCA']
SI_3A_E8 = SI_P03_E8 = ['AGCCTATG', 'CTAACGCA', 'GCTTACAT', 'TAGGGTGC']
SI_3A_F8 = SI_P03_F8 = ['AGTTGGGA', 'CCAGAAAG', 'GTGCCCTC', 'TACATTCT']
SI_3A_G8 = SI_P03_G8 = ['AAGTACTC', 'CTTGGAGA', 'GGAACTCT', 'TCCCTGAG']
SI_3A_H8 = SI_P03_H8 = ['AAGAGCGG', 'CTTGTTAT', 'GGCCCATC', 'TCATAGCA']
SI_3A_A9 = SI_P03_A9 = ['ACAACTTG', 'CTCCAACA', 'GAGTGCGT', 'TGTGTGAC']
SI_3A_B9 = SI_P03_B9 = ['AAGCGTGT', 'CTTGACCG', 'GCCACGTA', 'TGATTAAC']
SI_3A_C9 = SI_P03_C9 = ['AGATCGGT', 'CATCGTCG', 'GTCATATA', 'TCGGACAC']
SI_3A_D9 = SI_P03_D9 = ['ACCTACTG', 'CAAGGGAC', 'GGGACACA', 'TTTCTTGT']
SI_3A_E9 = SI_P03_E9 = ['AGTAAGCA', 'CCGGTAAT', 'GTATCTTC', 'TACCGCGG']
SI_3A_F9 = SI_P03_F9 = ['AGTTAGTT', 'CACGCACG', 'GTACTTAA', 'TCGAGCGC']
SI_3A_G9 = SI_P03_G9 = ['AGTCTCAG', 'CAATGGCA', 'GCCGAAGT', 'TTGACTTC']
SI_3A_H9 = SI_P03_H9 = ['ACCTGCCA', 'CTTCATAC', 'GGAATATG', 'TAGGCGGT']
SI_3A_A10 = SI_P03_A10 = ['ACAGCAAC', 'CGCAATTT', 'GAGTTGCG', 'TTTCGCGA']
SI_3A_B10 = SI_P03_B10 = ['ACCATTAA', 'CTGGACGT', 'GAACGGTC', 'TGTTCACG']
SI_3A_C10 = SI_P03_C10 = ['ATCTGATC', 'CGTGCTAA', 'GAGAAGGG', 'TCACTCCT']
SI_3A_D10 = SI_P03_D10 = ['AGATAACA', 'CTTATTTG', 'GCGGGCAT', 'TACCCGGC']
SI_3A_E10 = SI_P03_E10 = ['ATTGTTTC', 'CGCAGGAG', 'GCACCAGT', 'TAGTACCA']
SI_3A_F10 = SI_P03_F10 = ['ATTCGTGC', 'CGCGTGCA', 'GAATACTG', 'TCGACAAT']
SI_3A_G10 = SI_P03_G10 = ['ATACTGAG', 'CGGAGACT', 'GATGCCTC', 'TCCTATGA']
SI_3A_H10 = SI_P03_H10 = ['ATTTCAGC', 'CGAGTGAT', 'GACCGCCA', 'TCGAATTG']
SI_3A_A11 = SI_P03_A11 = ['ACCAGTCC', 'CTTTCCTT', 'GGACAGGG', 'TAGGTAAA']
SI_3A_B11 = SI_P03_B11 = ['ATGGTCGC', 'CGACATAG', 'GATTCGCT', 'TCCAGATA']
SI_3A_C11 = SI_P03_C11 = ['ACCGAACA', 'CGACTCTT', 'GTTTGTGG', 'TAGACGAC']
SI_3A_D11 = SI_P03_D11 = ['ATATGAGA', 'CACCTCAG', 'GCTACTTC', 'TGGGAGCT']
SI_3A_E11 = SI_P03_E11 = ['ATCGCCAT', 'CATAAAGG', 'GGGTTTCC', 'TCACGGTA']
SI_3A_F11 = SI_P03_F11 = ['AGCAGTTA', 'CTTGTACC', 'GAACCCGG', 'TCGTAGAT']
SI_3A_G11 = SI_P03_G11 = ['AGGGCGTT', 'CTATACGC', 'GCTCGTCA', 'TACATAAG']
SI_3A_H11 = SI_P03_H11 = ['AGGATCGA', 'CACGATTC', 'GTATCGAG', 'TCTCGACT']
SI_3A_A12 = SI_P03_A12 = ['ACTACTGT', 'CGGGAACG', 'GACCTCTC', 'TTATGGAA']
SI_3A_B12 = SI_P03_B12 = ['ACGCTTGG', 'CGCTACAT', 'GAAAGACA', 'TTTGCGTC']
SI_3A_C12 = SI_P03_C12 = ['ATCCGGCA', 'CCGTTATG', 'GGTAATGT', 'TAAGCCAC']
SI_3A_D12 = SI_P03_D12 = ['AGAAACGT', 'CACTCAAC', 'GCTGTGTA', 'TTGCGTCG']
SI_3A_E12 = SI_P03_E12 = ['ACGCGGAA', 'CGCTATCC', 'GTTGCATG', 'TAAATCGT']
SI_3A_F12 = SI_P03_F12 = ['AATTGAAC', 'CCAGTGGA', 'GTCCATTG', 'TGGACCCT']
SI_3A_G12 = SI_P03_G12 = ['ACCCGCAC', 'CATGCGTA', 'GTGATAGT', 'TGATATCG']
SI_3A_H12 = SI_P03_H12 = ['ACGAGTAG', 'CAATCCCT', 'GTCCAGGC', 'TGTGTATA']
# WGS+ Tubes
SI_T2_1 = ['GGGTGATC', 'TTACCGAT', 'AATGACGA', 'CCCATTCG']
SI_T2_2 = ['GGGTCGAA', 'ATCCGCCC', 'TCTATAGT', 'CAAGATTG']
SI_T2_3 = ['GCTGATAT', 'TGCCGAGC', 'AAATTGCG', 'CTGACCTA']
SI_T2_4 = ['ACTTCTGA', 'TTCATCTT', 'CGACGACG', 'GAGGAGAC']
SI_T2_5 = ['GAATACAA', 'AGCATACC', 'TCGGGTTT', 'CTTCCGGG']
SI_T2_6 = ['TATTGAGA', 'GTAGTCAG', 'CGCCATTC', 'ACGACGCT']
SI_T2_7 = ['AAATCTGT', 'GTCCAACC', 'TCTGGCTG', 'CGGATGAA']
SI_T2_8 = ['CCTTGAAC', 'GAAATCGG', 'TGGCCTCT', 'ATCGAGTA']
# Chromium WGS Plate
SI_GA_A1 = SI_P2_A1 = ['GGTTTACT', 'CTAAACGG', 'TCGGCGTC', 'AACCGTAA']
SI_GA_A2 = SI_P2_A2 = ['TTTCATGA', 'ACGTCCCT', 'CGCATGTG', 'GAAGGAAC']
SI_GA_A3 = SI_P2_A3 = ['CAGTACTG', 'AGTAGTCT', 'GCAGTAGA', 'TTCCCGAC']
SI_GA_A4 = SI_P2_A4 = ['TATGATTC', 'CCCACAGT', 'ATGCTGAA', 'GGATGCCG']
SI_GA_A5 = SI_P2_A5 = ['CTAGGTGA', 'TCGTTCAG', 'AGCCAATT', 'GATACGCC']
SI_GA_A6 = SI_P2_A6 = ['CGCTATGT', 'GCTGTCCA', 'TTGAGATC', 'AAACCGAG']
SI_GA_A7 = SI_P2_A7 = ['ACAGAGGT', 'TATAGTTG', 'CGGTCCCA', 'GTCCTAAC']
SI_GA_A8 = SI_P2_A8 = ['GCATCTCC', 'TGTAAGGT', 'CTGCGATG', 'AACGTCAA']
SI_GA_A9 = SI_P2_A9 = ['TCTTAAAG', 'CGAGGCTC', 'GTCCTTCT', 'AAGACGGA']
SI_GA_A10 = SI_P2_A10 = ['GAAACCCT', 'TTTCTGTC', 'CCGTGTGA', 'AGCGAAAG']
SI_GA_A11 = SI_P2_A11 = ['GTCCGGTC', 'AAGATCAT', 'CCTGAAGG', 'TGATCTCA']
SI_GA_A12 = SI_P2_A12 = ['AGTGGAAC', 'GTCTCCTT', 'TCACATCA', 'CAGATGGG']
SI_GA_B1 = SI_P2_B1 = ['GTAATCTT', 'TCCGGAAG', 'AGTTCGGC', 'CAGCATCA']
SI_GA_B2 = SI_P2_B2 = ['TACTCTTC', 'CCTGTGCG', 'GGACACGT', 'ATGAGAAA']
SI_GA_B3 = SI_P2_B3 = ['GTGTATTA', 'TGTGCGGG', 'ACCATAAC', 'CAACGCCT']
SI_GA_B4 = SI_P2_B4 = ['ACTTCATA', 'GAGATGAC', 'TGCCGTGG', 'CTAGACCT']
SI_GA_B5 = SI_P2_B5 = ['AATAATGG', 'CCAGGGCA', 'TGCCTCAT', 'GTGTCATC']
SI_GA_B6 = SI_P2_B6 = ['CGTTAATC', 'GCCACGCT', 'TTACTCAG', 'AAGGGTGA']
SI_GA_B7 = SI_P2_B7 = ['AAACCTCA', 'GCCTTGGT', 'CTGGACTC', 'TGTAGAAG']
SI_GA_B8 = SI_P2_B8 = ['AAAGTGCT', 'GCTACCTG', 'TGCTGTAA', 'CTGCAAGC']
SI_GA_B9 = SI_P2_B9 = ['CTGTAACT', 'TCTAGCGA', 'AGAGTGTG', 'GACCCTAC']
SI_GA_B10 = SI_P2_B10 = ['ACCGTATG', 'GATTAGAT', 'CTGACTGA', 'TGACGCCC']
SI_GA_B11 = SI_P2_B11 = ['GTTCCTCA', 'AGGTACGC', 'TAAGTATG', 'CCCAGGAT']
SI_GA_B12 = SI_P2_B12 = ['TACCACCA', 'CTAAGTTT', 'GGGTCAAG', 'ACTGTGGC']
SI_GA_C1 = SI_P2_C1 = ['CCACTTAT', 'AACTGGCG', 'TTGGCATA', 'GGTAACGC']
SI_GA_C2 = SI_P2_C2 = ['CCTAGACC', 'ATCTCTGT', 'TAGCTCTA', 'GGAGAGAG']
SI_GA_C3 = SI_P2_C3 = ['TCAGCCGT', 'CAGAGGCC', 'GGTCAATA', 'ATCTTTAG']
SI_GA_C4 = SI_P2_C4 = ['ACAATTCA', 'TGCGCAGC', 'CATCACTT', 'GTGTGGAG']
SI_GA_C5 = SI_P2_C5 = ['CGACTTGA', 'TACAGACT', 'ATTGCGTG', 'GCGTACAC']
SI_GA_C6 = SI_P2_C6 = ['ATTACTTC', 'TGCGAACT', 'GCATTCGG', 'CAGCGGAA']
SI_GA_C7 = SI_P2_C7 = ['GTCTCTCG', 'AATCTCTC', 'CGGAGGGA', 'TCAGAAAT']
SI_GA_C8 = SI_P2_C8 = ['GTTGAGAA', 'AGATCTGG', 'TCGATACT', 'CACCGCTC']
SI_GA_C9 = SI_P2_C9 = ['GCGCAGAA', 'ATCTTACC', 'TATGGTGT', 'CGAACCTG']
SI_GA_C10 = SI_P2_C10 = ['TCTCAGTG', 'GAGACTAT', 'CGCTTAGC', 'ATAGGCCA']
SI_GA_C11 = SI_P2_C11 = ['GAGGATCT', 'AGACCATA', 'TCCTGCGC', 'CTTATGAG']
SI_GA_C12 = SI_P2_C12 = ['TCTCGTTT', 'GGCTAGCG', 'ATGACCGC', 'CAAGTAAA']
SI_GA_D1 = SI_P2_D1 = ['CACTCGGA', 'GCTGAATT', 'TGAAGTAC', 'ATGCTCCG']
SI_GA_D2 = SI_P2_D2 = ['TAACAAGG', 'GGTTCCTC', 'ATCATGCA', 'CCGGGTAT']
SI_GA_D3 = SI_P2_D3 = ['ACATTACT', 'TTTGGGTA', 'CAGCCCAC', 'GGCAATGG']
SI_GA_D4 = SI_P2_D4 = ['CCCTAACA', 'ATTCCGAT', 'TGGATTGC', 'GAAGGCTG']
SI_GA_D5 = SI_P2_D5 = ['CTCGTCAC', 'GATCAGCA', 'ACAACAGG', 'TGGTGTTT']
SI_GA_D6 = SI_P2_D6 = ['CATGCGAT', 'TGATATTC', 'GTGATCGA', 'ACCCGACG']
SI_GA_D7 = SI_P2_D7 = ['ATTTGCTA', 'TAGACACC', 'CCACAGGG', 'GGCGTTAT']
SI_GA_D8 = SI_P2_D8 = ['GCAACAAA', 'TAGTTGTC', 'CGCCATCG', 'ATTGGCGT']
SI_GA_D9 = SI_P2_D9 = ['AGGAGATG', 'GATGTGGT', 'CTACATCC', 'TCCTCCAA']
SI_GA_D10 = SI_P2_D10 = ['CAATACCC', 'TGTCTATG', 'ACCACGAA', 'GTGGGTGT']
SI_GA_D11 = SI_P2_D11 = ['CTTTGCGG', 'TGCACAAA', 'AAGCAGTC', 'GCAGTTCT']
SI_GA_D12 = SI_P2_D12 = ['GCACAATG', 'CTTGGTAC', 'TGCACCGT', 'AAGTTGCA']
SI_GA_E1 = SI_P2_E1 = ['TGGTAAAC', 'GAAAGGGT', 'ACTGCTCG', 'CTCCTCTA']
SI_GA_E2 = SI_P2_E2 = ['GTGGTACC', 'TACTATAG', 'ACAAGGTA', 'CGTCCCGT']
SI_GA_E3 = SI_P2_E3 = ['AGGTATTG', 'CTCCTAGT', 'TCAAGGCC', 'GATGCCAA']
SI_GA_E4 = SI_P2_E4 = ['TTCGCCCT', 'GGATGGGC', 'AATCAATG', 'CCGATTAA']
SI_GA_E5 = SI_P2_E5 = ['CATTAGCG', 'TTCGCTGA', 'ACAAGAAT', 'GGGCTCTC']
SI_GA_E6 = SI_P2_E6 = ['CTGCGGCT', 'GACTCAAA', 'AGAAACTC', 'TCTGTTGG']
SI_GA_E7 = SI_P2_E7 = ['CACGCCTT', 'GTATATAG', 'TCTCGGGC', 'AGGATACA']
SI_GA_E8 = SI_P2_E8 = ['ATAGTTAC', 'TGCTGAGT', 'CCTACGTA', 'GAGCACCG']
SI_GA_E9 = SI_P2_E9 = ['TTGTTTCC', 'GGAGGAGG', 'CCTAACAA', 'AACCCGTT']
SI_GA_E10 = SI_P2_E10 = ['AAATGTGC', 'GGGCAAAT', 'TCTATCCG', 'CTCGCGTA']
SI_GA_E11 = SI_P2_E11 = ['AAGCGCTG', 'CGTTTGAT', 'GTAGCACA', 'TCCAATGC']
SI_GA_E12 = SI_P2_E12 = ['ACCGGCTC', 'GAGTTAGT', 'CGTCCTAG', 'TTAAAGCA']
SI_GA_F1 = SI_P2_F1 = ['GTTGCAGC', 'TGGAATTA', 'CAATGGAG', 'ACCCTCCT']
SI_GA_F2 = SI_P2_F2 = ['TTTACATG', 'CGCGATAC', 'ACGCGGGT', 'GAATTCCA']
SI_GA_F3 = SI_P2_F3 = ['TTCAGGTG', 'ACGGACAT', 'GATCTTGA', 'CGATCACC']
SI_GA_F4 = SI_P2_F4 = ['CCCAATAG', 'GTGTCGCT', 'AGAGTCGC', 'TATCGATA']
SI_GA_F5 = SI_P2_F5 = ['GACTACGT', 'CTAGCGAG', 'TCTATATC', 'AGGCGTCA']
SI_GA_F6 = SI_P2_F6 = ['CGGAGCAC', 'GACCTATT', 'ACTTAGGA', 'TTAGCTCG']
SI_GA_F7 = SI_P2_F7 = ['CGTGCAGA', 'AACAAGAT', 'TCGCTTCG', 'GTATGCTC']
SI_GA_F8 = SI_P2_F8 = ['CATGAACA', 'TCACTCGC', 'AGCTGGAT', 'GTGACTTG']
SI_GA_F9 = SI_P2_F9 = ['CAAGCTCC', 'GTTCACTG', 'TCGTGAAA', 'AGCATGGT']
SI_GA_F10 = SI_P2_F10 = ['GCTTGGCT', 'AAACAAAC', 'CGGGCTTA', 'TTCATCGG']
SI_GA_F11 = SI_P2_F11 = ['GCGAGAGT', 'TACGTTCA', 'AGTCCCAC', 'CTATAGTG']
SI_GA_F12 = SI_P2_F12 = ['TGATGCAT', 'GCTACTGA', 'CACCTGCC', 'ATGGAATG']
SI_GA_G1 = SI_P2_G1 = ['ATGAATCT', 'GATCTCAG', 'CCAGGAGC', 'TGCTCGTA']
SI_GA_G2 = SI_P2_G2 = ['TGATTCTA', 'ACTAGGAG', 'CAGCCACT', 'GTCGATGC']
SI_GA_G3 = SI_P2_G3 = ['CCTCATTC', 'AGCATCCG', 'GTGGCAAT', 'TAATGGGA']
SI_GA_G4 = SI_P2_G4 = ['GCGATGTG', 'AGATACAA', 'TTTCCACT', 'CACGGTGC']
SI_GA_G5 = SI_P2_G5 = ['GAGCAAGA', 'TCTGTGAT', 'CGCAGTTC', 'ATATCCCG']
SI_GA_G6 = SI_P2_G6 = ['CTGACGCG', 'GGTCGTAC', 'TCCTTCTT', 'AAAGAAGA']
SI_GA_G7 = SI_P2_G7 = ['GGTATGCA', 'CTCGAAAT', 'ACACCTTC', 'TAGTGCGG']
SI_GA_G8 = SI_P2_G8 = ['TATGAGCT', 'CCGATAGC', 'ATACCCAA', 'GGCTGTTG']
SI_GA_G9 = SI_P2_G9 = ['TAGGACGT', 'ATCCCACA', 'GGAATGTC', 'CCTTGTAG']
SI_GA_G10 = SI_P2_G10 = ['TCGCCAGC', 'AATGTTAG', 'CGATAGCT', 'GTCAGCTA']
SI_GA_G11 = SI_P2_G11 = ['TTATCGTT', 'AGCAGAGC', 'CATCTCCA', 'GCGGATAG']
SI_GA_G12 = SI_P2_G12 = ['ATTCTAAG', 'CCCGATTA', 'TGGAGGCT', 'GAATCCGC']
SI_GA_H1 = SI_P2_H1 = ['GTATGTCA', 'TGTCAGAC', 'CACGTCGG', 'ACGACATT']
SI_GA_H2 = SI_P2_H2 = ['TAATGACC', 'ATGCCTTA', 'GCCGAGAT', 'CGTATCGG']
SI_GA_H3 = SI_P2_H3 = ['CCAAGATG', 'AGGCCCGA', 'TACGTGAC', 'GTTTATCT']
SI_GA_H4 = SI_P2_H4 = ['GCCATTCC', 'CAAGAATT', 'TTGCCGGA', 'AGTTGCAG']
SI_GA_H5 = SI_P2_H5 = ['CCACTACA', 'GATTCTGG', 'TGCGGCTT', 'ATGAAGAC']
SI_GA_H6 = SI_P2_H6 = ['TAGGATAA', 'CCTTTGTC', 'GTACGCGG', 'AGCACACT']
SI_GA_H7 = SI_P2_H7 = ['AGCTATCA', 'CATATAAC', 'TCAGGGTG', 'GTGCCCGT']
SI_GA_H8 = SI_P2_H8 = ['TTGTTGAT', 'GCTCAACC', 'CAAAGTGG', 'AGCGCCTA']
SI_GA_H9 = SI_P2_H9 = ['ACACTGTT', 'CAGGATGG', 'GGCTGAAC', 'TTTACCCA']
SI_GA_H10 = SI_P2_H10 = ['GTAATTGC', 'AGTCGCTT', 'CACGAGAA', 'TCGTCACG']
SI_GA_H11 = SI_P2_H11 = ['GGCGAGTA', 'ACTTCTAT', 'CAAATACG', 'TTGCGCGC']
SI_GA_H12 = SI_P2_H12 = ['GACAGCAT', 'TTTGTACA', 'AGGCCGTG', 'CCATATGC']
SAMPLE_INDEX_MAP = {
# GemCode Tube labels
'SI-001': SI_001,
'SI-002': SI_002,
'SI-003': SI_003,
'SI-004': SI_004,
'SI-005': SI_005,
'SI-006': SI_006,
'SI-007': SI_007,
'SI-008': SI_008,
# GemCode Plate labels
'SI-P01-A1': SI_P01_A1,
'SI-P01-B1': SI_P01_B1,
'SI-P01-C1': SI_P01_C1,
'SI-P01-D1': SI_P01_D1,
'SI-P01-E1': SI_P01_E1,
'SI-P01-F1': SI_P01_F1,
'SI-P01-G1': SI_P01_G1,
'SI-P01-H1': SI_P01_H1,
'SI-P01-A2': SI_P01_A2,
'SI-P01-B2': SI_P01_B2,
'SI-P01-C2': SI_P01_C2,
'SI-P01-D2': SI_P01_D2,
'SI-P01-E2': SI_P01_E2,
'SI-P01-F2': SI_P01_F2,
'SI-P01-G2': SI_P01_G2,
'SI-P01-H2': SI_P01_H2,
'SI-P01-A3': SI_P01_A3,
'SI-P01-B3': SI_P01_B3,
'SI-P01-C3': SI_P01_C3,
'SI-P01-D3': SI_P01_D3,
'SI-P01-E3': SI_P01_E3,
'SI-P01-F3': SI_P01_F3,
'SI-P01-G3': SI_P01_G3,
'SI-P01-H3': SI_P01_H3,
'SI-P01-A4': SI_P01_A4,
'SI-P01-B4': SI_P01_B4,
'SI-P01-C4': SI_P01_C4,
'SI-P01-D4': SI_P01_D4,
'SI-P01-E4': SI_P01_E4,
'SI-P01-F4': SI_P01_F4,
'SI-P01-G4': SI_P01_G4,
'SI-P01-H4': SI_P01_H4,
'SI-P01-A5': SI_P01_A5,
'SI-P01-B5': SI_P01_B5,
'SI-P01-C5': SI_P01_C5,
'SI-P01-D5': SI_P01_D5,
'SI-P01-E5': SI_P01_E5,
'SI-P01-F5': SI_P01_F5,
'SI-P01-G5': SI_P01_G5,
'SI-P01-H5': SI_P01_H5,
'SI-P01-A6': SI_P01_A6,
'SI-P01-B6': SI_P01_B6,
'SI-P01-C6': SI_P01_C6,
'SI-P01-D6': SI_P01_D6,
'SI-P01-E6': SI_P01_E6,
'SI-P01-F6': SI_P01_F6,
'SI-P01-G6': SI_P01_G6,
'SI-P01-H6': SI_P01_H6,
'SI-P01-A7': SI_P01_A7,
'SI-P01-B7': SI_P01_B7,
'SI-P01-C7': SI_P01_C7,
'SI-P01-D7': SI_P01_D7,
'SI-P01-E7': SI_P01_E7,
'SI-P01-F7': SI_P01_F7,
'SI-P01-G7': SI_P01_G7,
'SI-P01-H7': SI_P01_H7,
'SI-P01-A8': SI_P01_A8,
'SI-P01-B8': SI_P01_B8,
'SI-P01-C8': SI_P01_C8,
'SI-P01-D8': SI_P01_D8,
'SI-P01-E8': SI_P01_E8,
'SI-P01-F8': SI_P01_F8,
'SI-P01-G8': SI_P01_G8,
'SI-P01-H8': SI_P01_H8,
'SI-P01-A9': SI_P01_A9,
'SI-P01-B9': SI_P01_B9,
'SI-P01-C9': SI_P01_C9,
'SI-P01-D9': SI_P01_D9,
'SI-P01-E9': SI_P01_E9,
'SI-P01-F9': SI_P01_F9,
'SI-P01-G9': SI_P01_G9,
'SI-P01-H9': SI_P01_H9,
'SI-P01-A10': SI_P01_A10,
'SI-P01-B10': SI_P01_B10,
'SI-P01-C10': SI_P01_C10,
'SI-P01-D10': SI_P01_D10,
'SI-P01-E10': SI_P01_E10,
'SI-P01-F10': SI_P01_F10,
'SI-P01-G10': SI_P01_G10,
'SI-P01-H10': SI_P01_H10,
'SI-P01-A11': SI_P01_A11,
'SI-P01-B11': SI_P01_B11,
'SI-P01-C11': SI_P01_C11,
'SI-P01-D11': SI_P01_D11,
'SI-P01-E11': SI_P01_E11,
'SI-P01-F11': SI_P01_F11,
'SI-P01-G11': SI_P01_G11,
'SI-P01-H11': SI_P01_H11,
'SI-P01-A12': SI_P01_A12,
'SI-P01-B12': SI_P01_B12,
'SI-P01-C12': SI_P01_C12,
'SI-P01-D12': SI_P01_D12,
'SI-P01-E12': SI_P01_E12,
'SI-P01-F12': SI_P01_F12,
'SI-P01-G12': SI_P01_G12,
'SI-P01-H12': SI_P01_H12,
'SI-P03-A1': SI_P03_A1,
'SI-P03-B1': SI_P03_B1,
'SI-P03-C1': SI_P03_C1,
'SI-P03-D1': SI_P03_D1,
'SI-P03-E1': SI_P03_E1,
'SI-P03-F1': SI_P03_F1,
'SI-P03-G1': SI_P03_G1,
'SI-P03-H1': SI_P03_H1,
'SI-P03-A2': SI_P03_A2,
'SI-P03-B2': SI_P03_B2,
'SI-P03-C2': SI_P03_C2,
'SI-P03-D2': SI_P03_D2,
'SI-P03-E2': SI_P03_E2,
'SI-P03-F2': SI_P03_F2,
'SI-P03-G2': SI_P03_G2,
'SI-P03-H2': SI_P03_H2,
'SI-P03-A3': SI_P03_A3,
'SI-P03-B3': SI_P03_B3,
'SI-P03-C3': SI_P03_C3,
'SI-P03-D3': SI_P03_D3,
'SI-P03-E3': SI_P03_E3,
'SI-P03-F3': SI_P03_F3,
'SI-P03-G3': SI_P03_G3,
'SI-P03-H3': SI_P03_H3,
'SI-P03-A4': SI_P03_A4,
'SI-P03-B4': SI_P03_B4,
'SI-P03-C4': SI_P03_C4,
'SI-P03-D4': SI_P03_D4,
'SI-P03-E4': SI_P03_E4,
'SI-P03-F4': SI_P03_F4,
'SI-P03-G4': SI_P03_G4,
'SI-P03-H4': SI_P03_H4,
'SI-P03-A5': SI_P03_A5,
'SI-P03-B5': SI_P03_B5,
'SI-P03-C5': SI_P03_C5,
'SI-P03-D5': SI_P03_D5,
'SI-P03-E5': SI_P03_E5,
'SI-P03-F5': SI_P03_F5,
'SI-P03-G5': SI_P03_G5,
'SI-P03-H5': SI_P03_H5,
'SI-P03-A6': SI_P03_A6,
'SI-P03-B6': SI_P03_B6,
'SI-P03-C6': SI_P03_C6,
'SI-P03-D6': SI_P03_D6,
'SI-P03-E6': SI_P03_E6,
'SI-P03-F6': SI_P03_F6,
'SI-P03-G6': SI_P03_G6,
'SI-P03-H6': SI_P03_H6,
'SI-P03-A7': SI_P03_A7,
'SI-P03-B7': SI_P03_B7,
'SI-P03-C7': SI_P03_C7,
'SI-P03-D7': SI_P03_D7,
'SI-P03-E7': SI_P03_E7,
'SI-P03-F7': SI_P03_F7,
'SI-P03-G7': SI_P03_G7,
'SI-P03-H7': SI_P03_H7,
'SI-P03-A8': SI_P03_A8,
'SI-P03-B8': SI_P03_B8,
'SI-P03-C8': SI_P03_C8,
'SI-P03-D8': SI_P03_D8,
'SI-P03-E8': SI_P03_E8,
'SI-P03-F8': SI_P03_F8,
'SI-P03-G8': SI_P03_G8,
'SI-P03-H8': SI_P03_H8,
'SI-P03-A9': SI_P03_A9,
'SI-P03-B9': SI_P03_B9,
'SI-P03-C9': SI_P03_C9,
'SI-P03-D9': SI_P03_D9,
'SI-P03-E9': SI_P03_E9,
'SI-P03-F9': SI_P03_F9,
'SI-P03-G9': SI_P03_G9,
'SI-P03-H9': SI_P03_H9,
'SI-P03-A10': SI_P03_A10,
'SI-P03-B10': SI_P03_B10,
'SI-P03-C10': SI_P03_C10,
'SI-P03-D10': SI_P03_D10,
'SI-P03-E10': SI_P03_E10,
'SI-P03-F10': SI_P03_F10,
'SI-P03-G10': SI_P03_G10,
'SI-P03-H10': SI_P03_H10,
'SI-P03-A11': SI_P03_A11,
'SI-P03-B11': SI_P03_B11,
'SI-P03-C11': SI_P03_C11,
'SI-P03-D11': SI_P03_D11,
'SI-P03-E11': SI_P03_E11,
'SI-P03-F11': SI_P03_F11,
'SI-P03-G11': SI_P03_G11,
'SI-P03-H11': SI_P03_H11,
'SI-P03-A12': SI_P03_A12,
'SI-P03-B12': SI_P03_B12,
'SI-P03-C12': SI_P03_C12,
'SI-P03-D12': SI_P03_D12,
'SI-P03-E12': SI_P03_E12,
'SI-P03-F12': SI_P03_F12,
'SI-P03-G12': SI_P03_G12,
'SI-P03-H12': SI_P03_H12,
'SI-3A-A1': SI_3A_A1,
'SI-3A-B1': SI_3A_B1,
'SI-3A-C1': SI_3A_C1,
'SI-3A-D1': SI_3A_D1,
'SI-3A-E1': SI_3A_E1,
'SI-3A-F1': SI_3A_F1,
'SI-3A-G1': SI_3A_G1,
'SI-3A-H1': SI_3A_H1,
'SI-3A-A2': SI_3A_A2,
'SI-3A-B2': SI_3A_B2,
'SI-3A-C2': SI_3A_C2,
'SI-3A-D2': SI_3A_D2,
'SI-3A-E2': SI_3A_E2,
'SI-3A-F2': SI_3A_F2,
'SI-3A-G2': SI_3A_G2,
'SI-3A-H2': SI_3A_H2,
'SI-3A-A3': SI_3A_A3,
'SI-3A-B3': SI_3A_B3,
'SI-3A-C3': SI_3A_C3,
'SI-3A-D3': SI_3A_D3,
'SI-3A-E3': SI_3A_E3,
'SI-3A-F3': SI_3A_F3,
'SI-3A-G3': SI_3A_G3,
'SI-3A-H3': SI_3A_H3,
'SI-3A-A4': SI_3A_A4,
'SI-3A-B4': SI_3A_B4,
'SI-3A-C4': SI_3A_C4,
'SI-3A-D4': SI_3A_D4,
'SI-3A-E4': SI_3A_E4,
'SI-3A-F4': SI_3A_F4,
'SI-3A-G4': SI_3A_G4,
'SI-3A-H4': SI_3A_H4,
'SI-3A-A5': SI_3A_A5,
'SI-3A-B5': SI_3A_B5,
'SI-3A-C5': SI_3A_C5,
'SI-3A-D5': SI_3A_D5,
'SI-3A-E5': SI_3A_E5,
'SI-3A-F5': SI_3A_F5,
'SI-3A-G5': SI_3A_G5,
'SI-3A-H5': SI_3A_H5,
'SI-3A-A6': SI_3A_A6,
'SI-3A-B6': SI_3A_B6,
'SI-3A-C6': SI_3A_C6,
'SI-3A-D6': SI_3A_D6,
'SI-3A-E6': SI_3A_E6,
'SI-3A-F6': SI_3A_F6,
'SI-3A-G6': SI_3A_G6,
'SI-3A-H6': SI_3A_H6,
'SI-3A-A7': SI_3A_A7,
'SI-3A-B7': SI_3A_B7,
'SI-3A-C7': SI_3A_C7,
'SI-3A-D7': SI_3A_D7,
'SI-3A-E7': SI_3A_E7,
'SI-3A-F7': SI_3A_F7,
'SI-3A-G7': SI_3A_G7,
'SI-3A-H7': SI_3A_H7,
'SI-3A-A8': SI_3A_A8,
'SI-3A-B8': SI_3A_B8,
'SI-3A-C8': SI_3A_C8,
'SI-3A-D8': SI_3A_D8,
'SI-3A-E8': SI_3A_E8,
'SI-3A-F8': SI_3A_F8,
'SI-3A-G8': SI_3A_G8,
'SI-3A-H8': SI_3A_H8,
'SI-3A-A9': SI_3A_A9,
'SI-3A-B9': SI_3A_B9,
'SI-3A-C9': SI_3A_C9,
'SI-3A-D9': SI_3A_D9,
'SI-3A-E9': SI_3A_E9,
'SI-3A-F9': SI_3A_F9,
'SI-3A-G9': SI_3A_G9,
'SI-3A-H9': SI_3A_H9,
'SI-3A-A10': SI_3A_A10,
'SI-3A-B10': SI_3A_B10,
'SI-3A-C10': SI_3A_C10,
'SI-3A-D10': SI_3A_D10,
'SI-3A-E10': SI_3A_E10,
'SI-3A-F10': SI_3A_F10,
'SI-3A-G10': SI_3A_G10,
'SI-3A-H10': SI_3A_H10,
'SI-3A-A11': SI_3A_A11,
'SI-3A-B11': SI_3A_B11,
'SI-3A-C11': SI_3A_C11,
'SI-3A-D11': SI_3A_D11,
'SI-3A-E11': SI_3A_E11,
'SI-3A-F11': SI_3A_F11,
'SI-3A-G11': SI_3A_G11,
'SI-3A-H11': SI_3A_H11,
'SI-3A-A12': SI_3A_A12,
'SI-3A-B12': SI_3A_B12,
'SI-3A-C12': SI_3A_C12,
'SI-3A-D12': SI_3A_D12,
'SI-3A-E12': SI_3A_E12,
'SI-3A-F12': SI_3A_F12,
'SI-3A-G12': SI_3A_G12,
'SI-3A-H12': SI_3A_H12,
'SI-GA-A1': SI_GA_A1,
'SI-GA-B1': SI_GA_B1,
'SI-GA-C1': SI_GA_C1,
'SI-GA-D1': SI_GA_D1,
'SI-GA-E1': SI_GA_E1,
'SI-GA-F1': SI_GA_F1,
'SI-GA-G1': SI_GA_G1,
'SI-GA-H1': SI_GA_H1,
'SI-GA-A2': SI_GA_A2,
'SI-GA-B2': SI_GA_B2,
'SI-GA-C2': SI_GA_C2,
'SI-GA-D2': SI_GA_D2,
'SI-GA-E2': SI_GA_E2,
'SI-GA-F2': SI_GA_F2,
'SI-GA-G2': SI_GA_G2,
'SI-GA-H2': SI_GA_H2,
'SI-GA-A3': SI_GA_A3,
'SI-GA-B3': SI_GA_B3,
'SI-GA-C3': SI_GA_C3,
'SI-GA-D3': SI_GA_D3,
'SI-GA-E3': SI_GA_E3,
'SI-GA-F3': SI_GA_F3,
'SI-GA-G3': SI_GA_G3,
'SI-GA-H3': SI_GA_H3,
'SI-GA-A4': SI_GA_A4,
'SI-GA-B4': SI_GA_B4,
'SI-GA-C4': SI_GA_C4,
'SI-GA-D4': SI_GA_D4,
'SI-GA-E4': SI_GA_E4,
'SI-GA-F4': SI_GA_F4,
'SI-GA-G4': SI_GA_G4,
'SI-GA-H4': SI_GA_H4,
'SI-GA-A5': SI_GA_A5,
'SI-GA-B5': SI_GA_B5,
'SI-GA-C5': SI_GA_C5,
'SI-GA-D5': SI_GA_D5,
'SI-GA-E5': SI_GA_E5,
'SI-GA-F5': SI_GA_F5,
'SI-GA-G5': SI_GA_G5,
'SI-GA-H5': SI_GA_H5,
'SI-GA-A6': SI_GA_A6,
'SI-GA-B6': SI_GA_B6,
'SI-GA-C6': SI_GA_C6,
'SI-GA-D6': SI_GA_D6,
'SI-GA-E6': SI_GA_E6,
'SI-GA-F6': SI_GA_F6,
'SI-GA-G6': SI_GA_G6,
'SI-GA-H6': SI_GA_H6,
'SI-GA-A7': SI_GA_A7,
'SI-GA-B7': SI_GA_B7,
'SI-GA-C7': SI_GA_C7,
'SI-GA-D7': SI_GA_D7,
'SI-GA-E7': SI_GA_E7,
'SI-GA-F7': SI_GA_F7,
'SI-GA-G7': SI_GA_G7,
'SI-GA-H7': SI_GA_H7,
'SI-GA-A8': SI_GA_A8,
'SI-GA-B8': SI_GA_B8,
'SI-GA-C8': SI_GA_C8,
'SI-GA-D8': SI_GA_D8,
'SI-GA-E8': SI_GA_E8,
'SI-GA-F8': SI_GA_F8,
'SI-GA-G8': SI_GA_G8,
'SI-GA-H8': SI_GA_H8,
'SI-GA-A9': SI_GA_A9,
'SI-GA-B9': SI_GA_B9,
'SI-GA-C9': SI_GA_C9,
'SI-GA-D9': SI_GA_D9,
'SI-GA-E9': SI_GA_E9,
'SI-GA-F9': SI_GA_F9,
'SI-GA-G9': SI_GA_G9,
'SI-GA-H9': SI_GA_H9,
'SI-GA-A10': SI_GA_A10,
'SI-GA-B10': SI_GA_B10,
'SI-GA-C10': SI_GA_C10,
'SI-GA-D10': SI_GA_D10,
'SI-GA-E10': SI_GA_E10,
'SI-GA-F10': SI_GA_F10,
'SI-GA-G10': SI_GA_G10,
'SI-GA-H10': SI_GA_H10,
'SI-GA-A11': SI_GA_A11,
'SI-GA-B11': SI_GA_B11,
'SI-GA-C11': SI_GA_C11,
'SI-GA-D11': SI_GA_D11,
'SI-GA-E11': SI_GA_E11,
'SI-GA-F11': SI_GA_F11,
'SI-GA-G11': SI_GA_G11,
'SI-GA-H11': SI_GA_H11,
'SI-GA-A12': SI_GA_A12,
'SI-GA-B12': SI_GA_B12,
'SI-GA-C12': SI_GA_C12,
'SI-GA-D12': SI_GA_D12,
'SI-GA-E12': SI_GA_E12,
'SI-GA-F12': SI_GA_F12,
'SI-GA-G12': SI_GA_G12,
'SI-GA-H12': SI_GA_H12,
# GemCode Part numbers
'220027': SI_001,
'220028': SI_002,
'220029': SI_003,
'220030': SI_004,
'220031': SI_005,
'220032': SI_006,
'220033': SI_007,
'220034': SI_008,
# WGS+ Tube labels
'SI-T2-1': SI_T2_1,
'SI-T2-2': SI_T2_2,
'SI-T2-3': SI_T2_3,
'SI-T2-4': SI_T2_4,
'SI-T2-5': SI_T2_5,
'SI-T2-6': SI_T2_6,
'SI-T2-7': SI_T2_7,
'SI-T2-8': SI_T2_8,
# WGS+ Plate labels
'SI-P2-A1': SI_P2_A1,
'SI-P2-A2': SI_P2_A2,
'SI-P2-A3': SI_P2_A3,
'SI-P2-A4': SI_P2_A4,
'SI-P2-A5': SI_P2_A5,
'SI-P2-A6': SI_P2_A6,
'SI-P2-A7': SI_P2_A7,
'SI-P2-A8': SI_P2_A8,
'SI-P2-A9': SI_P2_A9,
'SI-P2-A10': SI_P2_A10,
'SI-P2-A11': SI_P2_A11,
'SI-P2-A12': SI_P2_A12,
'SI-P2-B1': SI_P2_B1,
'SI-P2-B2': SI_P2_B2,
'SI-P2-B3': SI_P2_B3,
'SI-P2-B4': SI_P2_B4,
'SI-P2-B5': SI_P2_B5,
'SI-P2-B6': SI_P2_B6,
'SI-P2-B7': SI_P2_B7,
'SI-P2-B8': SI_P2_B8,
'SI-P2-B9': SI_P2_B9,
'SI-P2-B10': SI_P2_B10,
'SI-P2-B11': SI_P2_B11,
'SI-P2-B12': SI_P2_B12,
'SI-P2-C1': SI_P2_C1,
'SI-P2-C2': SI_P2_C2,
'SI-P2-C3': SI_P2_C3,
'SI-P2-C4': SI_P2_C4,
'SI-P2-C5': SI_P2_C5,
'SI-P2-C6': SI_P2_C6,
'SI-P2-C7': SI_P2_C7,
'SI-P2-C8': SI_P2_C8,
'SI-P2-C9': SI_P2_C9,
'SI-P2-C10': SI_P2_C10,
'SI-P2-C11': SI_P2_C11,
'SI-P2-C12': SI_P2_C12,
'SI-P2-D1': SI_P2_D1,
'SI-P2-D2': SI_P2_D2,
'SI-P2-D3': SI_P2_D3,
'SI-P2-D4': SI_P2_D4,
'SI-P2-D5': SI_P2_D5,
'SI-P2-D6': SI_P2_D6,
'SI-P2-D7': SI_P2_D7,
'SI-P2-D8': SI_P2_D8,
'SI-P2-D9': SI_P2_D9,
'SI-P2-D10': SI_P2_D10,
'SI-P2-D11': SI_P2_D11,
'SI-P2-D12': SI_P2_D12,
'SI-P2-E1': SI_P2_E1,
'SI-P2-E2': SI_P2_E2,
'SI-P2-E3': SI_P2_E3,
'SI-P2-E4': SI_P2_E4,
'SI-P2-E5': SI_P2_E5,
'SI-P2-E6': SI_P2_E6,
'SI-P2-E7': SI_P2_E7,
'SI-P2-E8': SI_P2_E8,
'SI-P2-E9': SI_P2_E9,
'SI-P2-E10': SI_P2_E10,
'SI-P2-E11': SI_P2_E11,
'SI-P2-E12': SI_P2_E12,
'SI-P2-F1': SI_P2_F1,
'SI-P2-F2': SI_P2_F2,
'SI-P2-F3': SI_P2_F3,
'SI-P2-F4': SI_P2_F4,
'SI-P2-F5': SI_P2_F5,
'SI-P2-F6': SI_P2_F6,
'SI-P2-F7': SI_P2_F7,
'SI-P2-F8': SI_P2_F8,
'SI-P2-F9': SI_P2_F9,
'SI-P2-F10': SI_P2_F10,
'SI-P2-F11': SI_P2_F11,
'SI-P2-F12': SI_P2_F12,
'SI-P2-G1': SI_P2_G1,
'SI-P2-G2': SI_P2_G2,
'SI-P2-G3': SI_P2_G3,
'SI-P2-G4': SI_P2_G4,
'SI-P2-G5': SI_P2_G5,
'SI-P2-G6': SI_P2_G6,
'SI-P2-G7': SI_P2_G7,
'SI-P2-G8': SI_P2_G8,
'SI-P2-G9': SI_P2_G9,
'SI-P2-G10': SI_P2_G10,
'SI-P2-G11': SI_P2_G11,
'SI-P2-G12': SI_P2_G12,
'SI-P2-H1': SI_P2_H1,
'SI-P2-H2': SI_P2_H2,
'SI-P2-H3': SI_P2_H3,
'SI-P2-H4': SI_P2_H4,
'SI-P2-H5': SI_P2_H5,
'SI-P2-H6': SI_P2_H6,
'SI-P2-H7': SI_P2_H7,
'SI-P2-H8': SI_P2_H8,
'SI-P2-H9': SI_P2_H9,
'SI-P2-H10': SI_P2_H10,
'SI-P2-H11': SI_P2_H11,
'SI-P2-H12': SI_P2_H12,
# WGS+ Plate Label alternate
'SI-P02-A1': SI_P2_A1,
'SI-P02-A2': SI_P2_A2,
'SI-P02-A3': SI_P2_A3,
'SI-P02-A4': SI_P2_A4,
'SI-P02-A5': SI_P2_A5,
'SI-P02-A6': SI_P2_A6,
'SI-P02-A7': SI_P2_A7,
'SI-P02-A8': SI_P2_A8,
'SI-P02-A9': SI_P2_A9,
'SI-P02-A10': SI_P2_A10,
'SI-P02-A11': SI_P2_A11,
'SI-P02-A12': SI_P2_A12,
'SI-P02-B1': SI_P2_B1,
'SI-P02-B2': SI_P2_B2,
'SI-P02-B3': SI_P2_B3,
'SI-P02-B4': SI_P2_B4,
'SI-P02-B5': SI_P2_B5,
'SI-P02-B6': SI_P2_B6,
'SI-P02-B7': SI_P2_B7,
'SI-P02-B8': SI_P2_B8,
'SI-P02-B9': SI_P2_B9,
'SI-P02-B10': SI_P2_B10,
'SI-P02-B11': SI_P2_B11,
'SI-P02-B12': SI_P2_B12,
'SI-P02-C1': SI_P2_C1,
'SI-P02-C2': SI_P2_C2,
'SI-P02-C3': SI_P2_C3,
'SI-P02-C4': SI_P2_C4,
'SI-P02-C5': SI_P2_C5,
'SI-P02-C6': SI_P2_C6,
'SI-P02-C7': SI_P2_C7,
'SI-P02-C8': SI_P2_C8,
'SI-P02-C9': SI_P2_C9,
'SI-P02-C10': SI_P2_C10,
'SI-P02-C11': SI_P2_C11,
'SI-P02-C12': SI_P2_C12,
'SI-P02-D1': SI_P2_D1,
'SI-P02-D2': SI_P2_D2,
'SI-P02-D3': SI_P2_D3,
'SI-P02-D4': SI_P2_D4,
'SI-P02-D5': SI_P2_D5,
'SI-P02-D6': SI_P2_D6,
'SI-P02-D7': SI_P2_D7,
'SI-P02-D8': SI_P2_D8,
'SI-P02-D9': SI_P2_D9,
'SI-P02-D10': SI_P2_D10,
'SI-P02-D11': SI_P2_D11,
'SI-P02-D12': SI_P2_D12,
'SI-P02-E1': SI_P2_E1,
'SI-P02-E2': SI_P2_E2,
'SI-P02-E3': SI_P2_E3,
'SI-P02-E4': SI_P2_E4,
'SI-P02-E5': SI_P2_E5,
'SI-P02-E6': SI_P2_E6,
'SI-P02-E7': SI_P2_E7,
'SI-P02-E8': SI_P2_E8,
'SI-P02-E9': SI_P2_E9,
'SI-P02-E10': SI_P2_E10,
'SI-P02-E11': SI_P2_E11,
'SI-P02-E12': SI_P2_E12,
'SI-P02-F1': SI_P2_F1,
'SI-P02-F2': SI_P2_F2,
'SI-P02-F3': SI_P2_F3,
'SI-P02-F4': SI_P2_F4,
'SI-P02-F5': SI_P2_F5,
'SI-P02-F6': SI_P2_F6,
'SI-P02-F7': SI_P2_F7,
'SI-P02-F8': SI_P2_F8,
'SI-P02-F9': SI_P2_F9,
'SI-P02-F10': SI_P2_F10,
'SI-P02-F11': SI_P2_F11,
'SI-P02-F12': SI_P2_F12,
'SI-P02-G1': SI_P2_G1,
'SI-P02-G2': SI_P2_G2,
'SI-P02-G3': SI_P2_G3,
'SI-P02-G4': SI_P2_G4,
'SI-P02-G5': SI_P2_G5,
'SI-P02-G6': SI_P2_G6,
'SI-P02-G7': SI_P2_G7,
'SI-P02-G8': SI_P2_G8,
'SI-P02-G9': SI_P2_G9,
'SI-P02-G10': SI_P2_G10,
'SI-P02-G11': SI_P2_G11,
'SI-P02-G12': SI_P2_G12,
'SI-P02-H1': SI_P2_H1,
'SI-P02-H2': SI_P2_H2,
'SI-P02-H3': SI_P2_H3,
'SI-P02-H4': SI_P2_H4,
'SI-P02-H5': SI_P2_H5,
'SI-P02-H6': SI_P2_H6,
'SI-P02-H7': SI_P2_H7,
'SI-P02-H8': SI_P2_H8,
'SI-P02-H9': SI_P2_H9,
'SI-P02-H10': SI_P2_H10,
'SI-P02-H11': SI_P2_H11,
'SI-P02-H12': SI_P2_H12,
}
# Chromium lot-specific oligos
CHROMIUM_LOT1_PART_A = ['AGAGCGA', 'CGATTGA', 'TAGACCA', 'AAATGCC', 'CTTTGCG', 'TCAGCAA', 'CTCCTAG', 'ATTATCC']
CHROMIUM_LOT2_PART_A = ['GACACTA', 'CCCTCTC', 'ATCGCGG', 'CTGGCAG', 'CCAGCTT', 'CATAGCA', 'CGTGTTC', 'GCACCAG']
CHROMIUM_LOT3_PART_A = ['ATGTGAC', 'GACGTCG', 'ACTGGCG', 'TGGCAAT', 'GAGGGTA', 'GTTTCGC', 'CAAGTGT', 'TTGAAGC']
CHROMIUM_LOT4_PART_A = ['CGATCCT', 'TGTTGCC', 'ACCTATT', 'ACAACTG', 'CTGTGTC', 'CTGGAAT', 'CAGAGTT', 'GGGCTGT']
CHROMIUM_LOT5_PART_A = ['TAGCTCC', 'CAATTTC', 'GCTCGAG', 'GAAGGCA', 'CGGCATG', 'TATTCCA', 'TCTCTGG', 'AGGTACT']
CHROMIUM_LOT6_PART_A = ['ACTTGCC', 'GTGAGTT', 'GTTGTCC', 'CATAACG', 'TCGTAAG', 'TTATCCA', 'GTGGAGA', 'TCCTGCA']
CHROMIUM_LOT7_PART_A = ['TAAGCCA', 'TCGGTGG', 'AAGGTAA', 'GGAACAG', 'GTGGAAG', 'TTAGACG', 'ATCCTAT', 'TTCCGTG']
CHROMIUM_LOT8_PART_A = ['GGTTTAG', 'CGTATAG', 'ATAGGCT', 'CTCTCGA', 'GTCTTAT', 'GATTGCA', 'TGAGCTA', 'ACGCGTG']
CHROMIUM_LOT9_PART_A = ['CGACACG', 'TCTCGTG', 'TGATGAC', 'TGCGTAA', 'TACCCTG', 'AGGTGCC', 'CTTGTGC', 'GCATGGC']
CHROMIUM_LOT10_PART_A = ['CAGCACG', 'CATGATG', 'ATCAACG', 'GATAAGA', 'CTGGTTC', 'CGATTCC', 'AGGTGAG', 'GGCCTGA']
CHROMIUM_LOT11_PART_A = ['ACAGTTG', 'TAAGCAC', 'ATCTTTG', 'TCTTGCG', 'TACATGG', 'CAAGGTT', 'AGGCTGC', 'GGTCGTG']
CHROMIUM_LOT12_PART_A = ['CCATTAT', 'GTTGCGG', 'AGGGTAG', 'GCCCAAG', 'TGTGCCT', 'ATTCTTG', 'GGTGCCA', 'GTATAGC']
CHROMIUM_LOT13_PART_A = ['GGCATCG', 'GACTGAT', 'TGGTGTA', 'TCCGTTG', 'CCTTCAG', 'CAGGCCA', 'GCACCGA', 'AGATCCA']
CHROMIUM_LOT_MAP = {
'Chromium Lot 1': CHROMIUM_LOT1_PART_A,
'Chromium Lot 2': CHROMIUM_LOT2_PART_A,
'Chromium Lot 3': CHROMIUM_LOT3_PART_A,
'Chromium Lot 4': CHROMIUM_LOT4_PART_A,
'Chromium Lot 5': CHROMIUM_LOT5_PART_A,
'Chromium Lot 6': CHROMIUM_LOT6_PART_A,
'Chromium Lot 7': CHROMIUM_LOT7_PART_A,
'Chromium Lot 8': CHROMIUM_LOT8_PART_A,
'Chromium Lot 9': CHROMIUM_LOT9_PART_A,
'Chromium Lot 10': CHROMIUM_LOT10_PART_A,
'Chromium Lot 11': CHROMIUM_LOT11_PART_A,
'Chromium Lot 12': CHROMIUM_LOT12_PART_A,
'Chromium Lot 13': CHROMIUM_LOT13_PART_A,
}
# GemCode lot-specific oligos
# note: lots 1-15 all use the same part As
GEMCODE_LOT1_PART_A = ['GGGTGA', 'TTCATC', 'CACAAC', 'GAAGAT', 'CAGCAT', 'CGTCAA', 'GAAACA', 'TGTTTC']
GEMCODE_LOT16_PART_A = ['CAAGTC', 'ACAAAG', 'CTGGAT', 'TTGTCT', 'AGCCTA', 'GGGAAC', 'TTCCTA', 'CCGTAA']
GEMCODE_LOT17_PART_A = ['AGTCCA', 'CAGGAG', 'CAATGC', 'CAATCG', 'AACAGA', 'TTACTC', 'ACTGAC', 'TAAGCC']
GEMCODE_LOT18_PART_A = ['GCATGT', 'CCAACA', 'TCGGTA', 'ATCGTG', 'ATTCTC', 'CGTTAG', 'TTCACT', 'GGTTTG']
GEMCODE_LOT19_PART_A = ['CTTTCA', 'TTGTTC', 'TAGCCA', 'GCGTAT', 'CGTACA', 'CCTTCG', 'CACACA', 'TACTTC']
GEMCODE_LOT20_PART_A = ['CTTCAT', 'ATTCCT', 'GTCTCC', 'CAGGGA', 'ATCCGA', 'CGAATC', 'AAACCC', 'CGCTAA']
GEMCODE_LOT21_PART_A = ['CAGATC', 'AATCCG', 'TACGTG', 'GAACAA', 'AGAGCG', 'CCAGAT', 'CGCTTC', 'TTATCC']
GEMCODE_LOT_MAP = {
'GemCode Lots 1-15': GEMCODE_LOT1_PART_A,
'GemCode Lot 16': GEMCODE_LOT16_PART_A,
'GemCode Lot 17': GEMCODE_LOT17_PART_A,
'GemCode Lot 18': GEMCODE_LOT18_PART_A,
'GemCode Lot 19': GEMCODE_LOT19_PART_A,
'GemCode Lot 20': GEMCODE_LOT20_PART_A,
'GemCode Lot 21': GEMCODE_LOT21_PART_A,
}
# if a whitelist isn't in this map, then assume it doesn't contain the alts and don't try to do lot detection
WHITELIST_TO_LOT_MAP = {
"884K-november-2015": GEMCODE_LOT_MAP,
"4M-with-alts-february-2016": CHROMIUM_LOT_MAP,
}
| 39.199826
| 166
| 0.631286
|
220a10ae4f94037f2fc5716fb1f0d5f4baeedfd5
| 820
|
py
|
Python
|
artikel/migrations/0001_initial.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | null | null | null |
artikel/migrations/0001_initial.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | null | null | null |
artikel/migrations/0001_initial.py
|
almaaqila/tugas-tengah-semester-pbp
|
7b7bc72ea19cedd61c77c26ed503efd9eb42bc0b
|
[
"MIT"
] | 5
|
2021-10-14T15:22:52.000Z
|
2021-12-29T12:22:43.000Z
|
# Generated by Django 3.2.8 on 2021-10-26 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artikel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('pub_date', models.DateField(auto_now_add=True)),
('image_file', models.ImageField(blank=True, upload_to='images')),
('image_url', models.URLField(blank=True)),
('slug', models.SlugField(max_length=255, unique=True)),
],
),
]
| 30.37037
| 117
| 0.568293
|
3dd16e259ad940bf592d1ae6981fa9f729ae6a7f
| 4,063
|
py
|
Python
|
utils/nbtoc.py
|
mmxsrup/fuzzingbook
|
a57d1ac940c6bec78ba70f2e1ed04fd87aeeb49e
|
[
"MIT"
] | 1
|
2021-11-02T18:40:46.000Z
|
2021-11-02T18:40:46.000Z
|
utils/nbtoc.py
|
mmxsrup/fuzzingbook
|
a57d1ac940c6bec78ba70f2e1ed04fd87aeeb49e
|
[
"MIT"
] | null | null | null |
utils/nbtoc.py
|
mmxsrup/fuzzingbook
|
a57d1ac940c6bec78ba70f2e1ed04fd87aeeb49e
|
[
"MIT"
] | 2
|
2019-12-28T16:53:57.000Z
|
2021-11-02T18:40:51.000Z
|
#!/usr/bin/env python
# Create table of contents for given notebook(s)
"""
usage:
python nbtoc.py --chapters "A.ipynb B.ipynb" --appendices "C.ipynb"
"""
import io, os, sys, types, re
import nbformat
import argparse
def get_text_contents(notebook):
with io.open(notebook, 'r', encoding='utf-8') as f:
nb = nbformat.read(f, as_version=4)
contents = ""
for cell in nb.cells:
if cell.cell_type == 'markdown':
contents += "".join(cell.source) + "\n\n"
# print("Contents of", notebook, ": ", repr(contents[:100]))
return contents
def get_title(notebook):
"""Return the title from a notebook file"""
contents = get_text_contents(notebook)
match = re.search(r'^# (.*)', contents, re.MULTILINE)
title = match.group(1).replace(r'\n', '')
# print("Title", title.encode('utf-8'))
return title
def notebook_toc_entry(notebook_name, prefix, path=None):
# notebook_path = import_notebooks.find_notebook(notebook_name, path)
notebook_path = notebook_name
notebook_title = get_title(notebook_path)
notebook_base = os.path.basename(notebook_path)
return prefix + " [" + notebook_title + "](" + notebook_base + ")\n"
def notebook_toc(public_chapters, appendices):
title = "# The Fuzzing Book"
chapter_toc = "## [Table of Contents](index.ipynb)\n\n"
counter = 1
for notebook in public_chapters + appendices:
notebook_title = get_title(notebook)
if (notebook_title.startswith("Part ") or
notebook_title.startswith("Appendices")):
# chapter_toc += "\n### " + notebook_title + "\n\n"
chapter_toc += "\n" + notebook_toc_entry(notebook, "###") + "\n"
else:
chapter_toc += notebook_toc_entry(notebook, "*") # repr(counter) + ".")
counter += 1
# appendix_toc = "### [Appendices](99_Appendices.ipynb)\n\n"
# for notebook in appendices:
# appendix_toc += notebook_toc_entry(notebook, "*")
sitemap = r"""## Sitemap
This sitemap shows possible paths through the book chapters. An arrow $A \rightarrow B$ means that chapter $A$ is a prerequisite for chapter $B$."""
sitemap_code_1 = "from IPython.display import SVG"
sitemap_code_2 = "SVG(filename='PICS/Sitemap.svg')"
toc_notebook = nbformat.v4.new_notebook(
cells=[
nbformat.v4.new_markdown_cell(source=title),
nbformat.v4.new_markdown_cell(source=sitemap),
nbformat.v4.new_code_cell(source=sitemap_code_1),
nbformat.v4.new_code_cell(source=sitemap_code_2),
nbformat.v4.new_markdown_cell(source=chapter_toc)
# nbformat.v4.new_markdown_cell(source=appendix_toc),
])
# Get along with TOC extension
toc_notebook.metadata['toc'] = {
"base_numbering": 1,
"nav_menu": {},
"number_sections": False,
"sideBar": False,
"skip_h1_title": False,
"title_cell": "",
"title_sidebar": "Contents",
"toc_cell": False,
"toc_position": {},
"toc_section_display": False,
"toc_window_display": False
}
# Add general metadata
toc_notebook.metadata["kernelspec"] = {
"display_name": "Python 3",
"language": "python",
"name": "python3"
}
toc_notebook.metadata["language_info"] = {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
return toc_notebook
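# Usage sketch for notebook_toc(): the notebook file names and the output path
# below are placeholders (any notebooks whose first markdown cell carries a
# "# Title" heading would work), shown only to illustrate the call.
def _example_write_toc():
    chapters = ["Fuzzer.ipynb", "Coverage.ipynb"]   # placeholder chapter notebooks
    appendices = ["ExpectError.ipynb"]              # placeholder appendix notebook
    toc = notebook_toc(chapters, appendices)
    with io.open("index.ipynb", "w", encoding="utf-8") as out:
        out.write(nbformat.writes(toc))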
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--chapters", help="List of public chapters")
parser.add_argument("--appendices", help="List of appendices")
args = parser.parse_args()
public_chapters = args.chapters.split()
appendices = args.appendices.split()
toc_notebook = notebook_toc(public_chapters, appendices)
sys.stdout.buffer.write(nbformat.writes(toc_notebook).encode("utf-8"))
| 31.742188
| 149
| 0.634014
|
17e7278792a8cb8490f89df78b4e1172773c8be4
| 245
|
py
|
Python
|
app/loader.py
|
xandrade/alpha-api
|
03a6a43d1a9dd4c8f1d072b8af11751b17dc35d8
|
[
"MIT"
] | 1
|
2021-12-20T16:08:37.000Z
|
2021-12-20T16:08:37.000Z
|
app/loader.py
|
xandrade/alpha-api
|
03a6a43d1a9dd4c8f1d072b8af11751b17dc35d8
|
[
"MIT"
] | null | null | null |
app/loader.py
|
xandrade/alpha-api
|
03a6a43d1a9dd4c8f1d072b8af11751b17dc35d8
|
[
"MIT"
] | 1
|
2022-01-11T21:50:57.000Z
|
2022-01-11T21:50:57.000Z
|
from dotenv import load_dotenv, find_dotenv
def load_environment() -> None:
    """Load environment variables from the .env file."""
path = find_dotenv()
if path:
load_dotenv(path)
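# Usage sketch: call load_environment() once at startup and then read settings
# from the process environment; the "API_KEY" variable name is a placeholder.
def _example_read_setting() -> str:
    import os  # local import keeps the sketch self-contained
    load_environment()
    return os.getenv("API_KEY", "")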
if __name__ == "__main__":
    load_environment()
| 17.5
| 51
| 0.665306
|
40f4bb126ed11d7b72dd904c3997c66a58264cc9
| 2,040
|
py
|
Python
|
source/participant/main.py
|
netzahdzc/mat-dashboard
|
b7ada2a6b1fcfca9b375417da1504b1894508719
|
[
"MIT"
] | null | null | null |
source/participant/main.py
|
netzahdzc/mat-dashboard
|
b7ada2a6b1fcfca9b375417da1504b1894508719
|
[
"MIT"
] | null | null | null |
source/participant/main.py
|
netzahdzc/mat-dashboard
|
b7ada2a6b1fcfca9b375417da1504b1894508719
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 CICESE
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Flask dependencies
from flask import Flask
from flask import render_template
from flask import make_response
from flask import request, Response
# Internal dependencies
from lib.NGSIData import NGSIData
# Format dependencies
import pprint
# Flask application; it is served on port 8001 (see app.run at the bottom of the file).
app = Flask(__name__)
""" Null route (/)
This route might be used for testing purposes.
"""
@app.route("/")
def hello():
    return "Hello LocalServer"
""" queryContext route (/v1/queryContext)
This method retrieves NGSI v1 entities through a POST call and returns a contextResponses structure.
For more reference, please visit the official NGSI v1 documentation: http://telefonicaid.github.io/fiware-orion/api/v1/
"""
@app.route("/v1/queryContext", methods=['POST'])
def getData():
    # Data handler.
    _contextResponses = NGSIData()
    # Remote access data taken from the request headers.
    username = str(request.headers["Referer"]).split("/")[3]
    service = str(request.headers["Fiware-Service"])
    servicePath = str(request.headers["Fiware-ServicePath"])
    token = ""  # str(request.headers["X-Auth-Token"])
    # Build the response and forward the FIWARE headers.
    response = Response(response=_contextResponses.post(username, service, servicePath, request.args, token))
    response.headers["Accept"] = "application/json"
    response.headers["Fiware-Service"] = service
    response.headers["Fiware-ServicePath"] = servicePath
    return response
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8001, debug=False)
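# Client-side usage sketch for the /v1/queryContext route above. The host,
# port, service names, and Referer URL are placeholder assumptions; note that
# getData() reads the username from the fourth "/"-separated piece of Referer.
def _example_query_context():
    import requests  # assumed to be available on the client side
    return requests.post(
        "http://localhost:8001/v1/queryContext",
        headers={
            "Referer": "http://example.org/demo-user/dashboard",
            "Fiware-Service": "demo_service",
            "Fiware-ServicePath": "/demo",
        },
    )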
| 32.380952
| 115
| 0.743137
|