Dataset schema (column name: type, value range; nullable columns are marked):
hexsha: string, length 40
size: int64, 4 to 1.02M
ext: string, 8 classes
lang: string, 1 value
max_stars_repo_path: string, length 4 to 209
max_stars_repo_name: string, length 5 to 121
max_stars_repo_head_hexsha: string, length 40
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k, nullable
max_stars_repo_stars_event_min_datetime: string, length 24, nullable
max_stars_repo_stars_event_max_datetime: string, length 24, nullable
max_issues_repo_path: string, length 4 to 209
max_issues_repo_name: string, length 5 to 121
max_issues_repo_head_hexsha: string, length 40
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k, nullable
max_issues_repo_issues_event_min_datetime: string, length 24, nullable
max_issues_repo_issues_event_max_datetime: string, length 24, nullable
max_forks_repo_path: string, length 4 to 209
max_forks_repo_name: string, length 5 to 121
max_forks_repo_head_hexsha: string, length 40
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k, nullable
max_forks_repo_forks_event_min_datetime: string, length 24, nullable
max_forks_repo_forks_event_max_datetime: string, length 24, nullable
content: string, length 4 to 1.02M
avg_line_length: float64, 1.07 to 66.1k
max_line_length: int64, 4 to 266k
alphanum_fraction: float64, 0.01 to 1
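The schema above is that of a code dataset with one source file per row. As an illustration only, a minimal sketch of how a dataset with these columns could be inspected with the Hugging Face datasets library; the dataset id below is a placeholder assumption, not a claim about where this dump comes from.

# Illustrative sketch only: streaming a dataset with the columns listed above.
# "your-org/your-code-dataset" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("your-org/your-code-dataset", split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # first 200 characters of the stored source file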
hexsha: 676a35038f2f1fa1008bac252e568a2761aaeeed | size: 2,866 | ext: py | lang: Python
max_stars:  repo_path=deepfool.py | repo_name=LandAndLand/Universal-Adversarial-Perturbations-Pytorch | repo_head_hexsha=967bb0aedf924a3836172bf0d4acb8a7ac442048 | repo_licenses=["MIT"] | count=39 | event_min_datetime=2019-06-03T21:28:24.000Z | event_max_datetime=2022-03-26T12:12:16.000Z
max_issues: repo_path=deepfool.py | repo_name=LandAndLand/Universal-Adversarial-Perturbations-Pytorch | repo_head_hexsha=967bb0aedf924a3836172bf0d4acb8a7ac442048 | repo_licenses=["MIT"] | count=1 | event_min_datetime=2020-02-24T02:02:00.000Z | event_max_datetime=2020-02-24T02:02:00.000Z
max_forks:  repo_path=deepfool.py | repo_name=LandAndLand/Universal-Adversarial-Perturbations-Pytorch | repo_head_hexsha=967bb0aedf924a3836172bf0d4acb8a7ac442048 | repo_licenses=["MIT"] | count=10 | event_min_datetime=2019-07-05T19:09:57.000Z | event_max_datetime=2022-02-16T05:38:18.000Z
content:
## The source of this file is https://github.com/BXuan694/Universal-Adversarial-Perturbation/blob/master/deepfool.py
## This file is not in the scope of the original paper of this project
import numpy as np
from torch.autograd import Variable
import torch as torch
import copy
from torch.autograd.gradcheck import zero_gradients
def deepfool(image, net, num_classes, overshoot, max_iter):
"""
:param image:
:param net: network (input: images, output: values of activation **BEFORE** softmax).
:param num_classes: num_classes (limits the number of classes to test against, by default = 10)
:param overshoot: used as a termination criterion to prevent vanishing updates (default = 0.02).
:param max_iter: maximum number of iterations for deepfool (default = 50)
:return: minimal perturbation that fools the classifier, number of iterations that it required, new estimated_label and perturbed image
"""
is_cuda = torch.cuda.is_available()
if is_cuda:
image = image.cuda()
net = net.cuda()
f_image = net.forward(Variable(image[None, :, :, :], requires_grad=True)).data.cpu().numpy().flatten()
I = f_image.argsort()[::-1]
I = I[0:num_classes]
label = I[0]
input_shape = image.cpu().numpy().shape
pert_image = copy.deepcopy(image)
w = np.zeros(input_shape)
r_tot = np.zeros(input_shape)
loop_i = 0
x = Variable(pert_image[None, :], requires_grad=True)
fs = net.forward(x)
k_i = label
while k_i == label and loop_i < max_iter:
pert = np.inf
fs[0, I[0]].backward(retain_graph=True)
grad_orig = x.grad.data.cpu().numpy().copy()
for k in range(1, num_classes):
zero_gradients(x)
fs[0, I[k]].backward(retain_graph=True)
cur_grad = x.grad.data.cpu().numpy().copy()
# set new w_k and new f_k
w_k = cur_grad - grad_orig
f_k = (fs[0, I[k]] - fs[0, I[0]]).data.cpu().numpy()
pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())
# determine which w_k to use
if pert_k < pert:
pert = pert_k
w = w_k
# compute r_i and r_tot
# Added 1e-4 for numerical stability
r_i = (pert+1e-4) * w / np.linalg.norm(w)
r_tot = np.float32(r_tot + r_i)
if is_cuda:
pert_image = image + (1+overshoot)*torch.from_numpy(r_tot).cuda()
else:
pert_image = image + (1+overshoot)*torch.from_numpy(r_tot)
x = Variable(pert_image, requires_grad=True)
# print(image.shape)
# print(x.view(1,1,image.shape[0],-1).shape)
fs = net.forward(x.view(1,1,image.shape[1],-1))
k_i = np.argmax(fs.data.cpu().numpy().flatten())
loop_i += 1
return (1+overshoot)*r_tot, loop_i, label, k_i, pert_image
avg_line_length: 33.325581 | max_line_length: 142 | alphanum_fraction: 0.618632
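A hypothetical usage sketch for the deepfool() function in the record above. The tiny single-channel classifier, the random input and the hyperparameters are all assumptions for illustration: this variant of deepfool reshapes inputs as (1, 1, H, W), so it expects MNIST-style grayscale images, and it still relies on the old torch.autograd.gradcheck.zero_gradients import, i.e. an older PyTorch release.

# Hypothetical usage sketch, not part of the dataset record above.
import torch
import torch.nn as nn

class TinyMnistNet(nn.Module):
    """Stand-in single-channel classifier used only for this illustration."""
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 10))
    def forward(self, x):
        return self.body(x)

net = TinyMnistNet().eval()
image = torch.rand(1, 28, 28)  # one fake 28x28 grayscale image in [0, 1]
r_tot, n_iter, orig_label, new_label, pert_image = deepfool(
    image, net, num_classes=10, overshoot=0.02, max_iter=50
)
print("iterations:", n_iter, "label:", orig_label, "->", new_label)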
hexsha: 55f75811d3b2ede04b0b8b8b6f4bd969afde4971 | size: 1,204 | ext: py | lang: Python
max_stars:  repo_path=model/cascade.rcnn/yexiguafu/res50.rcnn.double.heads.two.stages.set.nms/demo.py | repo_name=yexiguafuqihao/crowddet-megengine | repo_head_hexsha=866c0fd3767e8f3cce84a78efc0ff95f23ef6b61 | repo_licenses=["Apache-2.0"] | count=1 | event_min_datetime=2022-03-09T06:59:32.000Z | event_max_datetime=2022-03-09T06:59:32.000Z
max_issues: repo_path=model/cascade.rcnn/yexiguafu/res50.rcnn.double.heads.two.stages.set.nms/demo.py | repo_name=yexiguafuqihao/crowddet-megengine | repo_head_hexsha=866c0fd3767e8f3cce84a78efc0ff95f23ef6b61 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=model/cascade.rcnn/yexiguafu/res50.rcnn.double.heads.two.stages.set.nms/demo.py | repo_name=yexiguafuqihao/crowddet-megengine | repo_head_hexsha=866c0fd3767e8f3cce84a78efc0ff95f23ef6b61 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from config import config
from common import *
def computeJaccard(fpath, save_path ='results.md'):
assert os.path.exists(fpath)
records = load_func(fpath)
GT = load_func(config.eval_json)
fid = open(save_path, 'a')
for i in range(3, 10):
score_thr = 1e-1 * i
results = common_process(worker, records, 20, GT, score_thr, 0.5)
line = strline(results)
line = 'score_thr:{:.3f}, '.format(score_thr) + line
print(line)
fid.write(line + '\n')
fid.flush()
fid.close()
def computeIoUs(fpath):
name = os.path.basename(fpath)
print('Evaluating {}...'.format(name))
mAP, mMR = compute_mAP(fpath)
fid = open('results.md', 'a')
fid.write('{}\ndtboxes:\n'.format(name))
print('{}\ndtboxes:\n'.format(name))
line = 'mAP:{:.4f}, mMR:{:.4f}, '.format(mAP, mMR)
print(line)
fid.write(line + '\n')
fid.close()
computeJaccard(fpath)
def eval_all():
for epoch in range(20, 40):
fpath = osp.join(config.eval_dir, 'epoch-{}.human'.format(epoch))
if not os.path.exists(fpath):
continue
computeIoUs(fpath)
if __name__ == '__main__':
eval_all()
avg_line_length: 26.173913 | max_line_length: 73 | alphanum_fraction: 0.591362
hexsha: 6aedde51ba335d586bdb455e473e969b9e72abe2 | size: 1,670 | ext: py | lang: Python
max_stars:  repo_path=dockerfille_dev/flask_api.py | repo_name=Bilal-IA/Continuous-Machine-Learning-CML-CI-CD-project | repo_head_hexsha=5b5c418241f0d2b6c05bde196348c90769c13518 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=dockerfille_dev/flask_api.py | repo_name=Bilal-IA/Continuous-Machine-Learning-CML-CI-CD-project | repo_head_hexsha=5b5c418241f0d2b6c05bde196348c90769c13518 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=dockerfille_dev/flask_api.py | repo_name=Bilal-IA/Continuous-Machine-Learning-CML-CI-CD-project | repo_head_hexsha=5b5c418241f0d2b6c05bde196348c90769c13518 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
from flask import Flask, request
import pickle
import flasgger
from flasgger import Swagger
app=Flask(__name__)
Swagger(app)
pickle_in = open("model.pkl","rb")
reg_model=pickle.load(pickle_in)
@app.route('/predict', methods=["GET"])
def predict_house_price():
"""Real Estate Price Prediction !!.
---
parameters:
- name: transaction_date
in: query
type: number
required: true
- name: house_age
in: query
type: number
required: true
- name: distance_to_MRT_station
in: query
type: number
required: true
- name: number_of_convenience_stores
in: query
type: number
required: true
- name: latitude
in: query
type: number
required: true
- name: longitude
in: query
type: number
required: true
responses:
200:
description: The output values
"""
transaction_date=request.args.get("transaction_date")
house_age=request.args.get("house_age")
distance_to_MRT_station=request.args.get("distance_to_MRT_station")
number_of_convenience_stores=request.args.get("number_of_convenience_stores")
latitude=request.args.get("latitude")
longitude=request.args.get("longitude")
prediction=reg_model.predict([[transaction_date,house_age,distance_to_MRT_station,number_of_convenience_stores,latitude,longitude]])
print(prediction)
return "The predicted House Price is "+str(prediction)
if __name__=='__main__':
app.run(host='0.0.0.0',port=8000)
avg_line_length: 27.833333 | max_line_length: 138 | alphanum_fraction: 0.632335
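A hypothetical client call for the Swagger-documented /predict endpoint defined above, assuming the Flask app is running locally on port 8000 and that the requests package is installed; the parameter values are made up for illustration.

# Hypothetical client-side sketch for the /predict endpoint above.
import requests

params = {
    "transaction_date": 2013.5,
    "house_age": 13.3,
    "distance_to_MRT_station": 561.98,
    "number_of_convenience_stores": 5,
    "latitude": 24.987,
    "longitude": 121.54,
}
resp = requests.get("http://localhost:8000/predict", params=params)
print(resp.text)  # e.g. "The predicted House Price is [ ... ]"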
hexsha: b5bd07cb92cfaded0d512d59acd3cdf09193dfed | size: 8,049 | ext: py | lang: Python
max_stars:  repo_path=modin/pandas/test/dataframe/test_binary.py | repo_name=heuermh/modin | repo_head_hexsha=2f880c1c93b05dbcaf8120ef6bb75a6adfa2e8e5 | repo_licenses=["ECL-2.0", "Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=modin/pandas/test/dataframe/test_binary.py | repo_name=heuermh/modin | repo_head_hexsha=2f880c1c93b05dbcaf8120ef6bb75a6adfa2e8e5 | repo_licenses=["ECL-2.0", "Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=modin/pandas/test/dataframe/test_binary.py | repo_name=heuermh/modin | repo_head_hexsha=2f880c1c93b05dbcaf8120ef6bb75a6adfa2e8e5 | repo_licenses=["ECL-2.0", "Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import pandas
import matplotlib
import modin.pandas as pd
from modin.pandas.test.utils import (
df_equals,
test_data_values,
test_data_keys,
eval_general,
test_data,
create_test_dfs,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
@pytest.mark.parametrize(
"other",
[
lambda df: 4,
lambda df, axis: df.iloc[0] if axis == "columns" else list(df[df.columns[0]]),
],
ids=["scalar", "series_or_list"],
)
@pytest.mark.parametrize("axis", ["rows", "columns"])
@pytest.mark.parametrize(
"op",
[
*("add", "radd", "sub", "rsub", "mod", "rmod", "pow", "rpow"),
*("truediv", "rtruediv", "mul", "rmul", "floordiv", "rfloordiv"),
],
)
def test_math_functions(other, axis, op):
data = test_data["float_nan_data"]
if (op == "floordiv" or op == "rfloordiv") and axis == "rows":
# lambda == "series_or_list"
pytest.xfail(reason="different behaviour")
if op == "rmod" and axis == "rows":
# lambda == "series_or_list"
pytest.xfail(reason="different behaviour")
eval_general(
*create_test_dfs(data), lambda df: getattr(df, op)(other(df, axis), axis=axis)
)
@pytest.mark.parametrize(
"other",
[lambda df: df[: -(2 ** 4)], lambda df: df[df.columns[0]].reset_index(drop=True)],
ids=["check_missing_value", "check_different_index"],
)
@pytest.mark.parametrize("fill_value", [None, 3.0])
@pytest.mark.parametrize(
"op",
[
*("add", "radd", "sub", "rsub", "mod", "rmod", "pow", "rpow"),
*("truediv", "rtruediv", "mul", "rmul", "floordiv", "rfloordiv"),
],
)
def test_math_functions_fill_value(other, fill_value, op):
data = test_data["int_data"]
modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
eval_general(
modin_df,
pandas_df,
lambda df: getattr(df, op)(other(df), axis=0, fill_value=fill_value),
)
@pytest.mark.parametrize(
"op",
[
*("add", "radd", "sub", "rsub", "mod", "rmod", "pow", "rpow"),
*("truediv", "rtruediv", "mul", "rmul", "floordiv", "rfloordiv"),
],
)
def test_math_functions_level(op):
modin_df = pd.DataFrame(test_data["int_data"])
modin_df.index = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df, op)(modin_df, axis=0, level=1)
@pytest.mark.parametrize(
"math_op, alias",
[
("truediv", "divide"),
("truediv", "div"),
("rtruediv", "rdiv"),
("mul", "multiply"),
("sub", "subtract"),
("add", "__add__"),
("radd", "__radd__"),
("div", "__div__"),
("rdiv", "__rdiv__"),
("truediv", "__truediv__"),
("rtruediv", "__rtruediv__"),
("floordiv", "__floordiv__"),
("rfloordiv", "__rfloordiv__"),
("mod", "__mod__"),
("rmod", "__rmod__"),
("mul", "__mul__"),
("rmul", "__rmul__"),
("pow", "__pow__"),
("rpow", "__rpow__"),
("sub", "__sub__"),
("rsub", "__rsub__"),
],
)
def test_math_alias(math_op, alias):
assert getattr(pd.DataFrame, math_op) == getattr(pd.DataFrame, alias)
@pytest.mark.parametrize("other", ["as_left", 4, 4.0, "a"])
@pytest.mark.parametrize("op", ["eq", "ge", "gt", "le", "lt", "ne"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_comparison(data, op, other):
eval_general(
*create_test_dfs(data),
lambda df: getattr(df, op)(df if other == "as_left" else other),
)
@pytest.mark.parametrize("op", ["eq", "ge", "gt", "le", "lt", "ne"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multi_level_comparison(data, op):
modin_df_multi_level = pd.DataFrame(data)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df_multi_level.index]
)
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
def test_equals():
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
@pytest.mark.parametrize("is_more_other_partitions", [True, False])
@pytest.mark.parametrize(
"op_type", ["df_ser", "df_df", "ser_ser_same_name", "ser_ser_different_name"]
)
@pytest.mark.parametrize(
"is_idx_aligned", [True, False], ids=["idx_aligned", "idx_not_aligned"]
)
def test_mismatched_row_partitions(is_idx_aligned, op_type, is_more_other_partitions):
data = [0, 1, 2, 3, 4, 5]
modin_df1, pandas_df1 = create_test_dfs({"a": data, "b": data})
modin_df, pandas_df = modin_df1.loc[:2], pandas_df1.loc[:2]
modin_df2 = modin_df.append(modin_df)
pandas_df2 = pandas_df.append(pandas_df)
if is_more_other_partitions:
modin_df2, modin_df1 = modin_df1, modin_df2
pandas_df2, pandas_df1 = pandas_df1, pandas_df2
if is_idx_aligned:
if is_more_other_partitions:
modin_df1.index = pandas_df1.index = pandas_df2.index
else:
modin_df2.index = pandas_df2.index = pandas_df1.index
# Pandas doesn't support this case because the result would contain duplicate values along the column axis.
if op_type == "df_ser" and not is_idx_aligned and is_more_other_partitions:
eval_general(
modin_df2,
pandas_df2,
lambda df: df / modin_df1.a
if isinstance(df, pd.DataFrame)
else df / pandas_df1.a,
)
return
if op_type == "df_ser":
modin_res = modin_df2 / modin_df1.a
pandas_res = pandas_df2 / pandas_df1.a
elif op_type == "df_df":
modin_res = modin_df2 / modin_df1
pandas_res = pandas_df2 / pandas_df1
elif op_type == "ser_ser_same_name":
modin_res = modin_df2.a / modin_df1.a
pandas_res = pandas_df2.a / pandas_df1.a
elif op_type == "ser_ser_different_name":
modin_res = modin_df2.a / modin_df1.b
pandas_res = pandas_df2.a / pandas_df1.b
df_equals(modin_res, pandas_res)
def test_duplicate_indexes():
data = [0, 1, 2, 3, 4, 5]
modin_df1, pandas_df1 = create_test_dfs(
{"a": data, "b": data}, index=[0, 1, 2, 0, 1, 2]
)
modin_df2, pandas_df2 = create_test_dfs({"a": data, "b": data})
df_equals(modin_df1 / modin_df2, pandas_df1 / pandas_df2)
avg_line_length: 32.719512 | max_line_length: 94 | alphanum_fraction: 0.638464
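The tests above repeatedly apply the same operation to a Modin frame and its pandas twin and assert that the results agree. A minimal standalone sketch of that pattern, assuming a working Modin installation; create_test_dfs and df_equals are the same helpers the module imports from modin.pandas.test.utils.

# Minimal sketch of the Modin-vs-pandas comparison pattern used in the tests above.
from modin.pandas.test.utils import create_test_dfs, df_equals

modin_df, pandas_df = create_test_dfs({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
# The same binary operation is applied to both frames and the results must match.
df_equals(modin_df.add(1), pandas_df.add(1))
df_equals(modin_df.ge(2), pandas_df.ge(2))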
hexsha: 02f2408585678efc2603dabf4e3e519292b9af20 | size: 3,979 | ext: py | lang: Python
max_stars:  repo_path=lib/galaxy/datatypes/upload_util.py | repo_name=astrovsky01/galaxy | repo_head_hexsha=e106fd2f0b1a9a2f37a92e634fb8a5dc3d0fe51a | repo_licenses=["CC-BY-3.0"] | count=2 | event_min_datetime=2015-02-23T21:21:44.000Z | event_max_datetime=2015-06-17T20:10:47.000Z
max_issues: repo_path=lib/galaxy/datatypes/upload_util.py | repo_name=astrovsky01/galaxy | repo_head_hexsha=e106fd2f0b1a9a2f37a92e634fb8a5dc3d0fe51a | repo_licenses=["CC-BY-3.0"] | count=8 | event_min_datetime=2015-09-23T15:45:17.000Z | event_max_datetime=2021-06-30T19:14:19.000Z
max_forks:  repo_path=lib/galaxy/datatypes/upload_util.py | repo_name=anvilproject/galaxy | repo_head_hexsha=c429c8f12f3788bb1450d8a7b43cfb84831f2c54 | repo_licenses=["CC-BY-3.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import os
from typing import NamedTuple, Optional
from galaxy.datatypes import data, sniff
from galaxy.util.checkers import (
check_binary,
is_single_file_zip,
is_zip,
)
class UploadProblemException(Exception):
pass
class HandleUploadResponse(NamedTuple):
stdout: Optional[str]
ext: str
datatype: data.Data
is_binary: bool
converted_path: Optional[str]
def handle_upload(
registry,
path: str, # dataset.path
requested_ext: str, # dataset.file_type
name: str, # dataset.name,
tmp_prefix: Optional[str],
tmp_dir: Optional[str],
check_content: bool,
link_data_only: bool,
in_place: bool,
auto_decompress: bool,
convert_to_posix_lines: bool,
convert_spaces_to_tabs: bool,
) -> HandleUploadResponse:
stdout = None
converted_path = None
multi_file_zip = False
# Does the first 1K contain a null?
is_binary = check_binary(path)
# Decompress if needed/desired and determine/validate filetype. If a keep-compressed datatype is explicitly selected
# or if autodetection is selected and the file sniffs as a keep-compressed datatype, it will not be decompressed.
if not link_data_only:
if auto_decompress and is_zip(path) and not is_single_file_zip(path):
multi_file_zip = True
try:
ext, converted_path, compression_type = sniff.handle_uploaded_dataset_file_internal(
path,
registry,
ext=requested_ext,
tmp_prefix=tmp_prefix,
tmp_dir=tmp_dir,
in_place=in_place,
check_content=check_content,
is_binary=is_binary,
auto_decompress=auto_decompress,
uploaded_file_ext=os.path.splitext(name)[1].lower().lstrip('.'),
convert_to_posix_lines=convert_to_posix_lines,
convert_spaces_to_tabs=convert_spaces_to_tabs,
)
except sniff.InappropriateDatasetContentError as exc:
raise UploadProblemException(exc)
elif requested_ext == 'auto':
ext = sniff.guess_ext(path, registry.sniff_order, is_binary=is_binary)
else:
ext = requested_ext
# The converted path will be the same as the input path if no conversion was done (or in-place conversion is used)
converted_path = None if converted_path == path else converted_path
# Validate datasets where the filetype was explicitly set using the filetype's sniffer (if any)
if requested_ext != 'auto':
datatype = registry.get_datatype_by_extension(requested_ext)
# Enable sniffer "validate mode" (prevents certain sniffers from disabling themselves)
if check_content and hasattr(datatype, 'sniff') and not datatype.sniff(path):
stdout = ("Warning: The file 'Type' was set to '{ext}' but the file does not appear to be of that"
" type".format(ext=requested_ext))
# Handle unsniffable binaries
if is_binary and ext == 'binary':
upload_ext = os.path.splitext(name)[1].lower().lstrip('.')
if registry.is_extension_unsniffable_binary(upload_ext):
stdout = ("Warning: The file's datatype cannot be determined from its contents and was guessed based on"
" its extension, to avoid this warning, manually set the file 'Type' to '{ext}' when uploading"
" this type of file".format(ext=upload_ext))
ext = upload_ext
else:
stdout = ("The uploaded binary file format cannot be determined automatically, please set the file 'Type'"
" manually")
datatype = registry.get_datatype_by_extension(ext)
if multi_file_zip and not getattr(datatype, 'compressed', False):
stdout = 'ZIP file contained more than one file, only the first file was added to Galaxy.'
return HandleUploadResponse(stdout, ext, datatype, is_binary, converted_path)
avg_line_length: 39.79 | max_line_length: 120 | alphanum_fraction: 0.671023
hexsha: c368486fba0ab6bd97ee7297baa78caba3170c2c | size: 538 | ext: py | lang: Python
max_stars:  repo_path=main/xinput/template.py | repo_name=RoastVeg/cports | repo_head_hexsha=803c7f07af341eb32f791b6ec1f237edb2764bd5 | repo_licenses=["BSD-2-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=main/xinput/template.py | repo_name=RoastVeg/cports | repo_head_hexsha=803c7f07af341eb32f791b6ec1f237edb2764bd5 | repo_licenses=["BSD-2-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=main/xinput/template.py | repo_name=RoastVeg/cports | repo_head_hexsha=803c7f07af341eb32f791b6ec1f237edb2764bd5 | repo_licenses=["BSD-2-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
pkgname = "xinput"
pkgver = "1.6.3"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
makedepends = [
"libxext-devel", "libxi-devel", "libxrandr-devel", "libxinerama-devel"
]
pkgdesc = "X input device configuration utility"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/app/{pkgname}-{pkgver}.tar.bz2"
sha256 = "35a281dd3b9b22ea85e39869bb7670ba78955d5fec17c6ef7165d61e5aeb66ed"
def post_install(self):
self.install_license("COPYING")
avg_line_length: 29.888889 | max_line_length: 75 | alphanum_fraction: 0.737918
hexsha: a04e5610ec98ac23399936c3693e1730c24fc347 | size: 351 | ext: py | lang: Python
max_stars:  repo_path=pressure_vessel_calcs/h_conical_head_calc.py | repo_name=marciokristo/Pressure_Vessel_Calc | repo_head_hexsha=146a656021f694d0faf453cce2a0947eef24158c | repo_licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=pressure_vessel_calcs/h_conical_head_calc.py | repo_name=marciokristo/Pressure_Vessel_Calc | repo_head_hexsha=146a656021f694d0faf453cce2a0947eef24158c | repo_licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=pressure_vessel_calcs/h_conical_head_calc.py | repo_name=marciokristo/Pressure_Vessel_Calc | repo_head_hexsha=146a656021f694d0faf453cce2a0947eef24158c | repo_licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
"""
PR_VESSEL 2021 ASME VIII Div.1
Calculation program for Pressure Vessels analysis
Created by Marcio Cristo 2021
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation.
File intended to hold the conical head calculation functions
"""
avg_line_length: 39 | max_line_length: 167 | alphanum_fraction: 0.811966
hexsha: 51f28545c2e31df4fe0ae246c58c01590dcfada7 | size: 3,366 | ext: py | lang: Python
max_stars:  repo_path=Python Programs/consecutive-prime-sum-tcs-codevita.py | repo_name=muhammad-masood-ur-rehman/Skillrack | repo_head_hexsha=71a25417c89d0efab40ee6229ccd758b26ae4312 | repo_licenses=["CC0-1.0"] | count=2 | event_min_datetime=2021-06-26T21:50:59.000Z | event_max_datetime=2021-09-18T04:55:51.000Z
max_issues: repo_path=Python Programs/consecutive-prime-sum-tcs-codevita.py | repo_name=muhammad-masood-ur-rehman/Skillrack | repo_head_hexsha=71a25417c89d0efab40ee6229ccd758b26ae4312 | repo_licenses=["CC0-1.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=Python Programs/consecutive-prime-sum-tcs-codevita.py | repo_name=muhammad-masood-ur-rehman/Skillrack | repo_head_hexsha=71a25417c89d0efab40ee6229ccd758b26ae4312 | repo_licenses=["CC0-1.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
Consecutive Prime Sum - TCS CodeVita
Some prime numbers can be expressed as a sum of other consecutive prime numbers.
For example:
  5 = 2 + 3,
  17 = 2 + 3 + 5 + 7,
  41 = 2 + 3 + 5 + 7 + 11 + 13.
Your task is to find out how many prime numbers which satisfy this property are present in the range 3 to N subject to a constraint that summation should always start with number 2.
Write code to find out the number of prime numbers that satisfy the above-mentioned property in a given range.
Input Format: First line contains a number N
Output Format: Print the total number of all such prime numbers which are less than or equal to N.
Constraints: 2<N<=12,000,000,000
Example Input/Output 1:
Input:
43
Output:
4
Python:
num = int(input())
arr = []
sum = 0
count = 0
if num > 1:
for i in range(2, num + 2):
for j in range(2, i):
if i % j == 0:
break
else:
arr.append(i)
def is_prime(sum):
    if sum < 2:
        return False
    for i in range(2, (sum // 2) + 1):
        if sum % i == 0:
            return False
    return True
for i in range(0, len(arr)):
sum = sum + arr[i]
if sum <= num:
if is_prime(sum):
count = count + 1
print(count)
C:
#include <stdio.h>
int prime(int b)
{
int j,cnt;
cnt=1;
for(j=2;j<=b/2;j++)
{
if(b%j==0)
cnt=0;
}
if(cnt==0)
return 1;
else
return 0;
}
int main() {
int i,j,n,cnt,a[25],c,sum=0,count=0,k=0;
scanf("%d",&n);
for(i=2;i<=n;i++)
{
cnt=1;
for(j=2;j<=n/2;j++)
{
if(i%j==0)
cnt=0;
}
if(cnt==1)
{
a[k]=i;
k++;
}
}
for(i=0;i<k;i++)
{
sum=sum+a[i];
c= prime(sum);
if(c==1)
count++;
}
printf("%d",count);
return 0;
}
C++:
#include <iostream>
using namespace std;
int prime(int b)
{
int j,cnt;
cnt=1;
for(j=2;j<=b/2;j++)
{
if(b%j==0)
cnt=0;
}
if(cnt==0)
return 1;
else
return 0;
}
int main()
{
int i,j,n,cnt,a[25],c,sum=0,count=0,k=0;
cin>>n;
for(i=2;i<=n;i++)
{
cnt=1;
for(j=2;j<=n/2;j++)
{
if(i%j==0)
cnt=0;
}
if(cnt==1)
{
a[k]=i;
k++;
}
}
for(i=0;i<k;i++)
{
sum=sum+a[i];
c= prime(sum);
if(c==1)
count++;
}
cout<<count;
return 0;
}
Java:
import java.util.Scanner;
class Main {
static int prime(int b) {
int j,cnt;
cnt=1;
for (j = 2; j <= b/2; j++) {
if(b%j==0)
cnt=0;
}
if(cnt==0)
return 1;
else
return 0;
}
public static void main(String[] args) {
Scanner sc = new Scanner(System.in);
int i,j,n=0,cnt,c=0,sum=0,count=0,k=0;
Main t = new Main();
int[] a = new int[25];
System.out.println("Enter no");
n = sc.nextInt();
for (i = 2; i <=n ; i++) {
cnt=1;
for (j = 2; j <= n/2; j++) {
if(i%j==0)
cnt=0;
}
if(cnt==1) {
a[k]=i;
k++;
}
}
for (i = 0; i < k; i++) {
sum=sum+a[i];
c=t.prime(sum);
if(c==1)
count++;
}
System.out.println(count);
}
}
avg_line_length: 19.125 | max_line_length: 181 | alphanum_fraction: 0.45514
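For reference, a compact self-contained Python sketch of the same task (count how many prefix sums of the primes up to N are themselves prime, always starting the summation at 2); it is an illustration, not part of the dataset record above.

# Compact reference sketch for the "consecutive prime sum" task above.
def is_prime(n: int) -> bool:
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def consecutive_prime_sums(n: int) -> int:
    count, total = 0, 0
    for p in range(2, n + 1):
        if not is_prime(p):
            continue
        total += p          # running sum of consecutive primes starting at 2
        if total > n:
            break
        if is_prime(total):
            count += 1
    return count

print(consecutive_prime_sums(43))  # expected output: 4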
hexsha: 3dca01bfccd19f099bd7425174814c2b2876b5e8 | size: 5,366 | ext: py | lang: Python
max_stars:  repo_path=KD_Lib/KD/vision/BANN/BANN.py | repo_name=DA-southampton/KD_Lib | repo_head_hexsha=bd4a9b93b9674607ecf467d280d5cab1c516bdc6 | repo_licenses=["MIT"] | count=1 | event_min_datetime=2021-11-26T17:46:29.000Z | event_max_datetime=2021-11-26T17:46:29.000Z
max_issues: repo_path=KD_Lib/KD/vision/BANN/BANN.py | repo_name=DA-southampton/KD_Lib | repo_head_hexsha=bd4a9b93b9674607ecf467d280d5cab1c516bdc6 | repo_licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=KD_Lib/KD/vision/BANN/BANN.py | repo_name=DA-southampton/KD_Lib | repo_head_hexsha=bd4a9b93b9674607ecf467d280d5cab1c516bdc6 | repo_licenses=["MIT"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
from copy import deepcopy
from KD_Lib.KD.common import BaseClass
class BANN(BaseClass):
"""
Implementation of paper "Born Again Neural Networks"
https://arxiv.org/abs/1805.04770
:param student_model (torch.nn.Module): Student model
:param train_loader (torch.utils.data.DataLoader): Dataloader for training
:param val_loader (torch.utils.data.DataLoader): Dataloader for validation
:param optimizer (torch.optim.*): Optimizer for training
:param num_gen (int): Number of generations to train.
:param loss_fn (torch.nn.Module): Loss Function used for first model in gen.
later, KLDivLoss is used.
:param temp (float): Temperature parameter for distillation
:param distil_weight (float): Weight parameter for distillation loss
:param device (str): Device for training; 'cpu' for cpu and 'cuda' for gpu
:param log (bool): True if logging required
:param logdir (str): Directory for storing logs
"""
def __init__(
self,
student_model,
train_loader,
val_loader,
optimizer,
num_gen,
loss_fn=nn.CrossEntropyLoss(),
epoch_interval=5,
temp=20.0,
distil_weight=0.5,
device="cpu",
log=False,
logdir="./Experiments",
):
super(BANN, self).__init__(
student_model,
student_model,
train_loader,
val_loader,
optimizer,
optimizer,
loss_fn,
temp,
distil_weight,
device,
log,
logdir,
)
self.init_weights = deepcopy(student_model.state_dict())
self.init_optim = deepcopy(optimizer.state_dict())
self.num_gen = num_gen
self.gen = 0
def train_student(
self,
epochs=10,
plot_losses=False,
save_model=True,
save_model_pth="./models/student-{}.pth",
):
"""
Function that will be training the student
:param epochs (int): Number of epochs you want to train the student per generation
:param plot_losses (bool): True if you want to plot the losses for every generation
:param save_model (bool): True if you want to save the student model (Set true if you want to use models for later evaluation)
:param save_model_pth (str): Path where you want to save the student model
"""
try:
fmt = save_model_pth.format(1)
except:
print("Invalid save_model_pth, allow {\} for generation number")
return
for k in range(self.num_gen):
print("Born Again : Gen {}/{}".format(k + 1, self.num_gen))
self._train_student(
epochs, plot_losses, save_model, save_model_pth.format(k + 1)
)
# Use best model in k-1 gen as last model
self.teacher_model.load_state_dict(self.best_student_model_weights)
# Reset model for next generation
self.student_model.load_state_dict(self.init_weights)
# Reset optimizer for next generation
self.optimizer_student.load_state_dict(self.init_optim)
self.gen += 1
def evaluate(self, models_dir="./models"):
"""
Evaluate method for printing accuracies of the trained network
:param models_dir (str): Location of stored models. (default: ./models)
"""
print("Evaluating Model Ensemble")
models_dir = glob.glob(os.path.join(models_dir, "*.pth"))
len_models = len(models_dir)
outputs = []
model = self.student_model
for model_weight in models_dir:
model.load_state_dict(torch.load(model_weight))
output = self._evaluate_model(model, verbose=False)
outputs.append(output)
print("Total Models: ", len(outputs))
total = len(self.val_loader)
print("Total Samples: ", total)
correct = 0
for idx, (data, target) in enumerate(self.val_loader):
target = target.to(self.device)
output = outputs[0][idx] / len_models
for k in range(1, len_models):
output += outputs[k][idx] / len_models
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
print("-" * 80)
print(f"Accuracy: {correct/total}")
def calculate_kd_loss(self, y_pred_student, y_pred_teacher, y_true):
"""
Function used for calculating the KD loss during distillation
:param y_pred_student (Tensor): Predicted outputs from the student network
:param y_pred_teacher (Tensor): Predicted outputs from the teacher network
:param y_true (Tensor): True labels
"""
if self.gen == 0:
return self.loss_fn(y_pred_student, y_true)
s_i = F.log_softmax(y_pred_student / self.temp, dim=1)
t_i = F.softmax(y_pred_teacher / self.temp, dim=1)
KD_loss = nn.KLDivLoss()(s_i, t_i) * (
self.distil_weight * self.temp * self.temp
)
KD_loss += F.cross_entropy(y_pred_student, y_true) * (1.0 - self.distil_weight)
return KD_loss
avg_line_length: 35.773333 | max_line_length: 134 | alphanum_fraction: 0.611629
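A hypothetical training sketch for the BANN class defined above. It assumes KD_Lib, torchvision and a local MNIST download are available; the linear model, hyperparameters and paths are placeholders, not the library's documented example.

# Hypothetical BANN usage sketch; model, hyperparameters and paths are assumptions.
import os
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
optimizer = optim.SGD(model.parameters(), lr=0.01)
train_loader = DataLoader(
    datasets.MNIST(".", train=True, download=True, transform=transforms.ToTensor()), batch_size=64
)
val_loader = DataLoader(
    datasets.MNIST(".", train=False, download=True, transform=transforms.ToTensor()), batch_size=64
)

os.makedirs("./models", exist_ok=True)
distiller = BANN(model, train_loader, val_loader, optimizer, num_gen=3)
distiller.train_student(epochs=5, save_model_pth="./models/student-{}.pth")
distiller.evaluate(models_dir="./models")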
hexsha: e7037fbf044ecc71e6d33b7cba00759955406b69 | size: 4,357 | ext: py | lang: Python
max_stars:  repo_path=keras_frcnn/stanford_dogs_parser.py | repo_name=bordac6/frcnn-from-scratch-with-keras | repo_head_hexsha=f041d360c835d5ec15c3c12bf3658a54ffb20d73 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=keras_frcnn/stanford_dogs_parser.py | repo_name=bordac6/frcnn-from-scratch-with-keras | repo_head_hexsha=f041d360c835d5ec15c3c12bf3658a54ffb20d73 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=keras_frcnn/stanford_dogs_parser.py | repo_name=bordac6/frcnn-from-scratch-with-keras | repo_head_hexsha=f041d360c835d5ec15c3c12bf3658a54ffb20d73 | repo_licenses=["Apache-2.0"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
import os
import cv2
import xml.etree.ElementTree as ET
from scipy.io import loadmat
import numpy as np
def get_data(data_paths):
all_imgs = []
classes_count = {}
class_mapping = {}
visualise = False
print("data path:", data_paths)
train_list = loadmat(os.path.join(data_paths, 'lists', 'train_list.mat'))
# test_list = loadmat(os.path.join(data_paths, 'lists', 'test_list.mat'))
print('Parsing annotation files')
annot_path = os.path.join(data_paths, 'Annotation')
imgs_path = os.path.join(data_paths, 'Images')
trainval = [tr[0][0] for tr in train_list['file_list']]
trainval_files = trainval[:int(len(trainval)*0.8)]
test_files = trainval[int(len(trainval)*0.8):]
annots = [os.path.join(annot_path, s[0][0]) for s in train_list['annotation_list']]
idx = 0
for annot in annots:
try:
idx += 1
et = ET.parse(annot)
element = et.getroot()
element_objs = element.findall('object')
element_filename = element.find('filename').text
element_width = int(element.find('size').find('width').text)
element_height = int(element.find('size').find('height').text)
if len(element_objs) > 0:
path_to_filename = element_filename
for file_name in trainval:
if element_filename in file_name:
path_to_filename = file_name
annotation_data = {'filepath': os.path.join(imgs_path, path_to_filename), 'width': element_width,
'height': element_height, 'bboxes': []}
if any( element_filename in tv for tv in trainval_files):
annotation_data['imageset'] = 'trainval'
elif any(element_filename in tst for tst in test_files):
annotation_data['imageset'] = 'test'
else:
annotation_data['imageset'] = 'trainval'
for element_obj in element_objs:
class_name = element_obj.find('name').text
if class_name not in classes_count:
classes_count[class_name] = 1
else:
classes_count[class_name] += 1
if class_name not in class_mapping:
class_mapping[class_name] = len(class_mapping)
obj_bbox = element_obj.find('bndbox')
x1 = int(round(float(obj_bbox.find('xmin').text)))
y1 = int(round(float(obj_bbox.find('ymin').text)))
x2 = int(round(float(obj_bbox.find('xmax').text)))
y2 = int(round(float(obj_bbox.find('ymax').text)))
difficulty = 1 # parse all files.
annotation_data['bboxes'].append(
{'class': class_name, 'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'difficult': difficulty})
all_imgs.append(annotation_data)
# if visualise:
# img = cv2.imread(annotation_data['filepath'])
# for bbox in annotation_data['bboxes']:
# cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox[
# 'x2'], bbox['y2']), (0, 0, 255))
# cv2.imshow('img', img)
# cv2.waitKey(0)
except Exception as e:
print(e)
continue
return all_imgs, classes_count, class_mapping
avg_line_length: 48.955056 | max_line_length: 129 | alphanum_fraction: 0.439982
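A hypothetical call to the get_data() parser above, assuming the Stanford Dogs dataset has been unpacked under ./stanford_dogs with its standard Images/, Annotation/ and lists/ sub-directories.

# Hypothetical usage of get_data(); the dataset directory layout is an assumption.
all_imgs, classes_count, class_mapping = get_data("./stanford_dogs")
print(len(all_imgs), "annotated images")
print(len(class_mapping), "classes")
print("most frequent class:", max(classes_count, key=classes_count.get))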
hexsha: 2141bd975fad1b180f193f014ef9c5533a34f002 | size: 1,547 | ext: gyp | lang: Python
max_stars:  repo_path=library/boost-phoenix/1.57.0.gyp | repo_name=KjellSchubert/bru | repo_head_hexsha=dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | repo_licenses=["MIT"] | count=3 | event_min_datetime=2015-01-06T15:22:16.000Z | event_max_datetime=2015-11-27T18:13:04.000Z
max_issues: repo_path=library/boost-phoenix/1.57.0.gyp | repo_name=KjellSchubert/bru | repo_head_hexsha=dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | repo_licenses=["MIT"] | count=7 | event_min_datetime=2015-02-10T15:13:38.000Z | event_max_datetime=2021-05-30T07:51:13.000Z
max_forks:  repo_path=library/boost-phoenix/1.57.0.gyp | repo_name=KjellSchubert/bru | repo_head_hexsha=dd70b721d07fbd27c57c845cc3a29cd8f2dfc587 | repo_licenses=["MIT"] | count=3 | event_min_datetime=2015-01-29T17:19:53.000Z | event_max_datetime=2016-01-06T12:50:06.000Z
content:
{
"targets": [
{
"target_name": "boost-phoenix",
"type": "none",
"include_dirs": [
"1.57.0/phoenix-boost-1.57.0/include"
],
"all_dependent_settings": {
"include_dirs": [
"1.57.0/phoenix-boost-1.57.0/include"
]
},
"dependencies": [
"../boost-config/boost-config.gyp:*",
"../boost-predef/boost-predef.gyp:*",
"../boost-proto/boost-proto.gyp:*",
"../boost-assert/boost-assert.gyp:*",
"../boost-detail/boost-detail.gyp:*",
"../boost-fusion/boost-fusion.gyp:*",
"../boost-range/boost-range.gyp:*",
"../boost-core/boost-core.gyp:*",
"../boost-bind/boost-bind.gyp:*",
"../boost-preprocessor/boost-preprocessor.gyp:*",
"../boost-mpl/boost-mpl.gyp:*"
]
}
# this here's one of these smallish looking tests that take disturbingly
# long to compile. It's not as slow to compile as most of the
# boost-spirit tests, but still slow enough for me to not want to keep
# it enabled
#{
# "target_name": "boost-phoenix_test_for_each",
# "type": "executable",
# "test": {},
# "sources": [ "1.57.0/phoenix-boost-1.57.0/test/algorithm/for_each.cpp" ],
# "dependencies": [ "boost-phoenix" ]
#}
]
}
avg_line_length: 37.731707 | max_line_length: 86 | alphanum_fraction: 0.461538
hexsha: 3d5da590ffaaa562174a9fd2350c60699a8b4cb2 | size: 10,294 | ext: py | lang: Python
max_stars:  repo_path=tools/waf-tools/f_vscode.py | repo_name=patkan/foxbms-2 | repo_head_hexsha=329216a5b0739362512b4c744975a136f674f60c | repo_licenses=["CC-BY-4.0", "BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_issues: repo_path=tools/waf-tools/f_vscode.py | repo_name=patkan/foxbms-2 | repo_head_hexsha=329216a5b0739362512b4c744975a136f674f60c | repo_licenses=["CC-BY-4.0", "BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
max_forks:  repo_path=tools/waf-tools/f_vscode.py | repo_name=patkan/foxbms-2 | repo_head_hexsha=329216a5b0739362512b4c744975a136f674f60c | repo_licenses=["CC-BY-4.0", "BSD-3-Clause"] | count=null | event_min_datetime=null | event_max_datetime=null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
"""Implements a waf tool to configure a VS Code workspace to foxBMS specific
needs.
For information on VS Code see https://code.visualstudio.com/.
"""
import os
import re
import pathlib
import json
import jsonschema
import jinja2
from waflib import Utils
from waflib import Context
# This tool uses slash as path separator for the sake of simplicity as it
# - works on both, Windows and unix-like systems (see
# https://docs.microsoft.com/en-us/archive/blogs/larryosterman/why-is-the-dos-path-character)
# - it make the json configuration file more readable
def fix_jinja(txt):
"""appends empty line to file, if missing"""
return (
os.linesep.join([s for s in txt.splitlines() if not s.strip() == ""])
+ os.linesep
)
def configure(conf): # pylint: disable=too-many-statements,too-many-branches
"""configuration step of the VS Code waf tool:
- Find code
- configure a project if code was found"""
# create a VS Code workspace if code is installed on this platform
if Utils.is_win32:
conf.find_program("code", mandatory=False)
if not conf.env.CODE:
code_dir = "Microsoft VS Code"
path_list = [
os.path.join(os.environ["LOCALAPPDATA"], "Programs", code_dir),
os.path.join(os.environ["PROGRAMFILES"], code_dir),
]
conf.find_program(
"code",
path_list=path_list,
mandatory=False,
)
else:
conf.find_program("code", mandatory=False)
if not conf.env.CODE:
return
conf.start_msg("Creating workspace")
vscode_dir = conf.path.make_node(".vscode")
vscode_dir.mkdir()
vscode_config_dir = conf.path.find_dir(os.path.join("tools", "ide", "vscode"))
template_loader = jinja2.FileSystemLoader(searchpath=vscode_config_dir.relpath())
template_env = jinja2.Environment(loader=template_loader)
if Utils.is_win32:
waf_wrapper_script = pathlib.Path(conf.path.abspath()).as_posix() + "/waf.bat"
else:
waf_wrapper_script = pathlib.Path(conf.path.abspath()).as_posix() + "/waf.sh"
template = template_env.get_template("tasks.json.jinja2")
tasks = template.render(
WAF_WRAPPER_SCRIPT=waf_wrapper_script,
)
vsc_tasks_file = os.path.join(vscode_dir.relpath(), "tasks.json")
conf.path.make_node(vsc_tasks_file).write(fix_jinja(tasks))
template = template_env.get_template("extensions.json.jinja2")
extensions = template.render()
vsc_extensions_file = os.path.join(vscode_dir.relpath(), "extensions.json")
conf.path.make_node(vsc_extensions_file).write(fix_jinja(extensions))
template = template_env.get_template("cspell.json.jinja2")
cspell = template.render()
vsc_cspell_file = os.path.join(vscode_dir.relpath(), "cspell.json")
conf.path.make_node(vsc_cspell_file).write(fix_jinja(cspell))
template = template_env.get_template("settings.json.jinja2")
# Python and friends: Python, conda, pylint, black
py_exe = "python"
if conf.env.PYTHON:
py_exe = pathlib.Path(conf.env.PYTHON[0]).as_posix()
conda_exe = "conda"
if conf.env.CONDA:
conda_exe = pathlib.Path(conf.env.CONDA[0]).as_posix()
pylint_exe = "pylint"
if conf.env.PYLINT:
pylint_exe = pathlib.Path(conf.env.PYLINT[0]).as_posix()
pylint_cfg = ""
if conf.env.PYLINT_CONFIG:
pylint_cfg = pathlib.Path(conf.env.PYLINT_CONFIG[0]).as_posix()
black_exe = "black"
if conf.env.BLACK:
black_exe = pathlib.Path(conf.env.BLACK[0]).as_posix()
black_cfg = ""
if conf.env.BLACK_CONFIG:
black_cfg = pathlib.Path(conf.env.BLACK_CONFIG[0]).as_posix()
# directory of waf and waf-tools
waf_dir = pathlib.Path(Context.waf_dir).as_posix()
waf_tools_dir = pathlib.Path(
os.path.join(conf.path.abspath(), "tools", "waf-tools")
).as_posix()
# Clang-format
clang_format_executable = ""
if conf.env.CLANG_FORMAT:
    clang_format_executable = pathlib.Path(conf.env.CLANG_FORMAT[0]).as_posix()
# now it is in any case safe to render the template
settings = template.render(
PYTHONPATH=py_exe,
WAF_DIR=waf_dir,
WAF_TOOLS_DIR=waf_tools_dir,
CONDA_PATH=conda_exe,
PYLINT_PATH=pylint_exe,
PYLINT_CONFIG=pylint_cfg,
BLACKPATH=black_exe,
BLACK_CONFIG=black_cfg,
CLANG_FORMAT_EXECUTABLE=clang_format_executable,
)
vsc_settings_file = os.path.join(vscode_dir.relpath(), "settings.json")
conf.path.make_node(vsc_settings_file).write(fix_jinja(settings))
template = template_env.get_template("c_cpp_properties.json.jinja2")
defines_read = (
conf.root.find_node(conf.env.COMPILER_BUILTIN_DEFINES_FILE[0])
.read()
.splitlines()
)
vscode_defines = []
reg = re.compile(r"(#define)([ ])([a-zA-Z0-9_]{1,})([ ])([a-zA-Z0-9_\":. ]{1,})")
for d in defines_read:
define = d.split("/*")[0]
_def = reg.search(define)
if _def:
def_name, val = _def.group(3), _def.group(5)
if def_name in ("__DATE__", "__TIME__"):
continue
if '"' in val:
val = val.replace('"', '\\"')
vscode_defines.append((def_name, val))
bms_config = json.loads(
conf.path.find_node(os.path.join("conf", "bms", "bms.json")).read()
)
bms_config_schema = json.loads(
conf.path.find_node(
os.path.join("conf", "bms", "schema", "bms.schema.json")
).read()
)
try:
jsonschema.validate(instance=bms_config, schema=bms_config_schema)
except jsonschema.exceptions.ValidationError as err:
good_values = ", ".join([f"'{i}'" for i in err.validator_value])
conf.fatal(
f"Measurement IC '{err.instance}' is not supported. Use one of "
f"these: {good_values}."
)
bal = bms_config["slave-unit"]["balancing-strategy"]
soc = bms_config["application"]["algorithm"]["state-estimation"]["soc"]
soe = bms_config["application"]["algorithm"]["state-estimation"]["soe"]
soh = bms_config["application"]["algorithm"]["state-estimation"]["soh"]
imd = bms_config["application"]["insulation-monitoring-device"]
imd_manufacturer = imd["manufacturer"]
imd_model = imd["model"]
chip = bms_config["slave-unit"]["measurement-ic"]["chip"]
if chip in ("6804-1", "6811-1", "6812-1"):
chip = "6813-1"
c_cpp_properties = template.render(
ARMCL=pathlib.Path(conf.env.CC[0]).as_posix(),
OS=bms_config["operating-system"]["name"],
BALANCING_STRATEGY=bal,
MEASUREMENT_IC_MANUFACTURER=bms_config["slave-unit"]["measurement-ic"][
"manufacturer"
],
MEASUREMENT_IC_CHIP=chip,
TEMPERATURE_SENSOR_MANUFACTURER=bms_config["slave-unit"]["temperature-sensor"][
"manufacturer"
],
TEMPERATURE_SENSOR_MODEL=bms_config["slave-unit"]["temperature-sensor"][
"model"
],
TEMPERATURE_SENSOR_METHOD=bms_config["slave-unit"]["temperature-sensor"][
"method"
],
STATE_ESTIMATOR_SOC=soc,
STATE_ESTIMATOR_SOE=soe,
STATE_ESTIMATOR_SOH=soh,
IMD_MANUFACTURER=imd_manufacturer,
IMD_MODEL=imd_model,
INCLUDES=[pathlib.Path(x).as_posix() for x in conf.env.INCLUDES],
CSTANDARD="c99",
DEFINES=vscode_defines,
)
vsc_c_cpp_properties_file = os.path.join(
vscode_dir.relpath(), "c_cpp_properties.json"
)
for i in conf.env.VSCODE_MK_DIRS:
conf.path.make_node(i).mkdir()
conf.path.make_node(vsc_c_cpp_properties_file).write(fix_jinja(c_cpp_properties))
template = template_env.get_template("launch.json.jinja2")
gdb_exe = "gdb"
if conf.env.GDB:
gdb_exe = pathlib.Path(conf.env.GDB[0]).as_posix()
launch = template.render(GDB=gdb_exe)
vsc_launch_file = os.path.join(vscode_dir.relpath(), "launch.json")
conf.path.make_node(vsc_launch_file).write(fix_jinja(launch))
conf.end_msg("ok")
avg_line_length: 38.845283 | max_line_length: 98 | alphanum_fraction: 0.674471
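A hypothetical wscript snippet showing how a waf-based project might load the f_vscode tool above so that its configure() step runs; the tool name and tooldir are assumptions based on the file's path (tools/waf-tools).

# Hypothetical wscript fragment; loads the f_vscode waf tool during configuration.
def configure(conf):
    conf.load("f_vscode", tooldir="tools/waf-tools")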
hexsha: 0c3d1d17430244999650f3ff14ef461bc2b43b0a | size: 17,035 | ext: py | lang: Python
max_stars:  repo_path=tests/integration/basetests.py | repo_name=glormph/msstitch | repo_head_hexsha=ec7256caa5ec2bb0ba14a326fd445266c56f2295 | repo_licenses=["MIT"] | count=2 | event_min_datetime=2017-03-06T13:59:52.000Z | event_max_datetime=2019-02-08T19:33:40.000Z
max_issues: repo_path=tests/integration/basetests.py | repo_name=glormph/msstitch | repo_head_hexsha=ec7256caa5ec2bb0ba14a326fd445266c56f2295 | repo_licenses=["MIT"] | count=3 | event_min_datetime=2017-06-26T07:10:33.000Z | event_max_datetime=2020-03-06T11:43:28.000Z
max_forks:  repo_path=tests/integration/basetests.py | repo_name=glormph/msstitch | repo_head_hexsha=ec7256caa5ec2bb0ba14a326fd445266c56f2295 | repo_licenses=["MIT"] | count=2 | event_min_datetime=2018-10-04T10:04:04.000Z | event_max_datetime=2019-05-17T22:09:29.000Z
content:
import unittest
import subprocess
import os
import shutil
import sqlite3
import re
from lxml import etree
from tempfile import mkdtemp
class BaseTest(unittest.TestCase):
testdir = 'tests'
fixdir = os.path.join(os.getcwd(), testdir, 'fixtures')
basefixdir = os.path.join(os.getcwd(), testdir, 'base_fixtures')
outdir = os.path.join(os.getcwd(), testdir, 'test_output')
executable = 'msstitch'
def setUp(self):
self.infile = os.path.join(self.fixdir, self.infilename)
os.makedirs(self.outdir, exist_ok=True)
self.workdir = mkdtemp(dir=self.outdir)
os.chdir(self.workdir)
self.resultfn = os.path.join(self.workdir, self.infilename)
def tearDown(self):
pass
#shutil.rmtree(self.workdir)
def get_std_options(self):
cmd = [self.executable, self.command, '-i']
if type(self.infile) != list:
self.infile = [self.infile]
cmd.extend(self.infile)
if self.resultfn is not None:
cmd.extend(['-o', self.resultfn])
elif self.resultfn is None:
cmd.extend(['-d', self.workdir])
return cmd
def run_command(self, options=[], return_error=False):
cmd = self.get_std_options()
cmd.extend(options)
check = not return_error
try:
complete = subprocess.run(cmd, capture_output=True, text=True, check=check)
except subprocess.CalledProcessError as e:
print('Failed to run command: ', cmd)
print('Output of command:')
print(e.stdout)
print(e.stderr)
raise
else:
return complete
def get_values_from_db(self, dbfile, sql):
db = sqlite3.connect(dbfile)
return db.execute(sql)
def seq_in_db(self, dbconn, seq, seqtype, max_falloff=False):
seq = seq.replace('L', 'I')
if seqtype == 'ntermfalloff':
seq = '{0}%'.format(seq[::-1])
sql = 'SELECT seqs FROM known_searchspace WHERE seqs LIKE ?'
for match in dbconn.execute(sql, (seq,)):
if match[0][:-max_falloff] in seq:
return True
else:
sql = ('SELECT EXISTS(SELECT seqs FROM known_searchspace WHERE '
'seqs=? LIMIT 1)')
return dbconn.execute(sql, (seq,)).fetchone()[0] == 1
def get_tsvheader(self, fn):
with open(fn) as fp:
return next(fp).strip('\n').split('\t')
def tsv_generator(self, fn):
header = self.get_tsvheader(fn)
tsv = self.get_all_lines(fn)
for line in tsv:
yield {field: val for field, val in
zip(header, line.strip('\n').split('\t'))}
def get_all_lines(self, fn):
with open(fn) as fp:
next(fp)
for line in fp:
yield line
def get_xml_namespace(self, fn):
root = self.get_root_el(fn)
ns = {}
for prefix in root.nsmap:
separator = ':'
nsprefix = prefix
if prefix is None:
nsprefix = ''
separator = ''
ns['xmlns{0}{1}'.format(separator, nsprefix)] = root.nsmap[prefix]
return ns
def get_root_el(self, fn):
rootgen = etree.iterparse(fn, events=('start',))
root = next(rootgen)[1]
for child in root.getchildren():
root.remove(child)
return root
def copy_db_to_workdir(self, dbfn, dst=False):
if not dst:
shutil.copy(os.path.join(self.fixdir, dbfn), self.resultfn)
else:
shutil.copy(os.path.join(self.fixdir, dbfn), dst)
def get_float_or_na(self, value):
try:
return float(value)
except ValueError:
return value
def check_lines(self, expected, result):
with open(expected) as fp, open(result) as resultfp:
for expline, resline in zip(fp, resultfp):
self.assertEqual(expline, resline)
def isoquant_check(self, expected_isotable, acc_field, channels, nopsms):
isoquant = {}
accession = self.get_tsvheader(expected_isotable)[0]
for line in self.tsv_generator(expected_isotable):
acc = line.pop(accession)
isoquant[acc] = line
for result in self.tsv_generator(self.resultfn):
for ch in channels + nopsms:
try:
resval = float(result[ch])
expval = float(isoquant[result[acc_field]][ch])
except ValueError:
# NA found
self.assertEqual(result[ch], isoquant[result[acc_field]][ch])
else:
self.assertEqual(resval, expval)
class BaseTestPycolator(BaseTest):
infilename = 'perco.xml'
def get_psm_pep_ids_from_file(self, fn):
contents = self.read_percolator_out(fn)
return {'psm_ids': self.get_element_ids(contents['psms'],
'psm_id', contents['ns']),
'peptide_ids': self.get_element_ids(contents['peptides'],
'peptide_id',
contents['ns']),
'psm_seqs': self.get_psm_seqs(contents['psms'],
contents['ns'])
}
def read_percolator_out(self, fn):
ns = self.get_xml_namespace(fn)['xmlns']
contents = {'ns': ns, 'psms': [], 'peptides': []}
xml = etree.iterparse(fn)
for ac, el in xml:
if el.tag == '{%s}psm' % ns:
contents['psms'].append(el)
elif el.tag == '{%s}peptide' % ns:
contents['peptides'].append(el)
return contents
def get_element_ids(self, elements, id_attrib, ns):
return [x.attrib['{%s}%s' % (ns, id_attrib)] for x in elements]
def get_psm_seqs(self, psms, ns):
return [pepseq.attrib['seq'] for pepseq in
self.get_subelements(psms, 'peptide_seq', ns)]
def get_subelements(self, elements, subel, ns):
return [element.find('{%s}%s' % (ns, subel)) for element in elements]
def strip_modifications(self, pep):
return re.sub(r'\[UNIMOD:\d*\]', '', pep)
class MzidTSVBaseTest(BaseTest):
def rowify(self, records):
row, rownr = [], 0
for record in records:
if record[0] == rownr:
row.append(record[1:])
else:
yield row
row = [record[1:]]
rownr += 1
def check_results_sql(self, checkfields, expected_values):
for resultvals, exp_vals in zip(self.get_values(checkfields),
expected_values):
for resultval, expectval in zip(resultvals, exp_vals):
self.assertEqual([str(x) if x is not None else 'NA'
for x in expectval],
[str(x) for x in resultval])
def process_dbvalues_both(self, dbfile, sql, permfieldnrs, permfieldnames):
'''FIXME what does this do, is it same as rowify but with fields?'''
dbvals = self.get_values_from_db(dbfile, sql)
rownr, permvals = 0, []
for record in dbvals:
if record[0] != rownr:
yield [tuple([x, y]) for x, y in zip(permfieldnames, permvals)]
permvals = [record[nr] for nr in permfieldnrs]
rownr += 1
else:
permvals.extend([record[nr] for nr in permfieldnrs])
def get_values(self, checkfields, outfile=False):
if not outfile:
outfile = self.resultfn
with open(outfile) as fp:
header = next(fp).strip('\n').split('\t')
fieldindices = [header.index(field) for field in checkfields]
for line in fp:
line = line.strip('\n').split('\t')
if len(checkfields) > 1:
yield [(field, line[ix]) for field, ix in
zip(checkfields, fieldindices)]
else:
yield [(line[ix],) for ix in fieldindices]
class MSLookupTest(BaseTest):
base_db_fn = None
suffix = ''
def setUp(self):
super().setUp()
self.resultfn = os.path.join(self.workdir, 'mslookup_db.sqlite')
if self.base_db_fn is not None:
self.copy_db_to_workdir(self.base_db_fn)
def run_command(self, options=None):
if options is None:
options = []
if self.base_db_fn is not None:
options.extend(['--dbfile', self.resultfn])
super().run_command(options)
class ProttableTest(BaseTest):
def setUp(self):
super().setUp()
self.psmfile = os.path.join(
self.fixdir, 'set1_target_pg.tsv')
self.decoyfn = os.path.join(self.fixdir, 'decoy_peptides.tsv')
def check_ms1(self, featkey, featout):
top_ms1 = self.get_top_peps(self.infile[0], featkey, 'Peptide sequence', 'MS1 area (highest of all PSMs)')
top_ms1 = {prot: sum(sorted(ms1s.values(), reverse=True)[:3]) /
len(sorted(ms1s.values())[:3])
for prot, ms1s in top_ms1.items()}
for protein in self.tsv_generator(self.resultfn):
try:
self.assertEqual(float(protein['MS1 precursor area']),
top_ms1[protein[featout]])
except ValueError:
self.assertNotIn(protein['Protein ID'], top_ms1)
def dotest_proteintable(self, scorecolpat, featkey, featout,
summarize_method='denoms', should_error=False):
if summarize_method == 'denoms':
summ = ['--denompatterns', '126']
elif summarize_method == 'sweep':
summ = ['--mediansweep']
elif summarize_method == 'intensity':
summ = ['--medianintensity']
options = ['--scorecolpattern', scorecolpat,
'--logscore', '--decoyfn', self.decoyfn, '--ms1quant',
'--isobquantcolpattern', 'plex',
*summ, '--psmtable', self.psmfile
] + self.specialoptions
res = self.run_command(options, return_error=should_error)
if not should_error:
self.check_ms1(featkey, featout)
return res
def get_top_peps(self, fn, featkey, pepkey, valuekey, lowerbetter=False):
top_vals = {}
for pep in self.tsv_generator(fn):
prot = pep[featkey]
seq = pep[pepkey]
try:
value = float(pep[valuekey])
except ValueError:
continue
if prot in top_vals and seq in top_vals[prot]:
if lowerbetter and value > top_vals[prot][seq]:
continue
elif not lowerbetter and value < top_vals[prot][seq]:
continue
try:
top_vals[prot][seq] = value
except KeyError:
top_vals[prot] = {seq: value}
return top_vals
class MergeTest(BaseTest):
infilename = ''
command = 'merge'
dbfn = 'target_psms.sqlite'
def run_command(self, options):
self.infile = os.path.join(self.fixdir, self.infilename)
self.resultfn = os.path.join(self.workdir, self.infilename)
super().run_command(options)
def setUp(self):
super().setUp()
self.dbfile = os.path.join(self.workdir, self.dbfn)
self.copy_db_to_workdir(self.dbfn, self.dbfile)
self.options = ['--setnames', 'Set1',
'--fdrcolpattern', 'q-value', '--ms1quantcolpattern',
'area', '--isobquantcolpattern', 'plex',
'--dbfile', self.dbfile,
]
def check_build_values(self, sql, fields, accession, cutoff=False):
expected = {}
sql = sql + ' WHERE bs.set_name="Set1"'
for rec in self.get_values_from_db(self.dbfile, sql):
# skip multiple entries of e.g protein group per peptide
# or gene per protein
try:
expected[rec[0]][rec[1]] = rec[2:]
except KeyError:
expected[rec[0]] = {rec[1]: rec[2:]}
for line in self.tsv_generator(self.resultfn):
acc = line[accession]
for setname, pepvals in expected[acc].items():
for val, field in zip(pepvals, fields):
self.assertEqual(str(val),
line['{}_{}'.format(setname, field)])
expected.pop(acc)
self.check_exp_empty(expected, cutoff)
def check_exp_empty(self, expected, cutoff):
if cutoff:
self.assertFalse(expected == {})
else:
self.assertTrue(expected == {})
def check_built_isobaric(self, sql, accession, nopsms=False, cutoff=False):
expected = {}
if 'WHERE' in sql:
sql = sql + ' AND bs.set_name="Set1"'
else:
sql = sql + ' WHERE bs.set_name="Set1"'
for rec in self.get_values_from_db(self.dbfile, sql):
if nopsms:
am_psm, fqpsm = False, False
else:
am_psm, fqpsm = rec[4], rec[5]
try:
expected[rec[0]][rec[1]][rec[2]] = [rec[3], am_psm, fqpsm]
except KeyError:
try:
expected[rec[0]][rec[1]] = {rec[2]: [rec[3], am_psm, fqpsm]}
except KeyError:
expected[rec[0]] = {rec[1]: {rec[2]: [rec[3], am_psm, fqpsm]}}
if cutoff:
expected[rec[0]][rec[1]][rec[2]] = [rec[3], am_psm, fqpsm, rec[6]]
for line in self.tsv_generator(self.resultfn):
for setname, fields in expected[line[accession]].items():
for field, exp_val in fields.items():
setfield = '{}_{}'.format(setname, field)
if cutoff and exp_val[3] > cutoff:
exp_val = ['NA', 'NA']
if line[setfield] != 'NA':
self.assertAlmostEqual(float(line[setfield]), exp_val[0])
else:
self.assertEqual(exp_val[0], 'NA')
if not nopsms:
nr_psms = line['{} - Quanted PSM count'.format(setfield)]
self.assertEqual(nr_psms, str(exp_val[1]))
if not nopsms:
nr_fqpsms = line['{}_Fully quanted PSM count'.format(setname)]
self.assertEqual(nr_fqpsms, str(exp_val[2]))
expected.pop(line[accession])
self.check_exp_empty(expected, cutoff)
def check_protein_data(self, centrictype, sql, psm_sql):
centric = {'proteincentric': 'pc', 'genecentric': 'gc',
'assoccentric': 'ac'}[centrictype]
expected = {rec[0]: [x if x else 'NA' for x in rec[1:]] for rec in
self.get_values_from_db(self.dbfile, sql)}
pdatalup = {
'pc': {'acc': 'Protein ID', 'fields': [('Gene ID', 0), ('Gene Name', 1), ('Coverage', 3)]},
'gc': {'acc': 'Gene ID', 'fields': [('Gene Name', 0), ('Protein ID(s)', 1)]},
'ac': {'acc': 'Gene Name', 'fields': [('Gene ID', 0), ('Protein ID(s)', 1)]},
}
for row in self.tsv_generator(self.resultfn):
acc = row[pdatalup[centric]['acc']]
for (field, ix) in pdatalup[centric]['fields']:
self.assertEqual(set(str(row[field]).split(';')), set(str(expected[acc][ix]).split(',')))
self.assertEqual(row['Description'], expected[acc][2])
expected, unipeps = {}, {}
for rec in self.get_values_from_db(self.dbfile, psm_sql):
pacc = rec[0]
try:
unipeps[rec[2]].add(pacc)
except KeyError:
unipeps[rec[2]] = set([pacc])
try:
expected[pacc]['psms'].add(rec[1])
except KeyError:
expected[pacc] = {'psms': set([rec[1]]), 'pep': set([rec[2]]),
'unipep': 0}
else:
expected[pacc]['pep'].add(rec[2])
for pep, prot in unipeps.items():
if len(prot) == 1:
expected[prot.pop()]['unipep'] += 1
for protein in self.tsv_generator(self.resultfn):
pacc = protein[pdatalup[centric]['acc']]
poolname = 'Set1'
self.assertEqual(protein['{}_Unique peptide count'.format(poolname)],
str(expected[pacc]['unipep']))
self.assertEqual(protein['{}_Peptide count'.format(poolname)],
str(len(expected[pacc]['pep'])))
self.assertEqual(protein['{}_PSM count'.format(poolname)],
str(len(expected[pacc]['psms'])))
| 39.43287
| 114
| 0.532962
|
85ced520c9935a7bbd3491364d2ae154ebc498f0
| 6,792
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/thioalkalimicrobiumaerophilum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/thioalkalimicrobiumaerophilum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/thioalkalimicrobiumaerophilum.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Thioalkalimicrobium aerophilum.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:36:28.452014
The undirected graph Thioalkalimicrobium aerophilum has 2061 nodes and
129705 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.06110 and has 4 connected components, where the
component with most nodes has 2052 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 104, the mean node degree
is 125.87, and the node degree mode is 2. The top 5 most central nodes
are 717772.THIAE_10085 (degree 897), 717772.THIAE_07710 (degree 770), 717772.THIAE_06645
(degree 732), 717772.THIAE_03260 (degree 679) and 717772.THIAE_09745 (degree
623).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ThioalkalimicrobiumAerophilum
# Then load the graph
graph = ThioalkalimicrobiumAerophilum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ThioalkalimicrobiumAerophilum(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Thioalkalimicrobium aerophilum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
    cache_path: str = "graphs/string",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of the Thioalkalimicrobium aerophilum graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:36:28.452014
The undirected graph Thioalkalimicrobium aerophilum has 2061 nodes and
129705 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.06110 and has 4 connected components, where the
component with most nodes has 2052 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 104, the mean node degree
is 125.87, and the node degree mode is 2. The top 5 most central nodes
are 717772.THIAE_10085 (degree 897), 717772.THIAE_07710 (degree 770), 717772.THIAE_06645
(degree 732), 717772.THIAE_03260 (degree 679) and 717772.THIAE_09745 (degree
623).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ThioalkalimicrobiumAerophilum
# Then load the graph
graph = ThioalkalimicrobiumAerophilum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="ThioalkalimicrobiumAerophilum",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.560209
| 223
| 0.708186
|
d4f09e7c7e695a027cd5f9335e372eac4fe053bc
| 1,637
|
py
|
Python
|
ooobuild/lo/util/x_refresh_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/util/x_refresh_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/util/x_refresh_listener.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto-generated file produced by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.util
import typing
from abc import abstractmethod
from ..lang.x_event_listener import XEventListener as XEventListener_c7230c4a
if typing.TYPE_CHECKING:
from ..lang.event_object import EventObject as EventObject_a3d70b03
class XRefreshListener(XEventListener_c7230c4a):
"""
makes it possible to receive refreshed events.
See Also:
`API XRefreshListener <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1util_1_1XRefreshListener.html>`_
"""
__ooo_ns__: str = 'com.sun.star.util'
__ooo_full_ns__: str = 'com.sun.star.util.XRefreshListener'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.util.XRefreshListener'
@abstractmethod
def refreshed(self, rEvent: 'EventObject_a3d70b03') -> None:
"""
is called when the object data is refreshed.
"""
__all__ = ['XRefreshListener']
| 34.829787
| 135
| 0.745266
|
c361ee9a1f7457902ed58797b4cd61b9315f1372
| 5,228
|
py
|
Python
|
desktop/libs/hadoop/src/hadoop/fs/upload.py
|
abayer/hue
|
27213bb8fe89cdf0547109081e9f29c03bcc8ca5
|
[
"Apache-2.0"
] | 1
|
2020-01-09T15:48:22.000Z
|
2020-01-09T15:48:22.000Z
|
desktop/libs/hadoop/src/hadoop/fs/upload.py
|
abayer/hue
|
27213bb8fe89cdf0547109081e9f29c03bcc8ca5
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/hadoop/src/hadoop/fs/upload.py
|
abayer/hue
|
27213bb8fe89cdf0547109081e9f29c03bcc8ca5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classes for a custom upload handler to stream into HDFS.
Note that since our middlewares inspect request.POST, we cannot inject a custom
handler into a specific view. Therefore we always use the HDFSfileUploadHandler,
which is triggered by a magic prefix ("HDFS") in the field name.
See http://docs.djangoproject.com/en/1.2/topics/http/file-uploads/
"""
import errno
import logging
import time
from django.core.files.uploadhandler import \
FileUploadHandler, StopFutureHandlers, StopUpload
from desktop.lib import fsmanager
import hadoop.fs.hadoopfs
UPLOAD_SUBDIR = 'hue-uploads'
LOG = logging.getLogger(__name__)
class HDFSerror(Exception):
pass
class HDFStemporaryUploadedFile(hadoop.fs.hadoopfs.FileUpload):
"""
A temporary HDFS file to store upload data.
This class does not have any file read methods.
"""
def __init__(self, request, name):
self.name = name
self.size = None
self._do_cleanup = False
try:
self._fs = request.fs
except AttributeError:
_, self._fs = fsmanager.get_default_hdfs()
# Don't want to handle this upload if we don't have an HDFS
if not self._fs:
raise HDFSerror("No HDFS found")
# We want to set the user to be the superuser. But any operation
# in the fs needs a username, including the retrieval of the superuser.
# So we first set it to the DEFAULT_USER to break this chicken-&-egg.
self._fs.setuser(hadoop.fs.hadoopfs.DEFAULT_USER)
self._fs.setuser(self._fs.superuser)
self._path = self._fs.mktemp(
      subdir=UPLOAD_SUBDIR,
prefix='tmp.%s' % (request.environ['REMOTE_ADDR'],))
# Make the tmp dir 0777
self._fs.chmod(self._fs.dirname(self._path), 0777)
hadoop.fs.hadoopfs.FileUpload.__init__(self, self._fs, self._path)
self._do_cleanup = True
def __del__(self):
if self._do_cleanup:
# Do not do cleanup here. It's hopeless. The self._fs threadlocal states
# are going to be all wrong.
LOG.error("Left-over upload file is not cleaned up: %s" % (self._path,))
def get_temp_path(self):
return self._path
def finish_upload(self, size):
try:
self.size = size
self.close()
except Exception, ex:
LOG.exception('Error uploading file to %s' % (self.path,))
raise
def remove(self):
try:
self._fs.remove(self._path)
self._do_cleanup = False
except IOError, ex:
if ex.errno != errno.ENOENT:
LOG.exception('Failed to remove temporary upload file "%s". '
'Please cleanup manually: %s' % (self._path, ex))
class HDFSfileUploadHandler(FileUploadHandler):
"""
Handle file upload by storing data in a temp HDFS file.
This handler is triggered by any upload field whose name starts with
"HDFS" (case insensitive).
"""
def __init__(self, request):
FileUploadHandler.__init__(self, request)
self._file = None
self._starttime = 0
self._activated = False
def new_file(self, field_name, file_name, *args, **kwargs):
# Detect "HDFS" in the field name.
# NOTE: The user is not authenticated at this point, and it's
# very difficult to do so because we handle upload before
# running the auth middleware.
if field_name.upper().startswith('HDFS'):
try:
self._file = HDFStemporaryUploadedFile(self.request, file_name)
except (HDFSerror, IOError), ex:
LOG.error("Not using HDFS upload handler: %s" % (ex,))
return
LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
self._activated = True
self._starttime = time.time()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
if not self._activated:
return raw_data
try:
self._file.write(raw_data)
self._file.flush()
return None
except IOError:
LOG.exception('Error storing upload data in temporary file "%s"' %
(self._file.get_temp_path(),))
raise StopUpload()
def file_complete(self, file_size):
if not self._activated:
return None
try:
self._file.finish_upload(file_size)
except IOError:
LOG.exception('Error closing uploaded temporary file "%s"' %
(self._file.get_temp_path(),))
raise
elapsed = time.time() - self._starttime
LOG.debug('Uploaded %s bytes to HDFS in %s seconds' % (file_size, elapsed))
return self._file
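# --- Illustrative sketch (not part of the original module) -------------------
# Handlers like the one above are normally activated through Django's
# FILE_UPLOAD_HANDLERS setting, or inserted per request before request.POST is
# first accessed. The helper below only demonstrates the per-request form and
# is an assumption for illustration, not existing Hue code.
def _example_activate_hdfs_handler(request):
  """Put the HDFS handler at the front of the handler chain for one request."""
  request.upload_handlers.insert(0, HDFSfileUploadHandler(request))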
| 32.47205
| 80
| 0.688982
|
0419b2e1d88c6040202150b709038c2b9d54f87f
| 3,924
|
py
|
Python
|
Bio/motifs/xms.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 2,856
|
2015-01-01T07:10:06.000Z
|
2022-03-31T18:17:25.000Z
|
Bio/motifs/xms.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 3,429
|
2015-01-05T11:11:42.000Z
|
2022-03-31T13:08:10.000Z
|
Bio/motifs/xms.py
|
lukasz-kozlowski/biopython
|
6b601cf09234e1e82cfc94ad5030389036cb6343
|
[
"BSD-3-Clause"
] | 1,619
|
2015-01-05T13:07:11.000Z
|
2022-03-31T19:19:52.000Z
|
# Copyright 2015 by Gert Hulselmans. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Parse XMS motif files."""
from Bio import motifs
from xml.dom import minidom, Node
import re
class XMSScanner:
"""Class for scanning XMS XML file."""
def __init__(self, doc):
"""Generate motif Record from xms document, an XML-like motif pfm file."""
self.record = Record()
for child in doc.getElementsByTagName("motif"):
if child.nodeType == Node.ELEMENT_NODE:
self.handle_motif(child)
def handle_motif(self, node):
"""Read the motif's name and column from the node and add the motif record."""
motif_name = self.get_text(node.getElementsByTagName("name"))
nucleotide_counts = {"A": [], "C": [], "G": [], "T": []}
for column in node.getElementsByTagName("column"):
            for nucleotide, nucleotide_count in zip(
                ["A", "C", "G", "T"], self.get_acgt(column)
            ):
                nucleotide_counts[nucleotide].append(float(nucleotide_count))
motif = motifs.Motif(alphabet="GATC", counts=nucleotide_counts)
motif.name = motif_name
self.record.append(motif)
def get_property_value(self, node, key_name):
"""Extract the value of the motif's property named key_name from node."""
for cur_property in node.getElementsByTagName("prop"):
right_property = False
cur_value = None
for child in cur_property.childNodes:
if child.nodeType != Node.ELEMENT_NODE:
continue
if child.tagName == "key" and self.get_text([child]) == key_name:
right_property = True
if child.tagName == "value":
cur_value = self.get_text([child])
if right_property:
return cur_value
return None
def get_acgt(self, node):
"""Get and return the motif's weights of A, C, G, T."""
a, c, g, t = 0.0, 0.0, 0.0, 0.0
for weight in node.getElementsByTagName("weight"):
if weight.getAttribute("symbol") == "adenine":
a = float(self.get_text([weight]))
elif weight.getAttribute("symbol") == "cytosine":
c = float(self.get_text([weight]))
elif weight.getAttribute("symbol") == "guanine":
g = float(self.get_text([weight]))
elif weight.getAttribute("symbol") == "thymine":
t = float(self.get_text([weight]))
return a, c, g, t
def get_text(self, nodelist):
"""Return a string representation of the motif's properties listed on nodelist ."""
retlist = []
for node in nodelist:
if node.nodeType == Node.TEXT_NODE:
retlist.append(node.wholeText)
            elif node.hasChildNodes():
retlist.append(self.get_text(node.childNodes))
return re.sub(r"\s+", " ", "".join(retlist))
class Record(list):
"""Class to store the information in a XMS matrix table.
The record inherits from a list containing the individual motifs.
"""
def __str__(self):
"""Return a string representation of the motifs in the Record object."""
return "\n".join(str(motif) for motif in self)
def read(handle):
"""Read motifs in XMS matrix format from a file handle.
XMS is an XML format for describing regulatory motifs and PSSMs.
This format was defined by Thomas Down, and used in the NestedMICA and MotifExplorer programs.
"""
xms_doc = minidom.parse(handle)
record = XMSScanner(xms_doc).record
return record
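# Minimal usage sketch (illustrative, not part of the original Biopython
# module); the file name "example.xms" is a placeholder assumption.
def _example_read_xms(path="example.xms"):
    """Parse an XMS file and print each motif's name and consensus."""
    with open(path) as handle:
        record = read(handle)
    for motif in record:
        print(motif.name, motif.consensus)
    return record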
| 36.672897
| 98
| 0.603721
|
55c430fb3f1eb7ccb30dabc9a121fa578c877b72
| 3,346
|
py
|
Python
|
tests/src/HDP/prediction/smote.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 9
|
2017-07-27T10:32:48.000Z
|
2021-07-01T11:51:51.000Z
|
tests/src/HDP/prediction/smote.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 11
|
2016-03-15T16:27:47.000Z
|
2019-09-05T02:25:08.000Z
|
tests/src/HDP/prediction/smote.py
|
bellwethers-in-se/issueCloseTime
|
e5e00c9625da0793dc8e7985fd88b0ca0b35f7d3
|
[
"MIT"
] | 5
|
2017-01-28T22:45:34.000Z
|
2019-12-04T13:15:10.000Z
|
from collections import Counter
from sklearn.neighbors import NearestNeighbors, BallTree, KDTree
import numpy as np
from scipy.spatial.distance import euclidean
from random import choice, seed as rseed, uniform as rand
from pdb import set_trace
import pandas as pd
def SMOTE(data=None, atleast=50, atmost=100, a=None, b=None, k=5, resample=False):
"""
Synthetic Minority Oversampling Technique
"""
def knn(a, b):
"k nearest neighbors"
b = np.array([bb[:-1] for bb in b])
tree = BallTree(b)
        # BallTree.query expects a 2-D array; use the first (only) row of indices
        __, indx = tree.query([a[:-1]], k=6)
        return [b[i] for i in indx[0]]
# set_trace()
# return sorted(b, key=lambda F: euclidean(a[:-1], F[:-1]))
    def kfn(me, others):
        "k farthest neighbors"
        # sort candidates by distance from `me`, farthest first
        return sorted(others, key=lambda F: euclidean(me[:-1], F[:-1]), reverse=True)
def extrapolate(one, two):
# t=time()
new = len(one) * [None]
new[:-1] = [a + rand(0, 1) * (b - a) for
a, b in zip(one[:-1], two[:-1])]
new[-1] = int(one[-1])
return new
def populate(data, atleast):
newData = [dd.tolist() for dd in data]
if atleast - len(newData) < 0:
try:
return [choice(newData) for _ in xrange(atleast)]
except:
set_trace()
else:
for _ in xrange(atleast - len(newData)):
one = choice(data)
neigh = knn(one, data)[1:k + 1]
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
return newData
def populate2(data1, data2):
newData = []
for _ in xrange(atleast):
for one in data1:
neigh = kfn(one, data)[1:k + 1]
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
return [choice(newData) for _ in xrange(atleast)]
def depopulate(data):
# if resample:
# newer = []
# for _ in xrange(atmost):
# orig = choice(data)
# newer.append(extrapolate(orig, knn(orig, data)[1]))
# return newer
# else:
return [choice(data).tolist() for _ in xrange(atmost)]
newCells = []
# rseed(1)
klass = lambda df: df[df.columns[-1]]
count = Counter(klass(data))
# set_trace()
atleast = 50 # if a==None else int(a*max([count[k] for k in count.keys()]))
atmost = 100 # if b==None else int(b*max([count[k] for k in count.keys()]))
try:
major, minor = count.keys()
except:
set_trace()
for u in count.keys():
if u == minor:
newCells.extend(populate([r for r in data.as_matrix() if r[-1] == u], atleast=atleast))
if u == major:
newCells.extend(depopulate([r for r in data.as_matrix() if r[-1] == u]))
else:
newCells.extend([r.tolist() for r in data.as_matrix() if r[-1] == u])
# set_trace()
return pd.DataFrame(newCells, columns=data.columns)
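# Minimal usage sketch (illustrative, not part of the original file): a toy
# two-class DataFrame whose last column is the label, balanced with SMOTE.
# It assumes the same Python 2 / old-pandas environment as the rest of this
# module (xrange, DataFrame.as_matrix).
def _example_smote_usage():
    toy = pd.DataFrame({
        "f1": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
        "f2": [1.0, 0.9, 0.8, 0.7, 0.6, 0.5],
        "label": [0, 0, 0, 0, 1, 1],
    })
    return SMOTE(data=toy)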
def __test_smote():
"""
A test case goes here
:return:
"""
pass
if __name__=="__main__":
__test_smote()
| 30.981481
| 99
| 0.527197
|
6d6ba65def40f3fbb5a99e9f9f303fc0459daea4
| 6,355
|
py
|
Python
|
utility/uwsgi/daemon.py
|
sbofgayschool/KV2
|
b894ac3ddbc389df642fc1bcb730cb0d3310ad4c
|
[
"MIT"
] | 3
|
2020-01-14T14:44:59.000Z
|
2020-01-15T23:52:42.000Z
|
utility/uwsgi/daemon.py
|
ririripley/KV2
|
b894ac3ddbc389df642fc1bcb730cb0d3310ad4c
|
[
"MIT"
] | null | null | null |
utility/uwsgi/daemon.py
|
ririripley/KV2
|
b894ac3ddbc389df642fc1bcb730cb0d3310ad4c
|
[
"MIT"
] | 1
|
2020-12-07T16:15:43.000Z
|
2020-12-07T16:15:43.000Z
|
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2020 SBofGaySchoolBuPaAnything
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = "chenty"
import json
import configparser
import subprocess
import os
import signal
from utility.function import get_logger, log_output, transform_address
def run(module_name="Gateway", uwsgi_conf_path="config/uwsgi.json"):
"""
Load config and run uwsgi
:param module_name: Name of the caller module
:param uwsgi_conf_path: Path to uwsgi config file
:return: None
"""
# Load config file
with open(uwsgi_conf_path, "r") as f:
config = json.load(f)
# Generate daemon logger from config file
# If logger arguments exist in config file, write the log to the designated file
# Else, forward the log to standard output
if "log_daemon" in config["daemon"]:
daemon_logger = get_logger(
"uwsgi_daemon",
config["daemon"]["log_daemon"]["info"],
config["daemon"]["log_daemon"]["error"]
)
else:
daemon_logger = get_logger("uwsgi_daemon", None, None)
daemon_logger.info("%s uwsgi_daemon program started." % module_name)
# Generate uwsgi logger forwarding uwsgi log to designated location
if "log_uwsgi" in config["daemon"]:
uwsgi_logger = get_logger(
"uwsgi",
config["daemon"]["log_uwsgi"]["info"],
config["daemon"]["log_uwsgi"]["error"],
True
)
else:
uwsgi_logger = get_logger("uwsgi", None, None, True)
# Generate ini config file for uwsgi
ini = configparser.ConfigParser()
ini["uwsgi"] = {
"http": config["uwsgi"]["host"] + ":" + config["uwsgi"]["port"],
"module": config["uwsgi"]["module"],
"processes": config["uwsgi"]["processes"],
"threads": config["uwsgi"]["threads"],
"master": config["uwsgi"]["master"]
}
with open(config["uwsgi"]["exe"][-1], "w") as f:
ini.write(f)
daemon_logger.info("Generated ini file for uwsgi.")
command = config["uwsgi"]["exe"]
for c in command:
daemon_logger.info("Starting uwsgi with command: " + c)
# Run uwsgi in a subprocess.
uwsgi_proc = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
# Log the raw output of uwsgi until it exit or terminated
try:
log_output(uwsgi_logger, uwsgi_proc.stdout, None)
daemon_logger.info("Received EOF from uwsgi.")
except KeyboardInterrupt:
daemon_logger.info("Received SIGINT. Killing uwsgi process.", exc_info=True)
os.kill(uwsgi_proc.pid, signal.SIGINT)
except:
daemon_logger.error("Accidentally terminated. Killing uwsgi process.", exc_info=True)
uwsgi_proc.terminate()
    # Wait for the subprocess to prevent leaving a zombie process behind
uwsgi_proc.wait()
daemon_logger.info("%s uwsgi_daemon program exiting." % module_name)
return
def command_parser(parser):
"""
Add uwsgi args to args parser
:param parser: The args parser
:return: Callback function to modify config
"""
# Add needed args
parser.add_argument("--uwsgi-host", dest="uwsgi_host", default=None,
help="Listen address of uwsgi")
parser.add_argument("--uwsgi-port", type=int, dest="uwsgi_port", default=None,
help="Listen port of uwsgi")
parser.add_argument("--uwsgi-processes", type=int, dest="uwsgi_processes", default=None,
help="Number of process of the uwsgi")
parser.add_argument("--uwsgi-threads", type=int, dest="uwsgi_threads", default=None,
help="Number of thread in each the uwsgi process")
parser.add_argument("--uwsgi-print-log", dest="uwsgi_print_log", action="store_const", const=True, default=False,
help="Print the log of uwsgi module to stdout")
def conf_generator(args, config_sub, client, services, start_order):
"""
Callback function to modify uwsgi configuration according to parsed args
:param args: Parse args
:param config_sub: Template config
:param client: Docker client
:param services: Dictionary of services
:param start_order: List of services in starting order
:return: None
"""
# Modify config by parsed args
if args.uwsgi_host is not None:
config_sub["uwsgi"]["host"] = transform_address(args.uwsgi_host, client)
if args.uwsgi_port is not None:
config_sub["uwsgi"]["port"] = str(args.uwsgi_port)
if args.uwsgi_processes is not None:
config_sub["uwsgi"]["processes"] = args.uwsgi_processes
if args.uwsgi_threads is not None:
config_sub["uwsgi"]["threads"] = args.uwsgi_threads
if args.uwsgi_print_log:
config_sub["daemon"].pop("log_daemon", None)
config_sub["daemon"].pop("log_uwsgi", None)
config_sub["server"].pop("log_daemon", None)
# Generate information for execution
services["uwsgi"] = {
"pid_file": config_sub["daemon"]["pid_file"],
"command": config_sub["daemon"]["exe"],
"process": None
}
start_order.append("uwsgi")
return
return conf_generator
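# Minimal wiring sketch (illustrative; the template config dictionary below is
# a placeholder assumption, not the project's real uwsgi.json contents).
def _example_wire_uwsgi_args():
    import argparse
    parser = argparse.ArgumentParser(description="uwsgi daemon options")
    conf_generator = command_parser(parser)
    args = parser.parse_args(["--uwsgi-port", "8080", "--uwsgi-processes", "2"])
    config_sub = {
        "uwsgi": {"host": "127.0.0.1", "port": "8000", "processes": 1, "threads": 1},
        "daemon": {"pid_file": "uwsgi.pid", "exe": ["python", "-m", "utility.uwsgi.daemon"]},
        "server": {},
    }
    services, start_order = {}, []
    conf_generator(args, config_sub, None, services, start_order)
    return config_sub, services, start_order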
| 39.47205
| 117
| 0.654603
|
e26c34124fd2b4407c4cfd7317a3ae5c2eb07e74
| 22,802
|
py
|
Python
|
Benchmark_Approaches/IGSQL/model_util.py
|
xjtu-intsoft/chase-page
|
84657a6b80e926b91ff99a78e660ba8f603321f4
|
[
"MIT"
] | 34
|
2021-05-07T06:59:33.000Z
|
2022-03-15T02:36:19.000Z
|
Benchmark_Approaches/IGSQL/model_util.py
|
xjtu-intsoft/chase-page
|
84657a6b80e926b91ff99a78e660ba8f603321f4
|
[
"MIT"
] | null | null | null |
Benchmark_Approaches/IGSQL/model_util.py
|
xjtu-intsoft/chase-page
|
84657a6b80e926b91ff99a78e660ba8f603321f4
|
[
"MIT"
] | 2
|
2021-08-17T12:23:03.000Z
|
2021-10-12T04:10:41.000Z
|
"""Basic model training and evaluation functions."""
from enum import Enum
import random
import sys
import json
import progressbar
from model import torch_utils
from data_util import sql_util
import torch
def write_prediction(fileptr,
identifier,
input_seq,
probability,
prediction,
flat_prediction,
gold_query,
flat_gold_queries,
gold_tables,
index_in_interaction,
database_username,
database_password,
database_timeout,
compute_metrics=True):
pred_obj = {}
pred_obj["identifier"] = identifier
if len(identifier.split('/')) == 2:
database_id, interaction_id = identifier.split('/')
else:
database_id = 'atis'
interaction_id = identifier
pred_obj["database_id"] = database_id
pred_obj["interaction_id"] = interaction_id
pred_obj["input_seq"] = input_seq
pred_obj["probability"] = probability
pred_obj["prediction"] = prediction
pred_obj["flat_prediction"] = flat_prediction
pred_obj["gold_query"] = gold_query
pred_obj["flat_gold_queries"] = flat_gold_queries
pred_obj["index_in_interaction"] = index_in_interaction
pred_obj["gold_tables"] = str(gold_tables)
# Now compute the metrics we want.
if compute_metrics:
# First metric: whether flat predicted query is in the gold query set.
correct_string = " ".join(flat_prediction) in [
" ".join(q) for q in flat_gold_queries]
pred_obj["correct_string"] = correct_string
# Database metrics
if not correct_string:
syntactic, semantic, pred_table = sql_util.execution_results(
" ".join(flat_prediction), database_username, database_password, database_timeout)
pred_table = sorted(pred_table)
best_prec = 0.
best_rec = 0.
best_f1 = 0.
for gold_table in gold_tables:
num_overlap = float(len(set(pred_table) & set(gold_table)))
if len(set(gold_table)) > 0:
prec = num_overlap / len(set(gold_table))
else:
prec = 1.
if len(set(pred_table)) > 0:
rec = num_overlap / len(set(pred_table))
else:
rec = 1.
if prec > 0. and rec > 0.:
f1 = (2 * (prec * rec)) / (prec + rec)
else:
                    # either precision or recall is 0, so F1 is 0
                    f1 = 0.
best_prec = max(best_prec, prec)
best_rec = max(best_rec, rec)
best_f1 = max(best_f1, f1)
else:
syntactic = True
semantic = True
pred_table = []
best_prec = 1.
best_rec = 1.
best_f1 = 1.
assert best_prec <= 1.
assert best_rec <= 1.
assert best_f1 <= 1.
pred_obj["syntactic"] = syntactic
pred_obj["semantic"] = semantic
correct_table = (pred_table in gold_tables) or correct_string
pred_obj["correct_table"] = correct_table
pred_obj["strict_correct_table"] = correct_table and syntactic
pred_obj["pred_table"] = str(pred_table)
pred_obj["table_prec"] = best_prec
pred_obj["table_rec"] = best_rec
pred_obj["table_f1"] = best_f1
fileptr.write(json.dumps(pred_obj, ensure_ascii=False) + "\n")
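# Worked example of the table-overlap numbers above (illustrative values only):
# a 3-row predicted table sharing 2 rows with a 4-row gold table gives
# prec = 2/4 = 0.5, rec = 2/3 ~= 0.667 and f1 = 2*(0.5*0.667)/(0.5+0.667) ~= 0.571,
# using prec/rec exactly as they are defined in the code above.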
class Metrics(Enum):
"""Definitions of simple metrics to compute."""
LOSS = 1
TOKEN_ACCURACY = 2
STRING_ACCURACY = 3
CORRECT_TABLES = 4
STRICT_CORRECT_TABLES = 5
SEMANTIC_QUERIES = 6
SYNTACTIC_QUERIES = 7
def get_progressbar(name, size):
"""Gets a progress bar object given a name and the total size.
Inputs:
name (str): The name to display on the side.
size (int): The maximum size of the progress bar.
"""
return progressbar.ProgressBar(maxval=size,
widgets=[name,
progressbar.Bar('=', '[', ']'),
' ',
progressbar.Percentage(),
' ',
progressbar.ETA()])
def train_epoch_with_utterances(batches,
model,
randomize=True):
"""Trains model for a single epoch given batches of utterance data.
Inputs:
batches (UtteranceBatch): The batches to give to training.
        model (ATISModel): The model object.
randomize (bool): Whether or not to randomize the order that the batches are seen.
"""
if randomize:
random.shuffle(batches)
progbar = get_progressbar("train ", len(batches))
progbar.start()
loss_sum = 0.
for i, batch in enumerate(batches):
batch_loss = model.train_step(batch)
loss_sum += batch_loss
progbar.update(i)
progbar.finish()
total_loss = loss_sum / len(batches)
return total_loss
def train_epoch_with_interactions(interaction_batches,
params,
model,
randomize=True,
db2id=None,
id2db=None,
step=None):
"""Trains model for single epoch given batches of interactions.
Inputs:
interaction_batches (list of InteractionBatch): The batches to train on.
params (namespace): Parameters to run with.
model (ATISModel): Model to train.
randomize (bool): Whether or not to randomize the order that batches are seen.
"""
if randomize:
random.shuffle(interaction_batches)
progbar = get_progressbar("train ", len(interaction_batches))
progbar.start()
loss_sum = 0.
skip_ls = ["sakila_1", "baseball_1", "soccer_1", "cre_Drama_Workshop_Groups", "formula_1", "assets_maintenance/8"]
skip_num = 0
for i, interaction_batch in enumerate(interaction_batches):
assert len(interaction_batch) == 1
interaction = interaction_batch.items[0]
if interaction.identifier == "raw/atis2/12-1.1/ATIS2/TEXT/TEST/NOV92/770/5":
continue
if "baseball_1" in interaction.identifier:
continue
skip = False
if 'cosql' in params.data_directory:
print(interaction.identifier, i, skip_num)
for ele in skip_ls:
if ele in interaction.identifier:
print("skip")
skip = True
continue
if skip:
print("skip, length:", len(interaction.gold_utterances()))
skip_num += 1
continue
batch_loss, step = model.train_step(interaction, params.train_maximum_sql_length, db2id=db2id, id2db=id2db, step=step)
loss_sum += batch_loss
#torch.cuda.empty_cache()
progbar.update(i)
progbar.finish()
total_loss = loss_sum / len(interaction_batches)
return total_loss, step
def update_sums(metrics,
metrics_sums,
predicted_sequence,
flat_sequence,
gold_query,
original_gold_query,
gold_forcing=False,
loss=None,
token_accuracy=0.,
database_username="",
database_password="",
database_timeout=0,
gold_table=None):
"""" Updates summing for metrics in an aggregator.
TODO: don't use sums, just keep the raw value.
"""
if Metrics.LOSS in metrics:
metrics_sums[Metrics.LOSS] += loss.item()
if Metrics.TOKEN_ACCURACY in metrics:
if gold_forcing:
metrics_sums[Metrics.TOKEN_ACCURACY] += token_accuracy
else:
num_tokens_correct = 0.
for j, token in enumerate(gold_query):
if len(
predicted_sequence) > j and predicted_sequence[j] == token:
num_tokens_correct += 1
metrics_sums[Metrics.TOKEN_ACCURACY] += num_tokens_correct / \
len(gold_query)
if Metrics.STRING_ACCURACY in metrics:
metrics_sums[Metrics.STRING_ACCURACY] += int(
flat_sequence == original_gold_query)
if Metrics.CORRECT_TABLES in metrics:
assert database_username, "You did not provide a database username"
assert database_password, "You did not provide a database password"
assert database_timeout > 0, "Database timeout is 0 seconds"
# Evaluate SQL
if flat_sequence != original_gold_query:
syntactic, semantic, table = sql_util.execution_results(
" ".join(flat_sequence), database_username, database_password, database_timeout)
else:
syntactic = True
semantic = True
table = gold_table
metrics_sums[Metrics.CORRECT_TABLES] += int(table == gold_table)
if Metrics.SYNTACTIC_QUERIES in metrics:
metrics_sums[Metrics.SYNTACTIC_QUERIES] += int(syntactic)
if Metrics.SEMANTIC_QUERIES in metrics:
metrics_sums[Metrics.SEMANTIC_QUERIES] += int(semantic)
if Metrics.STRICT_CORRECT_TABLES in metrics:
metrics_sums[Metrics.STRICT_CORRECT_TABLES] += int(
table == gold_table and syntactic)
def construct_averages(metrics_sums, total_num):
""" Computes the averages for metrics.
Inputs:
metrics_sums (dict Metric -> float): Sums for a metric.
total_num (int): Number to divide by (average).
"""
metrics_averages = {}
for metric, value in metrics_sums.items():
metrics_averages[metric] = value / total_num
if metric != "loss":
metrics_averages[metric] *= 100.
return metrics_averages
def evaluate_utterance_sample(sample,
model,
max_generation_length,
name="",
gold_forcing=False,
metrics=None,
total_num=-1,
database_username="",
database_password="",
database_timeout=0,
write_results=False):
"""Evaluates a sample of utterance examples.
Inputs:
sample (list of Utterance): Examples to evaluate.
model (ATISModel): Model to predict with.
max_generation_length (int): Maximum length to generate.
name (str): Name to log with.
gold_forcing (bool): Whether to force the gold tokens during decoding.
metrics (list of Metric): Metrics to evaluate with.
total_num (int): Number to divide by when reporting results.
database_username (str): Username to use for executing queries.
database_password (str): Password to use when executing queries.
database_timeout (float): Timeout on queries when executing.
write_results (bool): Whether to write the results to a file.
"""
assert metrics
if total_num < 0:
total_num = len(sample)
metrics_sums = {}
for metric in metrics:
metrics_sums[metric] = 0.
predictions_file = open(name + "_predictions.json", "w")
print("Predicting with filename " + str(name) + "_predictions.json")
progbar = get_progressbar(name, len(sample))
progbar.start()
predictions = []
for i, item in enumerate(sample):
_, loss, predicted_seq = model.eval_step(
item, max_generation_length, feed_gold_query=gold_forcing)
loss = loss / len(item.gold_query())
predictions.append(predicted_seq)
flat_sequence = item.flatten_sequence(predicted_seq)
token_accuracy = torch_utils.per_token_accuracy(
item.gold_query(), predicted_seq)
if write_results:
write_prediction(
predictions_file,
identifier=item.interaction.identifier,
input_seq=item.input_sequence(),
probability=0,
prediction=predicted_seq,
flat_prediction=flat_sequence,
gold_query=item.gold_query(),
flat_gold_queries=item.original_gold_queries(),
gold_tables=item.gold_tables(),
index_in_interaction=item.utterance_index,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout)
update_sums(metrics,
metrics_sums,
predicted_seq,
flat_sequence,
item.gold_query(),
item.original_gold_queries()[0],
gold_forcing,
loss,
token_accuracy,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout,
gold_table=item.gold_tables()[0])
progbar.update(i)
progbar.finish()
predictions_file.close()
return construct_averages(metrics_sums, total_num), None
def evaluate_interaction_sample(sample,
model,
max_generation_length,
name="",
gold_forcing=False,
metrics=None,
total_num=-1,
database_username="",
database_password="",
database_timeout=0,
use_predicted_queries=False,
write_results=False,
use_gpu=False,
compute_metrics=False):
""" Evaluates a sample of interactions. """
predictions_file = open(name + "_predictions.json", "w")
print("Predicting with file " + str(name + "_predictions.json"))
metrics_sums = {}
for metric in metrics:
metrics_sums[metric] = 0.
progbar = get_progressbar(name, len(sample))
progbar.start()
num_utterances = 0
ignore_with_gpu = [line.strip() for line in open(
"data/cpu_full_interactions.txt").readlines()]
predictions = []
use_gpu = not ("--no_gpus" in sys.argv or "--no_gpus=1" in sys.argv)
model.eval()
for i, interaction in enumerate(sample):
# if use_gpu and interaction.identifier in ignore_with_gpu:
# continue
# elif not use_gpu and interaction.identifier not in ignore_with_gpu:
# continue
try:
with torch.no_grad():
if use_predicted_queries:
example_preds = model.predict_with_predicted_queries(
interaction,
max_generation_length)
else:
example_preds = model.predict_with_gold_queries(
interaction,
max_generation_length,
feed_gold_query=gold_forcing)
#torch.cuda.empty_cache()
except RuntimeError as exception:
print("Failed on interaction: " + str(interaction.identifier))
print(exception)
print("\n\n")
exit()
predictions.extend(example_preds)
assert len(example_preds) == len(
interaction.interaction.utterances) or not example_preds
for j, pred in enumerate(example_preds):
num_utterances += 1
sequence, loss, token_accuracy, _, decoder_results = pred
if use_predicted_queries:
item = interaction.processed_utterances[j]
original_utt = interaction.interaction.utterances[item.index]
gold_query = original_utt.gold_query_to_use
original_gold_query = original_utt.original_gold_query
gold_table = original_utt.gold_sql_results
gold_queries = [q[0] for q in original_utt.all_gold_queries]
gold_tables = [q[1] for q in original_utt.all_gold_queries]
index = item.index
else:
item = interaction.gold_utterances()[j]
gold_query = item.gold_query()
original_gold_query = item.original_gold_query()
gold_table = item.gold_table()
gold_queries = item.original_gold_queries()
gold_tables = item.gold_tables()
index = item.utterance_index
if loss:
loss = loss / len(gold_query)
flat_sequence = item.flatten_sequence(sequence)
if write_results:
write_prediction(
predictions_file,
identifier=interaction.identifier,
input_seq=item.input_sequence(),
probability=decoder_results.probability,
prediction=sequence,
flat_prediction=flat_sequence,
gold_query=gold_query,
flat_gold_queries=gold_queries,
gold_tables=gold_tables,
index_in_interaction=index,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout,
compute_metrics=compute_metrics)
update_sums(metrics,
metrics_sums,
sequence,
flat_sequence,
gold_query,
original_gold_query,
gold_forcing,
loss,
token_accuracy,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout,
gold_table=gold_table)
progbar.update(i)
progbar.finish()
if total_num < 0:
total_num = num_utterances
predictions_file.close()
return construct_averages(metrics_sums, total_num), predictions
def evaluate_using_predicted_queries(sample,
model,
name="",
gold_forcing=False,
metrics=None,
total_num=-1,
database_username="",
database_password="",
database_timeout=0,
snippet_keep_age=1):
predictions_file = open(name + "_predictions.json", "w")
print("Predicting with file " + str(name + "_predictions.json"))
assert not gold_forcing
metrics_sums = {}
for metric in metrics:
metrics_sums[metric] = 0.
progbar = get_progressbar(name, len(sample))
progbar.start()
num_utterances = 0
predictions = []
for i, item in enumerate(sample):
int_predictions = []
item.start_interaction()
while not item.done():
utterance = item.next_utterance(snippet_keep_age)
predicted_sequence, loss, _, probability = model.eval_step(
utterance)
int_predictions.append((utterance, predicted_sequence))
flat_sequence = utterance.flatten_sequence(predicted_sequence)
if sql_util.executable(
flat_sequence,
username=database_username,
password=database_password,
timeout=database_timeout) and probability >= 0.24:
utterance.set_pred_query(
item.remove_snippets(predicted_sequence))
item.add_utterance(utterance,
item.remove_snippets(predicted_sequence),
previous_snippets=utterance.snippets())
else:
# Add the /previous/ predicted query, guaranteed to be syntactically
# correct
seq = []
utterance.set_pred_query(seq)
item.add_utterance(
utterance, seq, previous_snippets=utterance.snippets())
original_utt = item.interaction.utterances[utterance.index]
write_prediction(
predictions_file,
identifier=item.interaction.identifier,
input_seq=utterance.input_sequence(),
probability=probability,
prediction=predicted_sequence,
flat_prediction=flat_sequence,
gold_query=original_utt.gold_query_to_use,
flat_gold_queries=[
q[0] for q in original_utt.all_gold_queries],
gold_tables=[
q[1] for q in original_utt.all_gold_queries],
index_in_interaction=utterance.index,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout)
update_sums(metrics,
metrics_sums,
predicted_sequence,
flat_sequence,
original_utt.gold_query_to_use,
original_utt.original_gold_query,
gold_forcing,
loss,
token_accuracy=0,
database_username=database_username,
database_password=database_password,
database_timeout=database_timeout,
gold_table=original_utt.gold_sql_results)
predictions.append(int_predictions)
progbar.update(i)
progbar.finish()
if total_num < 0:
total_num = num_utterances
predictions_file.close()
return construct_averages(metrics_sums, total_num), predictions
| 36.95624
| 126
| 0.555302
|
777d00046f9c7faf78f9ee4605e1695c87e8c4f8
| 1,062
|
py
|
Python
|
Python_Exams/1_Exam_Arrays/Solutions/solution_exercise_3.py
|
pablogalve/Python-Learning
|
40e296ba416dffc3f9c794c7770e03cc2c9a53d0
|
[
"MIT"
] | 4
|
2020-01-27T13:46:19.000Z
|
2022-02-15T15:11:50.000Z
|
Python_Exams/1_Exam_Arrays/Solutions/solution_exercise_3.py
|
pablogalve/Python-Learning
|
40e296ba416dffc3f9c794c7770e03cc2c9a53d0
|
[
"MIT"
] | null | null | null |
Python_Exams/1_Exam_Arrays/Solutions/solution_exercise_3.py
|
pablogalve/Python-Learning
|
40e296ba416dffc3f9c794c7770e03cc2c9a53d0
|
[
"MIT"
] | null | null | null |
print("...CITIES...")
print("1-Add an element to the llist.")
print("2-Remove an element from the list.")
print("3-Find an element in the list.")
print("4-Exit the program")
list_variable = []
i = 0
chosen_option = int(input("Choose an option: "))
while (chosen_option != 4):
if (chosen_option == 1):
element = input("What city do you want to add to the list? ")
list_variable.append(element)
print(list_variable)
if (chosen_option == 2):
erase = input("What city do you want to remove from the list? ")
if erase in list_variable:
list_variable.remove(erase)
else:
print("Your element is not on the list.")
print(list_variable)
if (chosen_option == 3):
cont = 0
buscar=input("What city do you want to find in the list? ")
if buscar in list_variable:
            cont = list_variable.count(buscar)
print("Your city appears " + str(cont) + " times on the list.")
else:
print("Your word is not on the list.")
chosen_option=int(input("Choose another option: "))
print("The final list is: " + str(list_variable))
| 33.1875
| 69
| 0.653484
|
199f10c2bbe835dd35880e4a2bba5bdf1f3664e8
| 56
|
py
|
Python
|
cli/__init__.py
|
dimko/binaryalert
|
53d7c407a18f5c3153b6bb4a2ceaf21c8321e89e
|
[
"Apache-2.0"
] | 1,324
|
2017-07-26T03:46:03.000Z
|
2022-03-31T01:32:47.000Z
|
cli/__init__.py
|
abhinavbom/binaryalert
|
53d7c407a18f5c3153b6bb4a2ceaf21c8321e89e
|
[
"Apache-2.0"
] | 168
|
2017-07-26T05:48:01.000Z
|
2021-06-01T22:27:35.000Z
|
cli/__init__.py
|
abhinavbom/binaryalert
|
53d7c407a18f5c3153b6bb4a2ceaf21c8321e89e
|
[
"Apache-2.0"
] | 212
|
2017-07-26T04:05:11.000Z
|
2022-02-25T17:48:15.000Z
|
"""BinaryAlert release version"""
__version__ = '1.2.0'
| 18.666667
| 33
| 0.696429
|
3964f1144e51d9b7aa9fc2d8a201844148828aa5
| 2,222
|
py
|
Python
|
entity_history/migrations/0001_initial.py
|
ambitioninc/django-entity-history
|
128b4bddb7338d259f69179bafd1918f3e59b4b0
|
[
"MIT"
] | 4
|
2015-11-01T00:33:25.000Z
|
2017-08-31T16:56:18.000Z
|
entity_history/migrations/0001_initial.py
|
ambitioninc/django-entity-history
|
128b4bddb7338d259f69179bafd1918f3e59b4b0
|
[
"MIT"
] | 2
|
2015-04-06T20:24:38.000Z
|
2015-04-20T14:32:50.000Z
|
entity_history/migrations/0001_initial.py
|
ambitioninc/django-entity-history
|
128b4bddb7338d259f69179bafd1918f3e59b4b0
|
[
"MIT"
] | 7
|
2015-04-06T19:49:37.000Z
|
2018-03-05T18:14:53.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('entity', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EntityActivationEvent',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('time', models.DateTimeField(db_index=True, help_text='The time of the activation / deactivation')),
('was_activated', models.BooleanField(help_text='True if the entity was activated, false otherwise', default=None)),
('entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, help_text='The entity that was activated / deactivated', to='entity.Entity')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EntityRelationshipActivationEvent',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('time', models.DateTimeField(db_index=True, help_text='The time of the activation / deactivation')),
('was_activated', models.BooleanField(help_text='True if the entity was activated, false otherwise', default=None)),
('sub_entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='entity.Entity', related_name='+', help_text='The sub entity in the relationship that was activated / deactivated')),
('super_entity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='entity.Entity', related_name='+', help_text='The super entity in the relationship that was activated / deactivated')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EntityHistory',
fields=[
],
options={
'proxy': True,
},
bases=('entity.entity',),
),
]
| 44.44
| 218
| 0.605311
|
5a0cfae56b6755531f88369e3c738996e650facf
| 592
|
py
|
Python
|
src/api/data_objects.py
|
Matthew-Klawitter/Matrix-Barista-Bot
|
cc42cabce2ed123fff94f77f1677d6685b19acc2
|
[
"MIT"
] | 2
|
2021-10-17T03:32:49.000Z
|
2022-02-24T04:28:15.000Z
|
src/api/data_objects.py
|
Matthew-Klawitter/Matrix-Barista-Bot
|
cc42cabce2ed123fff94f77f1677d6685b19acc2
|
[
"MIT"
] | null | null | null |
src/api/data_objects.py
|
Matthew-Klawitter/Matrix-Barista-Bot
|
cc42cabce2ed123fff94f77f1677d6685b19acc2
|
[
"MIT"
] | null | null | null |
class Message:
def __init__(self, bridge, room, event):
self.username = room.user_name(event.sender)
self.room_id = room.room_id
self.room_name = room.display_name
self.bridge = bridge
self.message = event.body
self.is_command = event.body.startswith("!")
# Parsed message for correct command handling
if self.is_command:
parts = event.body[1:].split(" ", 1)
self.command = parts[0]
if len(parts) > 1:
self.args = parts[1]
else:
self.args = ""
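# Parsing sketch (illustrative; the stubs below stand in for real matrix-nio
# room/event objects and are not part of the original module).
class _StubRoom:
    room_id = "!room:example.org"
    display_name = "Example Room"
    def user_name(self, sender):
        return sender
class _StubEvent:
    sender = "@alice:example.org"
    body = "!roll 2d6"
def _example_parse_command():
    """Build a Message from stubs; yields command == "roll" and args == "2d6"."""
    return Message(bridge=None, room=_StubRoom(), event=_StubEvent())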
| 34.823529
| 53
| 0.552365
|
cb0e8c8363421c0f5d9d9a96aa708b9be39ce5af
| 703
|
py
|
Python
|
ot2_controller/launch/ot2_bob_bringup.launch.py
|
AD-SDL/ot2_workcell
|
c994c5a5bd79a154e624f17bbde1a13a5461cc69
|
[
"MIT"
] | 2
|
2022-02-09T19:18:58.000Z
|
2022-02-09T19:18:59.000Z
|
ot2_controller/launch/ot2_bob_bringup.launch.py
|
urd00m/ros2tests
|
c994c5a5bd79a154e624f17bbde1a13a5461cc69
|
[
"MIT"
] | 1
|
2022-03-14T20:50:28.000Z
|
2022-03-14T20:50:28.000Z
|
ot2_controller/launch/ot2_bob_bringup.launch.py
|
urd00m/ros2tests
|
c994c5a5bd79a154e624f17bbde1a13a5461cc69
|
[
"MIT"
] | null | null | null |
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
ld = LaunchDescription()
ot2_controller = Node(
package="ot2_controller",
executable="ot2_controller",
output='screen',
parameters=[{"name": "bob"}],
emulate_tty=True,
# arguments=[('__log_level:=debug')],
)
protocol_handler = Node(
package="ot2_controller",
executable="protocol_manager",
output='screen',
parameters=[{"name": "bob"}],
emulate_tty=True,
# arguments=[('__log_level:=debug')],
)
ld.add_action(ot2_controller)
ld.add_action(protocol_handler)
return ld
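# Launch sketch (assumption: the standard ROS 2 CLI invocation for this package):
#   ros2 launch ot2_controller ot2_bob_bringup.launch.py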
| 27.038462
| 44
| 0.630156
|
d9b1aeb73eb595109a7a5aaa6583f0dbe67a0152
| 1,083
|
py
|
Python
|
samples/tests/test_query_to_arrow.py
|
KoffieLabs/python-bigquery
|
33b317abdc6d69f33722cb0504bb0b78c1c80e30
|
[
"Apache-2.0"
] | 1
|
2022-03-25T21:07:44.000Z
|
2022-03-25T21:07:44.000Z
|
samples/tests/test_query_to_arrow.py
|
abecerrilsalas/python-bigquery
|
8da4fa9e77bcfd2b68818b5d65b38ccc59899a01
|
[
"Apache-2.0"
] | null | null | null |
samples/tests/test_query_to_arrow.py
|
abecerrilsalas/python-bigquery
|
8da4fa9e77bcfd2b68818b5d65b38ccc59899a01
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from .. import query_to_arrow
pyarrow = pytest.importorskip("pyarrow")
def test_query_to_arrow(capsys: "pytest.CaptureFixture[str]") -> None:
arrow_table = query_to_arrow.query_to_arrow()
out, err = capsys.readouterr()
assert "Downloaded 8 rows, 2 columns." in out
arrow_schema = arrow_table.schema
assert arrow_schema.names == ["race", "participant"]
assert pyarrow.types.is_string(arrow_schema.types[0])
assert pyarrow.types.is_struct(arrow_schema.types[1])
| 34.935484
| 74
| 0.751616
|
d57621fec4e5bfc885c13ec0ec64ee3152777236
| 13,261
|
py
|
Python
|
luna/gateware/soc/peripheral.py
|
TomKeddie/luna
|
6688f7ae2a1b506eb6498cc2ad15dddcef0cc06a
|
[
"BSD-3-Clause"
] | 2
|
2020-11-04T10:54:15.000Z
|
2022-03-17T20:38:21.000Z
|
luna/gateware/soc/peripheral.py
|
hxkrrzq/luna
|
e56a3eef6a9fa138755512bec1252725601425c1
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/soc/peripheral.py
|
hxkrrzq/luna
|
e56a3eef6a9fa138755512bec1252725601425c1
|
[
"BSD-3-Clause"
] | 2
|
2021-06-26T06:06:52.000Z
|
2022-01-19T22:36:19.000Z
|
#
# This file is part of LUNA.
#
# Adapted from lambdasoc.
# This file includes content Copyright (C) 2020 LambdaConcept.
#
# Per our BSD license, derivative files must include this license disclaimer.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Peripheral helpers for LUNA devices. """
from contextlib import contextmanager
from nmigen import Module, Elaboratable
from nmigen import tracer
from nmigen.utils import log2_int
from nmigen_soc import csr, wishbone
from nmigen_soc.memory import MemoryMap
from nmigen_soc.csr.wishbone import WishboneCSRBridge
from .event import EventSource, IRQLine, InterruptSource
__all__ = ["Peripheral", "CSRBank", "PeripheralBridge"]
class Peripheral:
"""Wishbone peripheral.
A helper class to reduce the boilerplate needed to control a peripheral with a Wishbone interface.
It provides facilities for instantiating CSR registers, requesting windows to subordinate busses
and sending interrupt requests to the CPU.
The ``Peripheral`` class is not meant to be instantiated as-is, but rather as a base class for
actual peripherals.
Usage example
-------------
```
class ExamplePeripheral(Peripheral, Elaboratable):
def __init__(self):
super().__init__()
bank = self.csr_bank()
self._foo = bank.csr(8, "r")
self._bar = bank.csr(8, "w")
self._rdy = self.event(mode="rise")
self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
self.irq = self._bridge.irq
def elaborate(self, platform):
m = Module()
m.submodules.bridge = self._bridge
# ...
return m
```
Arguments
---------
name : str
Name of this peripheral. If ``None`` (default) the name is inferred from the variable
name this peripheral is assigned to.
Properties
----------
name : str
Name of the peripheral.
"""
def __init__(self, name=None, src_loc_at=1):
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
self.name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
self._csr_banks = []
self._windows = []
self._events = []
self._bus = None
self._irq = None
@property
def bus(self):
"""Wishbone bus interface.
Return value
------------
An instance of :class:`Interface`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have a Wishbone bus.
"""
if self._bus is None:
raise NotImplementedError("Peripheral {!r} does not have a bus interface"
.format(self))
return self._bus
@bus.setter
def bus(self, bus):
if not isinstance(bus, wishbone.Interface):
raise TypeError("Bus interface must be an instance of wishbone.Interface, not {!r}"
.format(bus))
self._bus = bus
@property
def irq(self):
"""Interrupt request line.
Return value
------------
An instance of :class:`IRQLine`.
Exceptions
----------
Raises :exn:`NotImplementedError` if the peripheral does not have an IRQ line.
"""
if self._irq is None:
raise NotImplementedError("Peripheral {!r} does not have an IRQ line"
.format(self))
return self._irq
@irq.setter
def irq(self, irq):
if not isinstance(irq, IRQLine):
raise TypeError("IRQ line must be an instance of IRQLine, not {!r}"
.format(irq))
self._irq = irq
def csr_bank(self, *, addr=None, alignment=None, desc=None):
"""Request a CSR bank.
Arguments
---------
addr : int or None
Address of the bank. If ``None``, the implicit next address will be used.
Otherwise, the exact specified address (which must be a multiple of
``2 ** max(alignment, bridge_alignment)``) will be used.
alignment : int or None
Alignment of the bank. If not specified, the bridge alignment is used.
See :class:`nmigen_soc.csr.Multiplexer` for details.
desc: (str, optional):
Documentation of the given CSR bank.
Return value
------------
An instance of :class:`CSRBank`.
"""
bank = CSRBank(name_prefix=self.name)
self._csr_banks.append((bank, addr, alignment))
return bank
def window(self, *, addr_width, data_width, granularity=None, features=frozenset(),
alignment=0, addr=None, sparse=None):
"""Request a window to a subordinate bus.
See :meth:`nmigen_soc.wishbone.Decoder.add` for details.
Return value
------------
An instance of :class:`nmigen_soc.wishbone.Interface`.
"""
window = wishbone.Interface(addr_width=addr_width, data_width=data_width,
granularity=granularity, features=features)
granularity_bits = log2_int(data_width // window.granularity)
window.memory_map = MemoryMap(addr_width=addr_width + granularity_bits,
data_width=window.granularity, alignment=alignment)
self._windows.append((window, addr, sparse))
return window
def event(self, *, mode="level", name=None, src_loc_at=0, desc=None):
"""Request an event source.
See :class:`EventSource` for details.
Return value
------------
An instance of :class:`EventSource`.
"""
event = EventSource(mode=mode, name=name, src_loc_at=1 + src_loc_at)
self._events.append(event)
return event
def bridge(self, *, data_width=8, granularity=None, features=frozenset(), alignment=0):
"""Request a bridge to the resources of the peripheral.
See :class:`PeripheralBridge` for details.
Return value
------------
A :class:`PeripheralBridge` providing access to local resources.
"""
return PeripheralBridge(self, data_width=data_width, granularity=granularity,
features=features, alignment=alignment)
def iter_csr_banks(self):
"""Iterate requested CSR banks and their parameters.
Yield values
------------
A tuple ``bank, addr, alignment`` describing the bank and its parameters.
"""
for bank, addr, alignment in self._csr_banks:
yield bank, addr, alignment
def iter_windows(self):
"""Iterate requested windows and their parameters.
Yield values
------------
        A tuple ``window, addr, sparse`` describing the window and its parameters, as
given to :meth:`Peripheral.window`.
"""
for window, addr, sparse in self._windows:
yield window, addr, sparse
def iter_events(self):
"""Iterate requested event sources.
Yield values
------------
An instance of :class:`EventSource`.
"""
for event in self._events:
yield event
class CSRBank:
"""CSR register bank.
Parameters
----------
name_prefix : str
Name prefix of the bank registers.
"""
def __init__(self, *, name_prefix=""):
self._name_prefix = name_prefix
self._csr_regs = []
def csr(self, width, access, *, addr=None, alignment=None, name=None, desc=None,
src_loc_at=0):
"""Request a CSR register.
Parameters
----------
width : int
Width of the register. See :class:`nmigen_soc.csr.Element`.
access : :class:`Access`
Register access mode. See :class:`nmigen_soc.csr.Element`.
addr : int
Address of the register. See :meth:`nmigen_soc.csr.Multiplexer.add`.
alignment : int
Register alignment. See :class:`nmigen_soc.csr.Multiplexer`.
name : str
Name of the register. If ``None`` (default) the name is inferred from the variable
name this register is assigned to.
desc: str
Documentation for the provided register, if available.
Used to capture register documentation automatically.
Return value
------------
An instance of :class:`nmigen_soc.csr.Element`.
"""
if name is not None and not isinstance(name, str):
raise TypeError("Name must be a string, not {!r}".format(name))
name = name or tracer.get_var_name(depth=2 + src_loc_at).lstrip("_")
elem_name = "{}_{}".format(self._name_prefix, name)
elem = csr.Element(width, access, name=elem_name)
self._csr_regs.append((elem, addr, alignment))
return elem
def iter_csr_regs(self):
"""Iterate requested CSR registers and their parameters.
Yield values
------------
A tuple ``elem, addr, alignment`` describing the register and its parameters.
"""
for elem, addr, alignment in self._csr_regs:
yield elem, addr, alignment
class PeripheralBridge(Elaboratable):
"""Peripheral bridge.
A bridge providing access to the registers and windows of a peripheral, and support for
interrupt requests from its event sources.
    Event management is performed by an :class:`InterruptSource` submodule.
    Parameters
    ----------
periph : :class:`Peripheral`
The peripheral whose resources are exposed by this bridge.
data_width : int
Data width. See :class:`nmigen_soc.wishbone.Interface`.
granularity : int or None
Granularity. See :class:`nmigen_soc.wishbone.Interface`.
features : iter(str)
Optional signal set. See :class:`nmigen_soc.wishbone.Interface`.
alignment : int
Resource alignment. See :class:`nmigen_soc.memory.MemoryMap`.
Attributes
----------
bus : :class:`nmigen_soc.wishbone.Interface`
Wishbone bus providing access to the resources of the peripheral.
irq : :class:`IRQLine`, out
Interrupt request. It is raised if any event source is enabled and has a pending
notification.
"""
def __init__(self, periph, *, data_width, granularity, features, alignment):
if not isinstance(periph, Peripheral):
raise TypeError("Peripheral must be an instance of Peripheral, not {!r}"
.format(periph))
self._wb_decoder = wishbone.Decoder(addr_width=1, data_width=data_width,
granularity=granularity,
features=features, alignment=alignment)
self._csr_subs = []
for bank, bank_addr, bank_alignment in periph.iter_csr_banks():
if bank_alignment is None:
bank_alignment = alignment
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=bank_alignment)
for elem, elem_addr, elem_alignment in bank.iter_csr_regs():
if elem_alignment is None:
elem_alignment = alignment
csr_mux.add(elem, addr=elem_addr, alignment=elem_alignment, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, addr=bank_addr, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
for window, window_addr, window_sparse in periph.iter_windows():
self._wb_decoder.add(window, addr=window_addr, sparse=window_sparse, extend=True)
events = list(periph.iter_events())
if len(events) > 0:
self._int_src = InterruptSource(events, name="{}_ev".format(periph.name))
self.irq = self._int_src.irq
csr_mux = csr.Multiplexer(addr_width=1, data_width=8, alignment=alignment)
csr_mux.add(self._int_src.status, extend=True)
csr_mux.add(self._int_src.pending, extend=True)
csr_mux.add(self._int_src.enable, extend=True)
csr_bridge = WishboneCSRBridge(csr_mux.bus, data_width=data_width)
self._wb_decoder.add(csr_bridge.wb_bus, extend=True)
self._csr_subs.append((csr_mux, csr_bridge))
else:
self._int_src = None
self.irq = None
self.bus = self._wb_decoder.bus
def elaborate(self, platform):
m = Module()
for i, (csr_mux, csr_bridge) in enumerate(self._csr_subs):
m.submodules[ "csr_mux_{}".format(i)] = csr_mux
m.submodules["csr_bridge_{}".format(i)] = csr_bridge
if self._int_src is not None:
m.submodules._int_src = self._int_src
m.submodules.wb_decoder = self._wb_decoder
return m
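# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# A minimal peripheral tying the pieces above together. It assumes the usual
# nmigen names (Elaboratable, Module, Signal) are in scope like the rest of this
# file, and that csr.Element exposes ``r_data`` and EventSource exposes ``stb``;
# adjust to the actual register/event APIs if they differ.
class _ExampleTimer(Peripheral, Elaboratable):
    def __init__(self):
        super().__init__()
        bank         = self.csr_bank()
        self._cnt    = bank.csr(32, "r")        # current counter value
        self._wrap   = self.event(mode="rise")  # pulses when the counter wraps
        self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
        self.bus     = self._bridge.bus
        self.irq     = self._bridge.irq
    def elaborate(self, platform):
        m = Module()
        m.submodules.bridge = self._bridge
        counter = Signal(32)
        m.d.sync += counter.eq(counter + 1)
        m.d.comb += [
            self._cnt.r_data.eq(counter),       # expose the counter over CSR
            self._wrap.stb.eq(counter == 0),    # raise the event on wrap-around
        ]
        return m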
| 34.989446
| 102
| 0.598296
|
2bbcc9b8bb0a67d7be431fb22741ea93a00a8521
| 788
|
py
|
Python
|
resources/lib/utils.py
|
robweber/service.history
|
445d88fcd35c508a49404cb3a6172c6857da5a17
|
[
"MIT"
] | 1
|
2022-02-13T16:55:30.000Z
|
2022-02-13T16:55:30.000Z
|
resources/lib/utils.py
|
robweber/service.history
|
445d88fcd35c508a49404cb3a6172c6857da5a17
|
[
"MIT"
] | null | null | null |
resources/lib/utils.py
|
robweber/service.history
|
445d88fcd35c508a49404cb3a6172c6857da5a17
|
[
"MIT"
] | null | null | null |
import xbmc
import xbmcaddon
__addon_id__= 'service.history'
__Addon = xbmcaddon.Addon(__addon_id__)
def data_dir():
return __Addon.getAddonInfo('profile')
def addon_dir():
return __Addon.getAddonInfo('path')
def log(message,loglevel=xbmc.LOGNOTICE):
xbmc.log(encode(__addon_id__ + ": " + message),level=loglevel)
def showNotification(message):
xbmc.executebuiltin("Notification(" + getString(30010) + "," + message + ",4000," + xbmc.translatePath(__Addon.getAddonInfo('path') + "/icon.png") + ")")
def getSetting(name):
return __Addon.getSetting(name)
def setSetting(name,value):
__Addon.setSetting(name,value)
def getString(string_id):
return __Addon.getLocalizedString(string_id)
def encode(string):
return string.encode('UTF-8','replace')
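# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# From another module of the same add-on these helpers would typically be used
# roughly as below; the setting name and message text are assumptions, only
# string id 30010 is known from this file.
#   from resources.lib import utils
#   utils.log("service started")
#   utils.showNotification("history updated")
#   if utils.getSetting("notify") == "true":
#       utils.setSetting("notify", "false")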
| 26.266667
| 157
| 0.725888
|
12020df236136a7e613b4e88bbe23ab744130758
| 3,245
|
py
|
Python
|
model_selfAttn.py
|
danachang/End2EndDecoder
|
575d6077c5932a2e0a63d5af9691d12e5cb468ef
|
[
"MIT"
] | null | null | null |
model_selfAttn.py
|
danachang/End2EndDecoder
|
575d6077c5932a2e0a63d5af9691d12e5cb468ef
|
[
"MIT"
] | null | null | null |
model_selfAttn.py
|
danachang/End2EndDecoder
|
575d6077c5932a2e0a63d5af9691d12e5cb468ef
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from util import log
from decoder_selfAttn import Decoder
from decoder_mdl import Decoder_Mdl
class Model(object):
def __init__(self, config,
debug_information=False,
is_train=True):
self.debug = debug_information
self.config = config
self.batch_size = config.batch_size
self.h = config.h
self.w = config.w
self.c = config.c
self.output_dim = config.output_dim
self.output_act_fn = config.output_act_fn
self.num_d_conv = config.num_d_conv
self.num_d_fc = config.num_d_fc
self.d_norm_type = config.d_norm_type
self.loss_type = config.loss_type
# added for Decoder_mdl
self.load_pretrained = config.load_pretrained
self.arch = config.arch
# create placeholders for the input
self.image = tf.placeholder(
name='image', dtype=tf.float32,
shape=[self.batch_size, self.h, self.w, self.c],
)
self.label = tf.placeholder(
name='label', dtype=tf.float32,
shape=[self.batch_size, self.output_dim],
)
self.build(is_train=is_train)
def get_feed_dict(self, batch_chunk):
fd = {
self.image: batch_chunk['image'], # [bs, h, w, c]
self.label: batch_chunk['label'], # [bs, v] (v should be 3)
}
return fd
def build(self, is_train=True):
# Decoder {{{
# =========
# Input: an image [bs, h, w, c]
# Output: [bs, [x, y, v]]
if self.arch == 'ConvNet':
D = Decoder('Decoder', self.output_dim, self.output_act_fn,
self.num_d_conv, self.num_d_fc,
self.d_norm_type, is_train)
else:
D = Decoder_Mdl('Decoder_Mdl', self.output_dim, self.output_act_fn,
self.num_d_conv, self.num_d_fc,
self.d_norm_type, is_train,
self.load_pretrained, self.arch)
pred_label, conv_list, actv_list, attn_list = D(self.image)
self.pred_label = pred_label
self.conv_list = conv_list
self.actv_list = actv_list
self.attn_list = attn_list
# }}}
# Build losses {{{
# =========
# compute loss
if self.loss_type == 'l1':
self.ori_loss = tf.abs(self.label - pred_label)
self.loss = tf.reduce_mean(self.ori_loss)
elif self.loss_type == 'l2':
            self.ori_loss = (self.label - pred_label) ** 2
self.loss = tf.reduce_mean(self.ori_loss)
else:
raise NotImplementedError
# }}}
# TensorBoard summaries {{{
# =========
tf.summary.scalar("loss/loss", self.loss)
tf.summary.image("image", self.image)
# }}}
# Output {{{
# =========
self.output = {
'pred_label': pred_label
}
# }}}
log.warn('\033[93mSuccessfully loaded the model.\033[0m')
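# --- Hypothetical usage sketch (added for illustration; not part of the original file) ---
# Assumes a `config` object exposing the attributes read in __init__ (batch_size,
# h, w, c, output_dim, loss_type, arch, ...) and TF1-style sessions; `images` and
# `labels` are numpy arrays shaped like the placeholders.
#   model = Model(config, is_train=True)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       fd = model.get_feed_dict({'image': images, 'label': labels})
#       loss_value, preds = sess.run([model.loss, model.pred_label], feed_dict=fd)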
| 30.613208
| 80
| 0.552542
|
a990710624274b2f54cca3a0ef587b69b44600e9
| 1,418
|
py
|
Python
|
yts-top-movies/yts-top-movies.py
|
jae-yong-2/awesomeScripts
|
7561f2a6966e51973338db592da9f64cf53245e8
|
[
"MIT"
] | 245
|
2020-09-24T03:49:20.000Z
|
2021-01-31T20:09:57.000Z
|
yts-top-movies/yts-top-movies.py
|
jae-yong-2/awesomeScripts
|
7561f2a6966e51973338db592da9f64cf53245e8
|
[
"MIT"
] | 252
|
2020-09-28T02:19:44.000Z
|
2021-01-23T09:00:34.000Z
|
yts-top-movies/yts-top-movies.py
|
jae-yong-2/awesomeScripts
|
7561f2a6966e51973338db592da9f64cf53245e8
|
[
"MIT"
] | 219
|
2020-09-23T18:51:42.000Z
|
2021-01-23T09:54:40.000Z
|
"""This script allows us to see the most popular movies on yts.mx
- Most popular movies
- Latest uploads
- Upcoming movies
The script uses BeautifulSoup to scrape data from the website.
"""
import requests
from bs4 import BeautifulSoup
title = f"""
\t {'-' * 80}
\t {'YTS.mx Top Movies':^80}
\t {'-' * 80}
\t {'By: chemsedd':^80}
\t {'-' * 80}
"""
print(title)
# YTS website
url = 'https://yts.mx/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0)\
Gecko/20100101\
Firefox/79.0'
}
# Request web page
page = requests.get(url, headers=headers)
# parse to html
soup = BeautifulSoup(page.content, 'html.parser')
# top movies titles
top_movies_title = soup.find_all('a', class_='browse-movie-title')
# top movies years
top_movies_year = soup.find_all('div', class_='browse-movie-year')
movies_list = zip(top_movies_title, top_movies_year)
#
# Popular downloads
for index, movie in enumerate(movies_list):
if index == 0:
print('\t\n', '\t Popular Downloads:', '\n\t', '-' * 80)
elif index == 4:
print('\t\n', '\t Latest Movies:', '\n\t', '-' * 80)
elif index == 12:
print('\t\n', '\t Upcoming Movies:', '\n\t', '-' * 80)
title, year = movie
link = title.attrs['href']
title = title.contents[-1].strip()
year = year.contents[0].strip('\n')
print(f"\t {index + 1:02} - {title:30} ({year}) >> {link}")
| 27.269231
| 70
| 0.616361
|
2cdbaf6ffd5b45d80505c713762777ee91ef76b9
| 1,096
|
py
|
Python
|
var/spack/repos/builtin/packages/r-sp/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-sp/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2018-09-20T18:32:50.000Z
|
2019-12-04T16:58:12.000Z
|
var/spack/repos/builtin/packages/r-sp/package.py
|
RemoteConnectionManager/spack
|
f2967b6c16effd26ce007cf86cadbb645c574f50
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1
|
2019-09-21T07:45:10.000Z
|
2019-09-21T07:45:10.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSp(RPackage):
"""Classes and methods for spatial data; the classes document where the
spatial location information resides, for 2D or 3D data. Utility functions
are provided, e.g. for plotting data as maps, spatial selection, as well as
methods for retrieving coordinates, for subsetting, print, summary, etc."""
homepage = "https://github.com/edzer/sp/"
url = "https://cloud.r-project.org/src/contrib/sp_1.2-3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/sp"
version('1.3-1', sha256='57988b53ba8acc35f3912d62feba4b929a0f757c6b54080c623c5d805e0cb59f')
version('1.2-7', sha256='6d60e03e1abd30a7d4afe547d157ce3dd7a8c166fc5e407fd6d62ae99ff30460')
version('1.2-3', 'f0e24d993dec128642ee66b6b47b10c1')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
| 43.84
| 95
| 0.730839
|
32c7cee45607a82e35c95d44e9d12e5365b515f2
| 142
|
py
|
Python
|
exercises/while_loops.py
|
salwa19-meet/y2s18-python_review
|
701c4e3a1a294fbcb32be3f06f54a371bcececfa
|
[
"MIT"
] | null | null | null |
exercises/while_loops.py
|
salwa19-meet/y2s18-python_review
|
701c4e3a1a294fbcb32be3f06f54a371bcececfa
|
[
"MIT"
] | null | null | null |
exercises/while_loops.py
|
salwa19-meet/y2s18-python_review
|
701c4e3a1a294fbcb32be3f06f54a371bcececfa
|
[
"MIT"
] | null | null | null |
# Write your solution for 1.3 here!
total = 0
i = 1
counter = 0
while total <= 10000:
    total = total + i
    i += 1
    counter += 1
print(counter)
| 15.777778
| 43
| 0.577465
|
add9c6dd70fe908b970668eb02a23564754e9dfc
| 1,312
|
py
|
Python
|
Laelia/apps/base/migrations/0029_auto_20201003_2233.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
Laelia/apps/base/migrations/0029_auto_20201003_2233.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
Laelia/apps/base/migrations/0029_auto_20201003_2233.py
|
arantesdv/LaeliaAppProject
|
93fca5393cb8406694903d9adde02067480c792e
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-10-04 01:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0028_auto_20201003_2231'),
]
operations = [
migrations.AlterField(
model_name='schedule',
name='duration',
field=models.IntegerField(choices=[(15, '15min'), (30, '30min'), (45, '45min'), (60, '60min'), (75, '75min'), (90, '90min'), (105, '105min'), (120, '120min')], default=60),
),
migrations.AlterField(
model_name='schedule',
name='hour',
field=models.IntegerField(choices=[(0, '0h'), (1, '1h'), (2, '2h'), (3, '3h'), (4, '4h'), (5, '5h'), (6, '6h'), (7, '7h'), (8, '8h'), (9, '9h'), (10, '10h'), (11, '11h'), (12, '12h'), (13, '13h'), (14, '14h'), (15, '15h'), (16, '16h'), (17, '17h'), (18, '18h'), (19, '19h'), (20, '20h'), (21, '21h'), (22, '22h'), (23, '23h')], default=8),
),
migrations.AlterField(
model_name='schedule',
name='min',
field=models.IntegerField(choices=[(0, '0min'), (5, '5min'), (10, '10min'), (15, '15min'), (20, '20min'), (25, '25min'), (30, '30min'), (35, '35min'), (40, '40min'), (45, '45min'), (50, '50min'), (55, '55min')], default=0),
),
]
| 45.241379
| 351
| 0.492378
|
882ca4fef2a8c0710b6b3779de645d5523509ae9
| 8,122
|
py
|
Python
|
networkconfgen/custom_filters.py
|
hoelsner/networkconfgen
|
d30ccc5c4e58d74c5eb5bcd41386c453d0130685
|
[
"MIT"
] | 21
|
2018-01-25T10:56:00.000Z
|
2020-12-12T18:09:26.000Z
|
networkconfgen/custom_filters.py
|
AstritCepele/networkconfgen
|
d30ccc5c4e58d74c5eb5bcd41386c453d0130685
|
[
"MIT"
] | null | null | null |
networkconfgen/custom_filters.py
|
AstritCepele/networkconfgen
|
d30ccc5c4e58d74c5eb5bcd41386c453d0130685
|
[
"MIT"
] | 7
|
2018-06-28T13:44:20.000Z
|
2020-11-16T00:26:18.000Z
|
"""
Any custom filter that is bound to the Jinja2 Template Engine used in the NetworkConfGen class
"""
import logging
import re
from ipaddress import IPv4Network
from networkconfgen.constants import ERROR_UNKNOWN, ERROR_INVALID_VLAN_RANGE, ERROR_INVALID_VALUE, \
CISCO_INTERFACE_PATTERN, JUNIPER_INTERFACE_PATTERN, OS_CISCO_IOS, OS_JUNIPER_JUNOS, ERROR_PARAMETER, \
ERROR_REGEX, ERROR_NO_MATCH
logger = logging.getLogger("networkconfgen")
def get_interface_components(interface_string, regex_to_use=CISCO_INTERFACE_PATTERN):
"""
    split the given string into its interface_name, chassis, module and port components (consider this experimental)
    :param interface_string: interface string to split
    :param regex_to_use: regular expression that defines the four named groups interface_name, chassis, module and port
    :return: tuple with the interface_name (lowered), the chassis number (defaults to 0 if not found), the module and port
"""
pattern = re.compile(regex_to_use)
param = pattern.match(interface_string.lower())
interface_name = param.group("interface_name").lower()
if not param.group("chassis"):
chassis = 0
else:
chassis = param.group("chassis")
module = param.group("module")
port = int(param.group("port"))
return interface_name, str(chassis), str(module), str(port)
def dotted_decimal(prefix_length):
"""
converts the given prefix length to a dotted decimal representation
:param prefix_length:
:return:
"""
try:
ip = IPv4Network("0.0.0.0/" + str(prefix_length))
return str(ip.netmask)
except Exception:
return "%s(%s)" % (ERROR_INVALID_VALUE, prefix_length)
def wildcard_mask(prefix_length):
"""
converts the given prefix length to a dotted decimal hostmask (e.g. for ACLs)
:param prefix_length:
:return:
"""
try:
ip = IPv4Network("0.0.0.0/" + str(prefix_length))
return str(ip.hostmask)
except Exception:
return "%s(%s)" % (ERROR_INVALID_VALUE, prefix_length)
def valid_vlan_name(vlan_name):
"""
    create a valid VLAN name (removes certain unwanted characters)
:param vlan_name:
:return:
"""
invalid_chars = list(";,.#+*=?%$()[]")
clean_string = vlan_name.replace(" ", "_")
clean_string = clean_string.replace("-", "_")
clean_string = "".join(e for e in clean_string if e not in invalid_chars)
return clean_string
def convert_interface_name(interface_name, target_vendor=None):
"""
    used to convert a vendor-specific interface name to the corresponding interface name of a different vendor
:param interface_name: interface string, that should be converted
:param target_vendor: target Vendor string ('cisco_ios' or 'juniper_junos')
"""
if target_vendor is None:
target_vendor = OS_JUNIPER_JUNOS
translation_map = {
# from => to => interface name (lowercase)
OS_CISCO_IOS: {
OS_JUNIPER_JUNOS: {
"eth": "ge",
"fa": "ge",
"gi": "ge",
"te": "xe",
"fo": "et",
},
},
OS_JUNIPER_JUNOS: {
OS_CISCO_IOS: {
"ge": "gi",
"xe": "te",
},
}
}
try:
result = None
if re.match(CISCO_INTERFACE_PATTERN, interface_name, re.IGNORECASE):
if OS_CISCO_IOS not in target_vendor: # don't modify if the target vendor is the same
# cisco interface detected
logger.debug("Cisco interface string '%s' detected by convert_interface_name" % interface_name)
if OS_JUNIPER_JUNOS in target_vendor.lower():
# juniper port numbering starts with 0
intf, chassis, module, port = get_interface_components(interface_name, CISCO_INTERFACE_PATTERN)
result = "%s-%s/%s/%s" % (translation_map[OS_CISCO_IOS][OS_JUNIPER_JUNOS][intf],
chassis, module, str(int(port) - 1))
elif re.match(JUNIPER_INTERFACE_PATTERN, interface_name, re.IGNORECASE):
if OS_JUNIPER_JUNOS not in target_vendor: # don't modify if the target vendor is the same
# juniper interface detected
logger.debug("Juniper interface string '%s' detected by convert_interface_name" % interface_name)
if OS_CISCO_IOS in target_vendor.lower():
intf, chassis, module, port = get_interface_components(interface_name, JUNIPER_INTERFACE_PATTERN)
result = "%s%s/%s/%s" % (translation_map[OS_JUNIPER_JUNOS][OS_CISCO_IOS][intf],
chassis, module, str(int(port) + 1))
if result:
return result
else:
return interface_name
except Exception as ex:
msg = "Exception while translating interface name '%s' to '%s' (%s)" % (interface_name, target_vendor, str(ex))
logger.error(msg)
logger.debug(msg, exc_info=True)
return ERROR_UNKNOWN
def expand_vlan_list(vlan_list):
"""
converts a range statement to a list (or a list with an error message if the parameter is not valid) - no
verification of the VLAN ID range (1-4094)
:param vlan_list: string in the format `(\d+-\d+)`
:return: list entry with the result
"""
result = []
    list_regex = r"(\d+-\d+)"
re_result = re.match(list_regex, vlan_list)
if re_result is None:
# if the list is invalid, return a list with a single error entry
result.append("%s(%s)" % (ERROR_INVALID_VLAN_RANGE, vlan_list))
else:
# extract values and check range
elements = vlan_list.split("-")
val_a = int(elements[0])
val_b = int(elements[1])
if val_a >= val_b:
result.append("%s(%s)" % (ERROR_INVALID_VLAN_RANGE, vlan_list))
else:
# valid parameter, create vlan list
result.extend(list(range(val_a, val_b + 1)))
return result
def split_interface(interface_regex, value):
"""
convert an interface based on the given regular expression to a dictionary with all components, e.g.
regex: .*(?P<chassis>\d+)\/(?P<module>\d+)\/(?P<port>\d+).*
value: interface gi1/2/3
will return
{
"chassis": 1,
"module": 2,
"port": 3
}
or in case of an error to
{
"error": "message"
}
:param interface_regex: valid regular expression
:param value: value that should be parsed
    :return: dictionary with the possible keys ``chassis``, ``module`` and ``port``, or a single ``error`` key describing the failure
"""
if type(interface_regex) is not str:
return {"error": "%s(%s)" % (ERROR_PARAMETER, "invalid type for 'interface_regex'")}
if type(value) is not str:
return {"error": "%s(%s)" % (ERROR_PARAMETER, "invalid type for 'value'")}
try:
pattern = re.compile(interface_regex, re.IGNORECASE)
except Exception as ex:
return {"error": "%s(%s)" % (ERROR_REGEX, str(ex))}
match = pattern.match(value)
if match:
        # check groups 'chassis', 'module' and 'port'
# if empty, a None value is used
result = match.groupdict()
valid_groups = ["chassis", "module", "port"]
        # add None values if certain groups do not exist
for key in valid_groups:
result[key] = None if key not in result.keys() else result[key]
# delete other keys
keys_to_delete = []
for key in result.keys():
if key not in valid_groups:
keys_to_delete.append(key)
for key in keys_to_delete:
del result[key]
else:
# no match, return error message
result = {"error": "%s(pattern '%s' for '%s')" % (ERROR_NO_MATCH, interface_regex, value)}
return result
def split_interface_cisco_ios(value):
return split_interface(".*%s.*" % CISCO_INTERFACE_PATTERN, value)
def split_interface_juniper_junos(value):
return split_interface(".*%s.*" % JUNIPER_INTERFACE_PATTERN, value)
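# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# Quick manual check of the filters above. The expected values in the comments
# follow from the code in this file; the interface conversions depend on the
# CISCO_INTERFACE_PATTERN / JUNIPER_INTERFACE_PATTERN constants and are only
# indicative.
if __name__ == "__main__":
    print(dotted_decimal(24))             # 255.255.255.0
    print(wildcard_mask(24))              # 0.0.0.255
    print(valid_vlan_name("my vlan #1"))  # my_vlan_1
    print(expand_vlan_list("10-14"))      # [10, 11, 12, 13, 14]
    print(convert_interface_name("gi1/0/1", OS_JUNIPER_JUNOS))  # e.g. ge-1/0/0
    print(split_interface_cisco_ios("interface gi1/0/1"))       # e.g. {'chassis': '1', 'module': '0', 'port': '1'}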
| 32.358566
| 121
| 0.619921
|
46207b5296e9594219bb55face43bf1f45981042
| 6,339
|
py
|
Python
|
baselines/baselines/common/policies.py
|
yooceii/HardRLWithYoutube
|
e9644c16a8125af09d998ad1df733135d129a3aa
|
[
"MIT"
] | 34
|
2018-10-16T16:09:07.000Z
|
2021-10-19T06:21:03.000Z
|
baselines/baselines/common/policies.py
|
yooceii/HardRLWithYoutube
|
e9644c16a8125af09d998ad1df733135d129a3aa
|
[
"MIT"
] | 5
|
2019-01-31T16:31:13.000Z
|
2019-06-26T01:13:04.000Z
|
baselines/baselines/common/policies.py
|
yooceii/HardRLWithYoutube
|
e9644c16a8125af09d998ad1df733135d129a3aa
|
[
"MIT"
] | 9
|
2018-12-04T11:39:43.000Z
|
2021-04-02T19:17:11.000Z
|
import tensorflow as tf
from baselines.common import tf_util
from baselines.a2c.utils import fc
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_placeholder, encode_observation
from baselines.common.tf_util import adjust_shape
from baselines.common.mpi_running_mean_std import RunningMeanStd
from baselines.common.models import get_network_builder
import gym
class PolicyWithValue(object):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors):
"""
Parameters:
----------
env RL environment
observations tensorflow placeholder in which the observations will be fed
latent latent state from which policy distribution parameters should be inferred
vf_latent latent state from which value function should be inferred (if None, then latent is used)
sess tensorflow session to run calculations in (if None, default session is used)
**tensors tensorflow tensors for additional attributes such as state or mask
"""
self.X = observations
self.state = tf.constant([])
self.initial_state = None
self.__dict__.update(tensors)
vf_latent = vf_latent if vf_latent is not None else latent
vf_latent = tf.layers.flatten(vf_latent)
latent = tf.layers.flatten(latent)
self.pdtype = make_pdtype(env.action_space)
self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01)
self.action = self.pd.sample()
self.neglogp = self.pd.neglogp(self.action)
self.sess = sess
if estimate_q:
assert isinstance(env.action_space, gym.spaces.Discrete)
self.q = fc(vf_latent, 'q', env.action_space.n)
self.vf = self.q
else:
self.vf = fc(vf_latent, 'vf', 1)
self.vf = self.vf[:,0]
def _evaluate(self, variables, observation, **extra_feed):
sess = self.sess or tf.get_default_session()
feed_dict = {self.X: adjust_shape(self.X, observation)}
for inpt_name, data in extra_feed.items():
if inpt_name in self.__dict__.keys():
inpt = self.__dict__[inpt_name]
if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder':
feed_dict[inpt] = adjust_shape(inpt, data)
return sess.run(variables, feed_dict)
def step(self, observation, **extra_feed):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
if state.size == 0:
state = None
return a, v, state, neglogp
def value(self, ob, *args, **kwargs):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs)
def save(self, save_path):
tf_util.save_state(save_path, sess=self.sess)
def load(self, load_path):
tf_util.load_state(load_path, sess=self.sess)
def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
if isinstance(policy_network, str):
network_type = policy_network
policy_network = get_network_builder(network_type)(**policy_kwargs)
def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
ob_space = env.observation_space
X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)
extra_tensors = {}
if normalize_observations and X.dtype == tf.float32:
encoded_x, rms = _normalize_clip_observation(X)
extra_tensors['rms'] = rms
else:
encoded_x = X
encoded_x = encode_observation(ob_space, encoded_x)
with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
policy_latent, recurrent_tensors = policy_network(encoded_x)
if recurrent_tensors is not None:
# recurrent architecture, need a few more steps
nenv = nbatch // nsteps
assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
extra_tensors.update(recurrent_tensors)
_v_net = value_network
if _v_net is None or _v_net == 'shared':
vf_latent = policy_latent
else:
if _v_net == 'copy':
_v_net = policy_network
else:
assert callable(_v_net)
with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
vf_latent, _ = _v_net(encoded_x)
policy = PolicyWithValue(
env=env,
observations=X,
latent=policy_latent,
vf_latent=vf_latent,
sess=sess,
estimate_q=estimate_q,
**extra_tensors
)
return policy
return policy_fn
def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):
rms = RunningMeanStd(shape=x.shape[1:])
norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
return norm_x, rms
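# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# Typical wiring under TF1 graph/session semantics; 'mlp' is assumed to be one of
# the registered builders in baselines.common.models and CartPole-v0 a Gym env.
#   env = gym.make("CartPole-v0")
#   policy_fn = build_policy(env, "mlp", num_layers=2, num_hidden=64)
#   with tf.Session() as sess:
#       policy = policy_fn(nbatch=1, nsteps=1, sess=sess)
#       sess.run(tf.global_variables_initializer())
#       obs = env.reset()
#       action, value, _, neglogp = policy.step(obs[None])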
| 35.216667
| 137
| 0.630541
|
3e5d64cb37ce17377d8718948d142779de4a84fb
| 7,678
|
py
|
Python
|
example/example_face.py
|
Tomoya-K-0504/deepSELF
|
0e5a7d0169b3e9edcb5c8d9802140a84ce5cb69a
|
[
"MIT"
] | 1
|
2021-08-17T09:56:47.000Z
|
2021-08-17T09:56:47.000Z
|
example/example_face.py
|
JIangjiang1108/deepSELF
|
0e5a7d0169b3e9edcb5c8d9802140a84ce5cb69a
|
[
"MIT"
] | null | null | null |
example/example_face.py
|
JIangjiang1108/deepSELF
|
0e5a7d0169b3e9edcb5c8d9802140a84ce5cb69a
|
[
"MIT"
] | 1
|
2021-08-17T09:56:41.000Z
|
2021-08-17T09:56:41.000Z
|
import itertools
import logging
import pprint
import shutil
from copy import deepcopy
from dataclasses import dataclass
from datetime import datetime as dt
from pathlib import Path
import hydra
import mlflow
import numpy as np
import pandas as pd
import torch
from hydra import utils
from joblib import Parallel, delayed
from omegaconf import OmegaConf
from deepself.models.nn_models.cnn import CNNConfig
from deepself.models.nn_models.cnn_rnn import CNNRNNConfig
from deepself.models.nn_models.rnn import RNNConfig
from deepself.src.dataset import ManifestDataSet
from deepself.tasks.base_experiment import typical_train, typical_experiment
from deepself.utils.config import ExptConfig, before_hydra
from deepself.utils.utils import dump_dict
LABELS = ['Anger', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
@dataclass
class ExampleFaceConfig(ExptConfig):
n_parallel: int = 1
mlflow: bool = False
def label_func(row):
return row[0]
def load_func(row):
im = np.array(list(map(int, row[1].split(' ')))).reshape((48, 48)) / 255
return torch.tensor(im[None, :, :], dtype=torch.float)
def create_manifest(expt_conf, expt_dir):
data_dir = Path(utils.to_absolute_path('input'))
manifest_df = pd.read_csv(data_dir / 'fer2013.csv')
train_val_df = manifest_df[manifest_df['Usage'] == 'Training']
train_df = train_val_df.iloc[:int(len(train_val_df) * 0.7), :]
train_df.to_csv(expt_dir / 'train_manifest.csv', index=False, header=None)
expt_conf.train.train_path = expt_dir / 'train_manifest.csv'
val_df = train_val_df.iloc[int(len(train_val_df) * 0.7):, :]
val_df.to_csv(expt_dir / 'val_manifest.csv', index=False, header=None)
expt_conf.train.val_path = expt_dir / 'val_manifest.csv'
test_df = manifest_df[manifest_df['Usage'] != 'Training']
test_df.to_csv(expt_dir / 'test_manifest.csv', index=False, header=None)
expt_conf.train.test_path = expt_dir / 'test_manifest.csv'
return expt_conf
def set_hyperparameter(expt_conf, param, param_value):
if len(param.split('.')) == 1:
expt_conf[param] = param_value
else:
tmp = expt_conf
for attr in param.split('.')[:-1]:
tmp = getattr(tmp, str(attr))
setattr(tmp, param.split('.')[-1], param_value)
return expt_conf
def main(cfg, expt_dir, hyperparameters):
if cfg.expt_id == 'timestamp':
cfg.expt_id = dt.today().strftime('%Y-%m-%d_%H:%M')
logging.basicConfig(level=logging.DEBUG, format="[%(name)s] [%(levelname)s] %(message)s",
filename=expt_dir / 'expt.log')
cfg.train.class_names = LABELS
dataset_cls = ManifestDataSet
metrics_names = {'train': ['loss', 'uar'],
'val': ['loss', 'uar'],
'test': ['loss', 'uar']}
cfg = create_manifest(cfg, expt_dir)
process_func = None
patterns = list(itertools.product(*hyperparameters.values()))
val_results = pd.DataFrame(np.zeros((len(patterns), len(hyperparameters) + len(metrics_names['val']))),
columns=list(hyperparameters.keys()) + metrics_names['val'])
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(hyperparameters)
groups = None
def experiment(pattern, cfg):
for i, param in enumerate(hyperparameters.keys()):
cfg = set_hyperparameter(cfg, param, pattern[i])
cfg.train.model.model_path = str(expt_dir / f"{'_'.join([str(p).replace('/', '-') for p in pattern])}.pth")
cfg.train.log_id = f"{'_'.join([str(p).replace('/', '-') for p in pattern])}"
with mlflow.start_run():
result_series, val_pred, _ = typical_train(cfg, load_func, label_func, process_func, dataset_cls, groups)
mlflow.log_params({hyperparameter: value for hyperparameter, value in zip(hyperparameters.keys(), pattern)})
return result_series, val_pred
# For debugging
if cfg.n_parallel == 1:
result_pred_list = [experiment(pattern, deepcopy(cfg)) for pattern in patterns]
else:
cfg.n_jobs = 0
result_pred_list = Parallel(n_jobs=cfg.n_parallel, verbose=0)(
[delayed(experiment)(pattern, deepcopy(cfg)) for pattern in patterns])
val_results.iloc[:, :len(hyperparameters)] = patterns
result_list = np.array([result for result, pred in result_pred_list])
val_results.iloc[:, len(hyperparameters):] = result_list
pp.pprint(val_results)
pp.pprint(val_results.iloc[:, len(hyperparameters):].describe())
val_results.to_csv(expt_dir / 'val_results.csv', index=False)
print(f"Devel results saved into {expt_dir / 'val_results.csv'}")
for (_, _), pattern in zip(result_pred_list, patterns):
pattern_name = f"{'_'.join([str(p).replace('/', '-') for p in pattern])}"
dump_dict(expt_dir / f'{pattern_name}.txt', cfg)
# Train with train + devel dataset
if cfg.test:
best_trial_idx = val_results['uar'].argmax()
best_pattern = patterns[best_trial_idx]
for i, param in enumerate(hyperparameters.keys()):
cfg = set_hyperparameter(cfg, param, best_pattern[i])
dump_dict(expt_dir / 'best_parameters.txt', {p: v for p, v in zip(hyperparameters.keys(), best_pattern)})
metrics, pred_dict_list, _ = typical_experiment(cfg, load_func, label_func, process_func, dataset_cls,
groups)
sub_name = f"uar-{metrics[-1]:.4f}_sub_{'_'.join([str(p).replace('/', '-') for p in best_pattern])}.csv"
pd.DataFrame(pred_dict_list['test']).to_csv(expt_dir / f'{sub_name}_prob.csv', index=False, header=None)
pd.DataFrame(pred_dict_list['test'].argmax(axis=1)).to_csv(expt_dir / sub_name, index=False, header=None)
print(f"Submission file is saved in {expt_dir / sub_name}")
mlflow.end_run()
@hydra.main(config_name="config")
def hydra_main(cfg: ExampleFaceConfig):
console = logging.StreamHandler()
console.setFormatter(logging.Formatter("[%(name)s] [%(levelname)s] %(message)s"))
console.setLevel(logging.INFO)
logging.getLogger("ml").addHandler(console)
if OmegaConf.get_type(cfg.train.model) == CNNConfig:
hyperparameters = {
'train.model.optim.lr': [1e-4],
}
elif OmegaConf.get_type(cfg.train.model) == CNNRNNConfig:
hyperparameters = {
'train.model.optim.lr': [1e-3, 1e-4, 1e-5],
'window_size': [0.5],
'window_stride': [0.1],
'transform': ['logmel'],
'rnn_type': [cfg.rnn_type],
'bidirectional': [True],
'rnn_n_layers': [1],
'rnn_hidden_size': [10],
}
elif OmegaConf.get_type(cfg.train.model) == RNNConfig:
hyperparameters = {
'bidirectional': [True, False],
'rnn_type': ['lstm', 'gru'],
'rnn_n_layers': [1, 2],
'rnn_hidden_size': [10, 50],
'transform': [None],
'train.model.optim.lr': [1e-3, 1e-4, 1e-5],
}
else:
hyperparameters = {
'train.model.optim.lr': [1e-4, 1e-5],
'data.batch_size': [64],
'data.epoch_rate': [1.0],
'data.sample_balance': ['same'],
}
cfg.expt_id = f'{cfg.train.model_type.value}_pretrain-{cfg.train.model.pretrained}'
expt_dir = Path(utils.to_absolute_path('output')) / 'example_face' / f'{cfg.expt_id}'
expt_dir.mkdir(exist_ok=True, parents=True)
main(cfg, expt_dir, hyperparameters)
if not cfg.mlflow:
shutil.rmtree('mlruns')
if __name__ == '__main__':
config_store = before_hydra(ExampleFaceConfig)
hydra_main()
| 37.091787
| 120
| 0.646913
|
228c2c4d80462b7089b07d8ccee38c9a8e4f7a3c
| 2,961
|
py
|
Python
|
mysite/scisheets/helpers/helpers_test.py
|
ScienceStacks/JViz
|
c8de23d90d49d4c9bc10da25f4a87d6f44aab138
|
[
"Artistic-2.0",
"Apache-2.0"
] | 31
|
2016-11-16T22:34:35.000Z
|
2022-03-22T22:16:11.000Z
|
mysite/scisheets/helpers/helpers_test.py
|
ScienceStacks/JViz
|
c8de23d90d49d4c9bc10da25f4a87d6f44aab138
|
[
"Artistic-2.0",
"Apache-2.0"
] | 6
|
2017-06-24T06:29:36.000Z
|
2022-01-23T06:30:01.000Z
|
mysite/scisheets/helpers/helpers_test.py
|
ScienceStacks/JViz
|
c8de23d90d49d4c9bc10da25f4a87d6f44aab138
|
[
"Artistic-2.0",
"Apache-2.0"
] | 4
|
2017-07-27T16:23:50.000Z
|
2022-03-12T06:36:13.000Z
|
'''Client Mocks'''
import scisheets.core.helpers.cell_types as cell_types
from django.test import TestCase, RequestFactory
from django.contrib.sessions.middleware import SessionMiddleware
BASE_URL = "http://localhost:8000/scisheets/"
TARGET = 'Cell'
COMMAND = 'Update'
VALUE = 'XXX'
ROW_INDEX = 1
COLUMN_INDEX = 3
COLUMN_NAME = 'Col_2'
TABLE_NAME = 'XYZ'
IGNORE_TEST = False
class HelperHTTP(object):
def __init__(self):
self._factory = RequestFactory()
def addSessionToRequest(self, request):
middleware = SessionMiddleware()
middleware.process_request(request)
request.session.save()
def ajaxCommandFactory(self):
ajax_cmd = {}
ajax_cmd['target'] = TARGET
ajax_cmd['command'] = COMMAND
ajax_cmd['value'] = VALUE
ajax_cmd['row'] = ROW_INDEX
ajax_cmd['columnName'] = COLUMN_NAME
ajax_cmd['table'] = TABLE_NAME
ajax_cmd['args[]'] = None
return ajax_cmd
def createBaseURL(self, params=None):
# Creates the base URL to construct a table
# Input: params
# 0 - number of columns
# 1 - number of rows
# Output: URL
if params is None:
client_url = BASE_URL
else:
ncol = params[0]
nrow = params[1]
client_url = "%s%d/%d/" % (BASE_URL, ncol, nrow)
return client_url
def createURL(self, address="dummy", count=None, values=None, names=None):
# Input: count - number of variables
# names - variable names
# values - values to use for each variable
# address - URL address
# Returns - a URL string with variables in the GET format
url = address
count = 0
if values is not None:
count = len(values)
if names is None:
names = []
for n in range(count):
names.append("var%d" % n)
for n in range(count):
if n == 0:
url += "command?"
else:
url += "&"
if values is None:
url += "%s=%d" % (names[n], n)
else:
if cell_types.isStr(values[n]):
url += "%s=%s" % (names[n], values[n])
elif isinstance(values[n], int):
url += "%s=%d" % (names[n], values[n])
elif cell_types.isFloats([n]):
url += "%s=%f" % (names[n], values[n])
elif values[n] is None:
url += "%s=%s" % (names[n], None)
elif isinstance(values[n], list):
url += "%s=%s" % (names[n], values[n])
else:
          raise TypeError("unsupported value type: %r" % (values[n],))
return url
def createURLFromAjaxCommand(self, ajax_cmd, address=None):
# Input: ajax_cmd - command dictionary from commandFactory
# Output: URL
names = ajax_cmd.keys()
values = []
for name in names:
values.append(ajax_cmd[name])
return self.createURL(values=values, names=names, address=address)
def URL2Request(self, url):
# Input: url - URL string
# Returns - request with count number of parameters
return self._factory.get(url)
| 28.747573
| 76
| 0.609591
|
bf8e6c757be4595777016c9c30c3d2a607aad858
| 2,150
|
py
|
Python
|
zipline/pipeline/hooks/delegate.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 14,525
|
2015-01-01T02:57:52.000Z
|
2022-03-31T18:16:35.000Z
|
zipline/pipeline/hooks/delegate.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 2,146
|
2015-01-01T13:03:44.000Z
|
2022-02-22T03:25:28.000Z
|
zipline/pipeline/hooks/delegate.py
|
leonarduschen/zipline
|
5e6c9fce7e0f812bd181024ad192ca2976d49667
|
[
"Apache-2.0"
] | 4,517
|
2015-01-01T14:26:47.000Z
|
2022-03-31T14:38:05.000Z
|
from interface import implements
from zipline.utils.compat import ExitStack, contextmanager, wraps
from .iface import PipelineHooks, PIPELINE_HOOKS_CONTEXT_MANAGERS
from .no import NoHooks
def delegating_hooks_method(method_name):
"""Factory function for making DelegatingHooks methods.
"""
if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS:
# Generate a contextmanager that enters the context of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
@contextmanager
def ctx(self, *args, **kwargs):
with ExitStack() as stack:
for hook in self._hooks:
sub_ctx = getattr(hook, method_name)(*args, **kwargs)
stack.enter_context(sub_ctx)
yield stack
return ctx
else:
# Generate a method that calls methods of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
def method(self, *args, **kwargs):
for hook in self._hooks:
sub_method = getattr(hook, method_name)
sub_method(*args, **kwargs)
return method
class DelegatingHooks(implements(PipelineHooks)):
"""A PipelineHooks that delegates to one or more other hooks.
Parameters
----------
hooks : list[implements(PipelineHooks)]
Sequence of hooks to delegate to.
"""
def __new__(cls, hooks):
if len(hooks) == 0:
# OPTIMIZATION: Short-circuit to a NoHooks if we don't have any
# sub-hooks.
return NoHooks()
elif len(hooks) == 1:
# OPTIMIZATION: Unwrap delegation layer if we only have one
# sub-hook.
return hooks[0]
else:
self = super(DelegatingHooks, cls).__new__(cls)
self._hooks = hooks
return self
# Implement all interface methods by delegating to corresponding methods on
# input hooks.
locals().update({
name: delegating_hooks_method(name)
# TODO: Expose this publicly on interface.
for name in PipelineHooks._signatures
})
del delegating_hooks_method
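# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# The short-circuit behaviour follows directly from __new__ above; with two or
# more children every hook method/context manager fans out to all of them.
if __name__ == "__main__":
    assert isinstance(DelegatingHooks([]), NoHooks)     # no children -> NoHooks
    single = NoHooks()
    assert DelegatingHooks([single]) is single          # one child -> unwrapped
    combined = DelegatingHooks([NoHooks(), NoHooks()])  # real delegation layer
    print(type(combined).__name__)                      # DelegatingHooks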
| 32.575758
| 79
| 0.626977
|
4a19fd2b2985d4d9656278c2e7d1ba36e8276eed
| 9,621
|
py
|
Python
|
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
train_crf.py
|
bubblemans/Gun-Violence-Information-Retrieval-Using-BERT-as-Sequence-Tagging-Task
|
a8a199760c531286ae79fd9de541387a6c9fd5a7
|
[
"MIT"
] | null | null | null |
import argparse
import math
import logging
import pandas as pd
import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from models import BERT_CRF_Linear, BERT_CRF_LSTM, BERT_CRF_BiLSTM
from dataset import GunViolenceDataset
from utils import *
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def train(train_X, train_Y, learning_rate, cuda_available, epochs, model_type, is_balance, batch_size, max_seq_length, patience, min_delta, baseline):
training_set = GunViolenceDataset(train_X, train_Y)
training_generator = DataLoader(
training_set,
batch_size=batch_size,
shuffle=True,
)
iter_in_one_epoch = len(train_X) // batch_size
tokenizer = torch.hub.load(TRANSFORMER_PATH, 'tokenizer', 'bert-base-cased') # cased!
model = None
if model_type == 'LSTM':
model = BERT_CRF_LSTM(3)
elif model_type == 'BiLSTM':
model = BERT_CRF_BiLSTM(3)
else:
model = BERT_CRF_Linear(3) # 3 different labels: B, I, O
if cuda_available:
model.to('cuda') # move data onto GPU
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
losses = []
num_no_improve = 0
best_loss = None
stopping_epoch = 0
for epoch in range(1, epochs + 1):
loss = 0
with tqdm.tqdm(training_generator, unit="batch") as tepoch:
for i, (train_x, train_y) in enumerate(tepoch):
tepoch.set_description(f"Epoch {epoch}")
# prepare model input
tokens, labels = convert_examples_to_features(train_x, train_y, tokenizer, max_seq_length)
indexed_tokens = [tokenizer.convert_tokens_to_ids(token) for token in tokens]
segments_ids = [[0] * len(indexed_token) for indexed_token in indexed_tokens]
if cuda_available:
segments_tensors = torch.tensor(segments_ids).to('cuda')
tokens_tensor = torch.tensor(indexed_tokens).to('cuda')
labels = torch.tensor(labels).to('cuda')
else:
segments_tensors = torch.tensor(segments_ids)
tokens_tensor = torch.tensor(indexed_tokens)
labels = torch.tensor(labels)
# forward pass
y_pred, logits, loss = model(tokens_tensor, segments_tensors, labels)
losses.append((epoch + i / iter_in_one_epoch, loss.item()))
# display loss
tepoch.set_postfix(loss="{:.4f}".format(loss.item()))
# zero out gradients
optimizer.zero_grad()
# backward pass
loss.backward()
# update parameters
optimizer.step()
if not best_loss:
best_loss = loss
elif loss <= best_loss + min_delta:
best_loss = loss
num_no_improve += 1
elif loss < baseline:
num_no_improve += 1
if num_no_improve > patience:
stopping_epoch = epoch
logging.info('Early Stop on epoch {} with the best loss {}'.format(stopping_epoch, best_loss))
break
torch.save(model, 'output/model')
return model, tokenizer, stopping_epoch
def evaluate(model, evaluate_X, evaluate_Y, tokenizer, cuda_available, batch_size, max_seq_length, model_type, lr, epochs):
def _get_prediction(normalized_probs):
# classify B, I, O based on probabilities
labels = []
for sample_prob in normalized_probs:
max_prob = -math.inf
label = None
for i, prob in enumerate(sample_prob):
if max_prob < prob:
max_prob = prob
label = i
labels.append(label)
return labels
model.eval()
num_samples = len(evaluate_X)
evaluate_set = GunViolenceDataset(evaluate_X, evaluate_Y)
evaluate_generator = DataLoader(
evaluate_set,
batch_size=1,
shuffle=True,
)
num_of_tp = num_of_fn = num_of_fp = num_of_tn = 0
for i, (evaluate_x, evaluate_y) in enumerate(evaluate_generator):
tokens, labels = convert_examples_to_features(evaluate_x, evaluate_y, tokenizer, max_seq_length)
indexed_tokens = [tokenizer.convert_tokens_to_ids(token) for token in tokens]
segments_ids = [[0] * len(indexed_token) for indexed_token in indexed_tokens]
if cuda_available:
segments_tensors = torch.tensor(segments_ids).to('cuda')
tokens_tensor = torch.tensor(indexed_tokens).to('cuda')
labels = torch.tensor(labels).to('cuda')
else:
segments_tensors = torch.tensor(segments_ids)
tokens_tensor = torch.tensor(indexed_tokens)
labels = torch.tensor(labels)
with torch.no_grad():
y_pred, logits, loss = model(tokens_tensor, segments_tensors, labels)
normalized_probs = nn.functional.softmax(logits, dim=1)[0]
results = y_pred[0]
# get the real target
original = ''
for i, (x, y) in enumerate(zip(evaluate_x[0].split(), evaluate_y[0].split())):
if y[0] == 'B':
original = x + ' '
index = i
while index + 1 < len(evaluate_y[0].split()) and evaluate_y[0].split()[index + 1][0] == 'I':
original += '{} '.format(evaluate_x[0].split()[index + 1])
index += 1
break
original = original.strip()
probabilities = []
predictions = []
prediction = []
for token, tag, prob in zip(tokens[0], results, normalized_probs):
if tag == 0:
# tag == 'B'
probabilities.append(prob)
if len(prediction) != 0:
predictions.append(prediction)
prediction = []
prediction.append(token)
elif tag == 1:
# tag == 'I'
prediction.append(token)
if len(prediction) != 0:
predictions.append(prediction)
# one sentence might generate multiple targets, eg. shooters or victims
# we need to pick the most possible one, which is the one has the highest probability in 'B' tag
max_prob = -math.inf
max_prob_ind = 0
for i, prob in enumerate(probabilities):
if max_prob < prob[0]:
max_prob_ind = i
max_prob = prob[0]
# calculate true positive, false positive, true negative, false negative
result = ''
if len(predictions) != 0:
result = tokenizer.convert_tokens_to_string(predictions[max_prob_ind])
if result == original:
num_of_tp += 1
else:
num_of_fp += 1
else:
if original.strip() != '':
num_of_fn += 1
else:
num_of_tn += 1
accuracy = num_of_tp/num_samples if num_samples != 0 else 0
precision = num_of_tp/(num_of_tp + num_of_fp) if num_of_tp + num_of_fp != 0 else 0
recall = num_of_tp/(num_of_tp + num_of_fn) if num_of_tp + num_of_fn != 0 else 0
with open('victim/output/crf_{}_{}_{}_{}_{}.txt'.format(model_type, lr, epochs, batch_size, max_seq_length), 'w') as wf:
wf.write('tp: {}\n'.format(num_of_tp))
wf.write('tn: {}\n'.format(num_of_tn))
wf.write('fp: {}\n'.format(num_of_fp))
wf.write('fn: {}\n'.format(num_of_fn))
wf.write('total: {}\n'.format(num_samples))
wf.write('correct: {}\n'.format(num_of_tp))
wf.write('accuracy: {}\n'.format(accuracy))
wf.write('precision: {}\n'.format(precision))
wf.write('recall: {}\n'.format(recall))
f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0
wf.write('F1: {}\n'.format(f1))
def get_data(filename):
df = pd.read_csv(filename)
texts = df['texts'].tolist()
labels = df['labels'].tolist()
return texts, labels
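# Illustrative manifest layout (inferred from get_data() and evaluate() above,
# not taken from the repository): each CSV has a 'texts' column with the raw
# sentence and a 'labels' column with space-separated BIO tags aligned to the
# whitespace-split tokens, e.g.
#   texts  = "police say the victim John Doe was taken to hospital"
#   labels = "O O O O B I O O O O"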
if __name__ == '__main__':
args = handle_arguments()
model = None
tokenizer = None
if not args.model:
        train_X, train_Y = get_data(args.input_dir + '/train.csv')  # get_data() takes only the manifest path
        dev_X, dev_Y = get_data(args.input_dir + '/dev.csv')
train_X += dev_X
train_Y += dev_Y
model, tokenizer, stopping_epoch = train(
train_X,
train_Y,
args.lr,
args.cuda_available,
args.epochs,
args.model_type,
args.is_balance,
args.batch_size,
args.max_seq_length,
args.patience,
args.min_delta,
args.baseline
)
    else:
        model = torch.load(args.model)
        tokenizer = torch.hub.load(TRANSFORMER_PATH, 'tokenizer', 'bert-base-cased') # cased!
        stopping_epoch = 0  # no fresh training run, so there is no early-stopping epoch to report
test_X, test_Y = get_data(args.input_dir + '/test.csv')
    evaluate(
        model,
        test_X,
        test_Y,
        tokenizer,
        args.cuda_available,
        args.batch_size,
        args.max_seq_length,
        args.model_type,
        args.lr,
        stopping_epoch
    )
| 35.899254
| 150
| 0.566989
|
924a8378e03074102f4f470ee41805bd89d04096
| 1,655
|
py
|
Python
|
djangocms_nbrender/migrations/0001_initial.py
|
correctiv/djangocms-nbrender
|
230e23a22a3e08d6e64d985da5da0a1b5cc43372
|
[
"MIT"
] | 6
|
2016-03-22T09:26:44.000Z
|
2020-03-05T16:20:35.000Z
|
djangocms_nbrender/migrations/0001_initial.py
|
correctiv/djangocms-nbrender
|
230e23a22a3e08d6e64d985da5da0a1b5cc43372
|
[
"MIT"
] | null | null | null |
djangocms_nbrender/migrations/0001_initial.py
|
correctiv/djangocms-nbrender
|
230e23a22a3e08d6e64d985da5da0a1b5cc43372
|
[
"MIT"
] | 4
|
2017-12-14T08:18:22.000Z
|
2018-09-18T00:30:29.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import djangocms_nbrender.utils
import djangocms_nbrender.models
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
]
operations = [
migrations.CreateModel(
name='Notebook',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=255)),
('url', models.URLField(blank=True)),
('notebook', models.FileField(storage=djangocms_nbrender.utils.OverwriteStorage(), upload_to=djangocms_nbrender.models.get_notebook_filename, blank=True)),
],
options={
'ordering': ('name',),
'verbose_name': 'Notebook',
'verbose_name_plural': 'Notebooks',
},
),
migrations.CreateModel(
name='NotebookViewerCMSPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('start', models.IntegerField(null=True, blank=True)),
('count', models.IntegerField(null=True, blank=True)),
('notebook', models.ForeignKey(to='djangocms_nbrender.Notebook')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| 36.777778
| 171
| 0.574018
|
0a0e6f9ca0d12d45dd00c00f80e139d097fe5600
| 748
|
py
|
Python
|
kaggle-tables/main.py
|
brigadadigitalmx/medical-policy-briefs
|
93c60d294538337cb1dd187b0dc30e3940882bf7
|
[
"Apache-2.0"
] | null | null | null |
kaggle-tables/main.py
|
brigadadigitalmx/medical-policy-briefs
|
93c60d294538337cb1dd187b0dc30e3940882bf7
|
[
"Apache-2.0"
] | 12
|
2020-04-07T20:30:34.000Z
|
2021-09-28T01:52:47.000Z
|
kaggle-tables/main.py
|
brigadadigitalmx/medical-policy-briefs
|
93c60d294538337cb1dd187b0dc30e3940882bf7
|
[
"Apache-2.0"
] | 4
|
2020-04-10T03:24:43.000Z
|
2020-04-14T00:49:23.000Z
|
import re
from pathlib import Path
from selenium import webdriver
import pandas as pd
driver = webdriver.Firefox()
driver.get("https://www.kaggle.com/covid-19-contributions")
elems = driver.find_elements_by_class_name('c19-finding')
first = elems[0]
headers = first.find_elements_by_class_name('heading6')
tables = first.find_elements_by_class_name('c19-table')
assert len(headers) == len(tables)
Path('output').mkdir(exist_ok=True)
pattern = re.compile(r'[\W_]+')
for header, table in zip(headers, tables):
    name = pattern.sub('_', header.text)
print(name)
table_html = table.get_attribute('innerHTML')
df = pd.read_html('<table>{}</table>'.format(table_html))[0]
df.to_csv(str(Path('output', name+'.csv')), index=False)
| 27.703704
| 64
| 0.725936
|
54e86aef40055c8911549ee3ec692bfeb7c8ee40
| 5,245
|
py
|
Python
|
promgen/admin.py
|
kackey0-1/promgen
|
8fb7b65c8a814429a0b5f484bfec931df76c5d83
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 913
|
2016-08-02T03:06:09.000Z
|
2022-03-29T17:35:26.000Z
|
promgen/admin.py
|
kackey0-1/promgen
|
8fb7b65c8a814429a0b5f484bfec931df76c5d83
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 299
|
2016-08-08T02:48:02.000Z
|
2022-03-31T01:01:15.000Z
|
promgen/admin.py
|
kackey0-1/promgen
|
8fb7b65c8a814429a0b5f484bfec931df76c5d83
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 152
|
2016-08-06T08:23:15.000Z
|
2022-02-28T09:17:12.000Z
|
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import json
from django import forms
from django.contrib import admin
from django.utils.html import format_html
from promgen import actions, models, plugins
class PrometheusInline(admin.TabularInline):
model = models.Prometheus
class FilterInline(admin.TabularInline):
model = models.Filter
@admin.register(models.Host)
class HostAdmin(admin.ModelAdmin):
list_display = ('name', 'farm')
@admin.register(models.Shard)
class ShardAdmin(admin.ModelAdmin):
list_display = ("name", "url", "proxy", "enabled")
list_filter = ("proxy", "enabled")
inlines = [PrometheusInline]
actions = [
actions.shard_targets,
actions.shard_rules,
actions.shard_urls,
]
@admin.register(models.Service)
class ServiceAdmin(admin.ModelAdmin):
list_display = ('name', 'owner')
list_filter = (('owner', admin.RelatedOnlyFieldListFilter),)
list_select_related = ('owner',)
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
list_display = ('name', 'shard', 'service', 'farm', 'owner')
list_select_related = ('service', 'farm', 'shard', 'owner')
list_filter = ('shard', ('owner', admin.RelatedOnlyFieldListFilter),)
class SenderForm(forms.ModelForm):
sender = forms.ChoiceField(choices=[
(entry.module_name, entry.module_name) for entry in plugins.notifications()
])
class Meta:
model = models.Sender
exclude = ['content_object']
@admin.register(models.Sender)
class SenderAdmin(admin.ModelAdmin):
list_display = ('content_object', 'content_type', 'sender', 'show_value', 'owner')
form = SenderForm
list_filter = ('sender', 'content_type')
list_select_related = ('content_type',)
inlines = [FilterInline]
@admin.register(models.Farm)
class FarmAdmin(admin.ModelAdmin):
list_display = ('name', 'source')
list_filter = ('source',)
@admin.register(models.Exporter)
class ExporterAdmin(admin.ModelAdmin):
list_display = ('job', 'port', 'path', 'project', 'enabled')
list_filter = ('job', 'port',)
readonly_fields = ('project',)
@admin.register(models.DefaultExporter)
class DefaultExporterAdmin(admin.ModelAdmin):
list_display = ('job', 'port', 'path')
list_filter = ('job', 'port')
@admin.register(models.Probe)
class ProbeAdmin(admin.ModelAdmin):
list_display = ("module", "description")
@admin.register(models.URL)
class URLAdmin(admin.ModelAdmin):
# Disable add permission and project editing because of the difficult UI
# but leave support for editing url/probe through admin panel
def has_add_permission(self, request):
return False
list_display = ("url", "probe", "project")
list_filter = ("probe", ("project__service", admin.RelatedOnlyFieldListFilter))
list_select_related = ("project", "project__service", "probe")
readonly_fields = ("project",)
class RuleLabelInline(admin.TabularInline):
model = models.RuleLabel
class RuleAnnotationInline(admin.TabularInline):
model = models.RuleAnnotation
@admin.register(models.Rule)
class RuleAdmin(admin.ModelAdmin):
list_display = ('name', 'clause', 'duration', 'content_object')
list_filter = ('duration',)
list_select_related = ('content_type',)
inlines = [RuleLabelInline, RuleAnnotationInline]
def get_queryset(self, request):
qs = super(RuleAdmin, self).get_queryset(request)
return qs.prefetch_related('content_object',)
@admin.register(models.Prometheus)
class PrometheusAdmin(admin.ModelAdmin):
list_display = ("shard", "host", "port")
list_filter = ("shard",)
actions = [
actions.prometheus_targets,
actions.prometheus_rules,
actions.prometheus_urls,
actions.prometheus_reload,
actions.prometheus_tombstones,
]
@admin.register(models.Alert)
class AlertAdmin(admin.ModelAdmin):
def __getattr__(self, name):
# Override __getattr__ so that we can return a label
# for any of our special values in list_display
def __get_label(label):
def __wrapped(instance):
try:
return instance.json['commonLabels'][label]
except KeyError:
return ''
# We give the wrapped function the same description as
# our label so that it shows up right in the admin panel
__wrapped.short_description = label
return __wrapped
if name in self.list_display:
return __get_label(name)
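    # Example of the lookup above (illustrative): 'alertname' appears in
    # list_display below, so self.alertname resolves to a callable that pulls
    # instance.json['commonLabels']['alertname'] into the changelist column.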
date_hierarchy = 'created'
list_display = (
'created',
'datasource',
'alertname',
'service',
'project',
'severity',
'job',
)
fields = ('created', '_json')
readonly_fields = ('created', '_json')
ordering = ('-created',)
@admin.display(description="json")
def _json(self, instance):
return format_html('<pre>{}</pre>', json.dumps(instance.json, indent=2))
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
| 28.505435
| 86
| 0.672259
|
7e7df1b2cba282e14a79dd5fcf25200390493c07
| 5,428
|
py
|
Python
|
rational_number_class.py
|
markfoleyie/pa1_2021
|
b6011ff6eece29e53095a8cf69d0f2764e8d0c88
|
[
"MIT"
] | 1
|
2020-10-01T20:22:40.000Z
|
2020-10-01T20:22:40.000Z
|
rational_number_class.py
|
markfoleyie/pa1_2021
|
b6011ff6eece29e53095a8cf69d0f2764e8d0c88
|
[
"MIT"
] | null | null | null |
rational_number_class.py
|
markfoleyie/pa1_2021
|
b6011ff6eece29e53095a8cf69d0f2764e8d0c88
|
[
"MIT"
] | 3
|
2020-10-29T21:19:37.000Z
|
2021-02-25T20:04:30.000Z
|
"""
A simple Rational number class. This demonstrates operator overloading using the 'magic' methods such as '__add__'.
"""
class Rational:
"""
    Implements a simple Rational number. Note that not all possible methods are implemented; only a sample of the
    'magic' methods that enable operator overloading is provided.
"""
def __init__(self, numer, denom=1):
"""
Initializer method. This sets up the instance variables i.e. those prepended with 'self.'
In this case we store the numerator & denominator, being the two elements that make up a
rational number. An integer can be considered a rational in the form of itself over 1,
hence the default.
Your classes should always include an initializer method.
"""
self.numer = numer
self.denom = denom
def __str__(self):
"""
String representation for printing. When print(instance) is called this method is fired.
Your classes should always have this method.
"""
return f"{self.numer} / {self.denom}"
def __repr__(self):
"""
Representation of Rational number. This is the 'definitive' representation of an instance, usually a
string representation of the code to make the instance.
It's also acceptable, at this level, to just make this invoke the __str__ method.
        This method is invoked when you enter the instance name in an interactive editor such as IDLE or
        the PyCharm console.
Your classes should always have this method.
"""
return f"{self.__class__.__name__}({self.numer}, {self.denom})"
def __add__(self, f):
"""
Add two Rationals, __add__ enables the '+' operator, thus overloading it.
"""
if type(f) == int:
# Make sure that the second object is a rational, if it's an int then convert it.
f = Rational(f)
if type(f) == Rational:
# find the least common multiple (lcm)
this_lcm = self.lcm(self.denom, f.denom)
# multiply to make denominators the same, then add numerators
this_sum = (this_lcm // self.denom * self.numer) + \
(this_lcm // f.denom * f.numer)
return Rational(this_sum, this_lcm)
else:
raise TypeError(f"Invalid type. Cannot add rational and {type(f)}.")
def __radd__(self, f):
"""
Add two Rationals (reversed). Mapping is reversed: if "1 + x", x maps to self, and 1 maps to f. We then
just call __add__ as in 'normal' addition.
"""
return self.__add__(f)
def __iadd__(self, i):
'''
Increment a rational. Implements rational += i.
'''
return self.__add__(i)
def __sub__(self, f):
"""
Subtract two Rationals. Subtraction is the same as addition with "+" changed to "-"
"""
this_lcm = self.lcm(self.denom, f.denom)
numerator_diff = (this_lcm // self.denom * self.numer) - \
(this_lcm // f.denom * f.numer)
return Rational(numerator_diff, this_lcm)
def reduce_rational(self, rational):
"""
        Return the reduced fractional value. Find the greatest common divisor (gcd) and then divide
numerator and denominator by gcd.
"""
this_gcd = self.gcd(rational.numer, rational.denom)
return Rational(rational.numer // this_gcd, rational.denom // this_gcd)
def __eq__(self, f):
"""
Compare two Rationals for equality. Reduce both; then check that numerators and denominators are equal
"""
f1 = self.reduce_rational(self)
f2 = f.reduce_rational(f)
return f1.numer == f2.numer and f1.denom == f2.denom
@staticmethod
def gcd(a, b):
"""
Greatest common divisor. This is a utility method to calculate GCD of any rational number, so as such,
it doesn't need to be in the class at all. It could be implemented as a function outside the class.
        However, it's convenient to store it in the class, but it doesn't need 'self'. We call this type of method a
static method. We haven't covered these yet so don't worry if you don't understand the concept.
"""
# Ensure that a > b, if it is not reverse a & b
if not a > b:
a, b = b, a
print(f"Initial fraction is {a}/{b}")
while b != 0:
rem = a % b
a, b = b, rem
print(f"... {a}/{b}")
print(f"GCD is {a}")
return a
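    # Worked example (illustrative): gcd(12, 8) walks 12/8 -> 8/4 -> 4/0 and
    # returns 4, so lcm(12, 8) below evaluates to 12 * 8 // 4 = 24.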
@classmethod
def lcm(cls, a, b):
"""
Least common multiple (LCM). Also a utility method implemented as a 'class' method. It could be a static
        method but as it calls the gcd method it needs to know its own class. Note that, instead of 'self', we
use 'cls'. Again, don't worry about not understanding this concept yet.
"""
print(f"LCM is {a * b // cls.gcd(a, b)}")
return (a * b // cls.gcd(a, b))
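# Illustrative usage sketch (not part of the original module): exercising the
# reversed and in-place addition hooks defined above. The helper name
# _demo_mixed_addition is hypothetical.
def _demo_mixed_addition():
    half = Rational(1, 2)
    # An int on the left of '+' makes Python fall back to Rational.__radd__.
    total = 3 + half            # Rational(7, 2)
    # '+=' routes through __iadd__, which simply reuses __add__.
    total += Rational(1, 2)     # Rational(8, 2)
    return total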
if __name__ == "__main__":
rat1 = Rational(1, 2)
rat2 = Rational(3, 4)
rat6 = Rational(6, 8)
rat3 = rat1 + rat2
rat4 = rat1.__add__(rat2)
rat5 = Rational.__add__(rat1, rat2)
rat7 = rat2 - rat2
print(rat1)
print(rat2)
print(f"{rat2} == {rat6} is {rat2 == rat6}")
# Deliberate error
rat8 = rat1 + 3.14
| 35.94702
| 115
| 0.602432
|
d760a78a562fc5634425083f5eb63174fd6ea3fb
| 1,017
|
py
|
Python
|
run.py
|
tr4n2uil/test-automate
|
ca68b2b142f987643784b1eaab69328eb979e58e
|
[
"MIT"
] | null | null | null |
run.py
|
tr4n2uil/test-automate
|
ca68b2b142f987643784b1eaab69328eb979e58e
|
[
"MIT"
] | null | null | null |
run.py
|
tr4n2uil/test-automate
|
ca68b2b142f987643784b1eaab69328eb979e58e
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
# coding: utf-8
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#desired_cap = {'os': 'ios', 'device': 'iPhone 6', 'browser': 'iPhone', 'emulator': True }
desired_cap = {'os': 'OS X', 'os_version': 'El Capitan', 'browser': 'Chrome', 'browser_version': '46' }
driver = webdriver.Remote(
#command_executor="http://vibhajrajan1:SvnxogEy3yWtWzqCuWCD@local.browserstack.com:8080/wd/hub",
command_executor="http://vibhajrajan1:SvnxogEy3yWtWzqCuWCD@local.browserstack.com/wd/hub",
#command_executor='https://vibhajrajan1:isx1GLKoDPyxvJwMZBso@66.201.41.7/wd/hub',
desired_capabilities=desired_cap)
driver.get("http://www.google.com")
if not "Google" in driver.title:
raise Exception("Unable to load google page!")
for n in range(0, 50):
print driver.title
elem = driver.find_element_by_name("q")
elem.send_keys(u'👿')
elem.submit()
print driver.title
driver.quit()
| 37.666667
| 103
| 0.745329
|
dbd8b34ac2fdd238f11219430c9057fe793c44ff
| 4,041
|
py
|
Python
|
mcrouter/test/test_mcrouter.py
|
alynx282/mcrouter
|
b16af1a119eee775b051d323cb885b73fdf75757
|
[
"MIT"
] | null | null | null |
mcrouter/test/test_mcrouter.py
|
alynx282/mcrouter
|
b16af1a119eee775b051d323cb885b73fdf75757
|
[
"MIT"
] | null | null | null |
mcrouter/test/test_mcrouter.py
|
alynx282/mcrouter
|
b16af1a119eee775b051d323cb885b73fdf75757
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
from mcrouter.test.MCProcess import Memcached
from mcrouter.test.McrouterTestCase import McrouterTestCase
from mcrouter.test.mock_servers import SleepServer
from mcrouter.test.mock_servers import ConnectionErrorServer
class TestDevNull(McrouterTestCase):
config = './mcrouter/test/test_dev_null.json'
extra_args = []
def setUp(self):
        # The order here must correspond to the order of hosts in the .json
self.mc_good = self.add_server(Memcached())
self.mc_wild = self.add_server(Memcached())
def get_mcrouter(self):
return self.add_mcrouter(self.config, extra_args=self.extra_args)
def test_dev_null(self):
mcr = self.get_mcrouter()
# finally setup is done
mcr.set("good:key", "should_be_set")
mcr.set("key", "should_be_set_wild")
mcr.set("null:key", "should_not_be_set")
mcgood_val = self.mc_good.get("good:key")
mcnull_val = self.mc_wild.get("null:key")
mcwild_val = self.mc_wild.get("key")
self.assertEqual(mcgood_val, "should_be_set")
self.assertEqual(mcnull_val, None)
self.assertEqual(mcwild_val, "should_be_set_wild")
self.assertEqual(mcr.delete("null:key2"), None)
self.assertEqual(int(mcr.stats('all')['dev_null_requests']), 2)
class TestDuplicateServers(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only one proxy destination connection is made
# for all the duplicate servers
self.assertEqual(1, len(stats))
# Hardcoding default server timeout
key = ('localhost:' + str(self.port_map[12345]) +
':ascii:plain:notcompressed-1000')
self.assertTrue(key in stats)
class TestDuplicateServersDiffTimeouts(McrouterTestCase):
config = './mcrouter/test/test_duplicate_servers_difftimeouts.json'
extra_args = []
def setUp(self):
self.wildcard = self.add_server(Memcached(), 12345)
def get_mcrouter(self):
return self.add_mcrouter(
self.config, '/a/a/', extra_args=self.extra_args)
def test_duplicate_servers_difftimeouts(self):
mcr = self.get_mcrouter()
stats = mcr.stats('servers')
# Check that only two proxy destination connections are made
# for all the duplicate servers in pools with diff timeout
self.assertEqual(2, len(stats))
# Hardcoding default server timeout
key = ('localhost:' + str(self.port_map[12345]) +
':ascii:plain:notcompressed-1000')
self.assertTrue(key in stats)
key = ('localhost:' + str(self.port_map[12345]) +
':ascii:plain:notcompressed-2000')
self.assertTrue(key in stats)
class TestPoolServerErrors(McrouterTestCase):
config = './mcrouter/test/test_pool_server_errors.json'
def setUp(self):
self.mc1 = self.add_server(Memcached())
# mc2 is ErrorRoute
self.mc3 = self.add_server(Memcached())
def test_pool_server_errors(self):
mcr = self.add_mcrouter(self.config, '/a/a/')
self.assertIsNone(mcr.get('test'))
stats = mcr.stats('servers')
self.assertEqual(2, len(stats))
self.assertTrue(mcr.set('/b/b/abc', 'valueA'))
self.assertEqual(self.mc1.get('abc'), 'valueA')
self.assertFalse(mcr.set('/b/b/a', 'valueB'))
self.assertTrue(mcr.set('/b/b/ab', 'valueC'))
self.assertEqual(self.mc3.get('ab'), 'valueC')
| 33.396694
| 76
| 0.661717
|
5f1eac462a2dcc6007189e1e39bba852ec2a1aeb
| 13,212
|
py
|
Python
|
pycalculix/cadimporter.py
|
tnakaicode/-pycalculix4win
|
c269e66d5b234275ca7905f13d186e31864523ea
|
[
"Apache-2.0"
] | null | null | null |
pycalculix/cadimporter.py
|
tnakaicode/-pycalculix4win
|
c269e66d5b234275ca7905f13d186e31864523ea
|
[
"Apache-2.0"
] | null | null | null |
pycalculix/cadimporter.py
|
tnakaicode/-pycalculix4win
|
c269e66d5b234275ca7905f13d186e31864523ea
|
[
"Apache-2.0"
] | null | null | null |
"""This module stores the CadImporter class, which is used to load CAD parts."""
import collections
import math
import os
# needed to prevent dxfgrabber from crashing on import
os.environ['DXFGRABBER_CYTHON'] = 'OFF'
import dxfgrabber # needed for dxf files
import subprocess # needed to run gmsh to make geo files
from . import geometry
from . import partmodule
class CadImporter(object):
"""Makes an object which can import cad parts.
Args:
feamodel (FeaModel): model that we want to import parts into
layer (int): layer to import, all entities will be flattened
            to one plane. -1 means all layers; any other number is a specific layer
swapxy (bool): True rotates the part from x axial to y axial
scale (str): string telling the unit conversion scalar
* Any option of 'fromunit-tounit' using the below units
* mm, m, in, ft
* Examples 'mm-m' 'm-in' 'ft-mm' 'in-ft'
* Default value is '' and does not apply a scale factor
Attributes:
__fea (FeaModel): model
__fname (str): file we want to import
__layer (int): layer to import
__swapxy (bool): If true, swap part from xy to yx orientation
"""
def __init__(self, feamodel, fname='', layer=-1, swapxy=False, scale=''):
self.__fea = feamodel
self.__fname = fname
self.__layer = layer
self.__swapxy = swapxy
self.__scale = scale
def load(self):
"""Loads the self.__fname cad file
Returns:
list: list of Part
"""
if self.__fname == '':
print('You must pass in a file name to load!')
return []
ext = os.path.splitext(self.__fname)[1]
first_pt = None
if len(self.__fea.points) > 0:
first_pt = self.__fea.points[0]
if ext == '.dxf':
parts = self.__load_dxf()
elif ext in ['.brep', '.brp', '.iges', '.igs', '.step', '.stp']:
self.__make_geo()
parts = self.__load_geo()
last_pt = None
if first_pt != None:
if len(self.__fea.points) > 2:
last_pt = self.__fea.points[-1]
if self.__scale != '':
# call scale
pass
return parts
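    # Illustrative sketch (not part of the original class): one way the scale
    # string documented above ('mm-m', 'in-ft', ...) could be turned into a
    # multiplier; the helper name _scale_factor is hypothetical.
    @staticmethod
    def _scale_factor(scale):
        """Return the factor that converts 'fromunit' lengths into 'tounit'."""
        metres_per_unit = {'mm': 0.001, 'm': 1.0, 'in': 0.0254, 'ft': 0.3048}
        if not scale:
            return 1.0
        from_unit, to_unit = scale.split('-')
        return metres_per_unit[from_unit] / metres_per_unit[to_unit]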
def __fix_tuple(self, xy_tup):
"""Adjusts the point to be in the right plane (yx)"""
if self.__swapxy:
return xy_tup[::-1]
return xy_tup
@staticmethod
def __find_make_pt(xy_tup, points_dict):
"""
Returns a point if it exists within geometry.ACC
or makes, stores and returns the point if it doesn't exist
"""
point = points_dict.get(xy_tup)
if point is not None:
return point
xy_point = geometry.Point(xy_tup[0], xy_tup[1])
for realpoint in points_dict.values():
dist = (xy_point - realpoint).length()
if dist < geometry.ACC:
return realpoint
points_dict[xy_tup] = xy_point
return xy_point
def __get_pts_lines(self, lines, arcs):
"""Returns a set of points, and a list of Lines and Arcs
Args:
lines (dxfgrabber LINE list): dxf lines
arcs (dxfgrabber ARC list): dxf arcs
Returns:
list: [list of points, list of Line and Arc]
"""
# store unique points
points_dict = {}
all_lines = []
for ind, line in enumerate(lines):
tup = self.__fix_tuple((line.start[0], line.start[1]))
start = self.__find_make_pt(tup, points_dict)
tup = self.__fix_tuple((line.end[0], line.end[1]))
end = self.__find_make_pt(tup, points_dict)
line = geometry.Line(start, end)
all_lines.append(line)
for ind, arc in enumerate(arcs):
# dxfgrabber arcs are stored ccw when looking at xy plane
# x horizontal
# y vertical
tup = self.__fix_tuple((arc.center[0], arc.center[1]))
center = self.__find_make_pt(tup, points_dict)
sign = -1
if self.__swapxy:
sign = 1
startangle = arc.start_angle*sign
endangle = arc.end_angle*sign
angle = endangle - startangle
if arc.end_angle < arc.start_angle:
angle = angle + 360*sign
"""
print('---------------------------------------')
print('| ARC')
print('center: %s' % center)
print('startangle: %f' % startangle)
print('endangle: %f' % endangle)
print('traversed_angle: %f' % angle)
"""
start_vect = geometry.Point(0, arc.radius)
if self.__swapxy == False:
start_vect = geometry.Point(arc.radius, 0)
start_vect.rot_ccw_deg(arc.start_angle*sign)
end_vect = geometry.Point(0, arc.radius)
if self.__swapxy == False:
end_vect = geometry.Point(arc.radius, 0)
end_vect.rot_ccw_deg(arc.end_angle*sign)
start = center + start_vect
start_tup = (start.x, start.y)
end = center + end_vect
end_tup = (end.x, end.y)
start = self.__find_make_pt(start_tup, points_dict)
end = self.__find_make_pt(end_tup, points_dict)
rvect = start - center
if abs(angle) <= 90:
arc = geometry.Arc(start, end, center)
all_lines.append(arc)
print('1 arc made')
continue
#print(' %s' % arc)
pieces = math.ceil(abs(angle)/90)
print('%i arcs being made' % pieces)
points = [start, end]
# 2 pieces need 3 points, we have start + end already --> 1 pt
inserts = pieces + 1 - 2
piece_ang = angle/pieces
#print('piece_ang = %f' % piece_ang)
while inserts > 0:
rvect.rot_ccw_deg(piece_ang)
point = center + rvect
tup = (point.x, point.y)
point = self.__find_make_pt(tup, points_dict)
points.insert(-1, point)
inserts = inserts - 1
for ind in range(len(points)-1):
#print(' %s' % arc)
arc = geometry.Arc(points[ind], points[ind+1], center)
all_lines.append(arc)
for line in all_lines:
line.save_to_points()
return [list(points_dict.values()), all_lines]
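    # Worked example for the arc handling above (illustrative): an arc that
    # sweeps 210 degrees is split into math.ceil(210 / 90) == 3 pieces of 70
    # degrees each, so two intermediate points are inserted between the
    # original start and end points.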
def __make_geo(self):
"""Makes a gmsh geo file given a step, iges, or brep input"""
# gmsh freecad_part.iges -o out_iges.geo -0
fname_list = self.__fname.split('.')
geo_file = fname_list[0]+'.geo'
runstr = "%s %s -o %s -0" % (os.getenv("GMSH"), self.__fname, geo_file)
print(runstr)
subprocess.call(runstr, shell=True)
print('Wrote file: %s' % geo_file)
def __load_geo(self):
"""Loads in a gmsh geo file and returns a list of parts
Returns:
list: list of Part
"""
pass
# process any splines? and turn them into arcs
# http://www.mathopenref.com/constcirclecenter.html
# find max dist between points
# double it
# select two segments
# draw normal lines
# find intersections, that is the center
@staticmethod
def __dangling_points(all_points):
return [point for point in all_points
if len(point.lines) == 1 and not point.arc_center]
def __load_dxf(self):
"""Loads in a dxf file and returns a list of parts
Returns:
list: list of Part
"""
print('Loading file: %s' % self.__fname)
dwg = dxfgrabber.readfile(self.__fname)
lines = [item for item in dwg.entities if item.dxftype == 'LINE']
arcs = [item for item in dwg.entities if item.dxftype == 'ARC']
if self.__layer > -1:
lines = [item for item in lines if item.layer == self.__layer]
arcs = [item for item in arcs if item.layer == self.__layer]
print('File read.')
print('Loaded %i lines' % len(lines))
print('Loaded %i arcs' % len(arcs))
print('Loaded %i line segments, lines or arcs' %
(len(lines)+len(arcs)))
# get all points and Line and Arc using pycalculix entities
print('Converting to pycalculix lines arcs and points ...')
all_points, all_lines = self.__get_pts_lines(lines, arcs)
print('Loaded %i line segments, lines or arcs' % len(all_lines))
print('Loaded %i points' % len(all_points))
# for point in all_points:
# print('%s %s' % (point, point.lines))
# for line in all_lines:
# print('%s %s' % (line, line.points))
# remove all lines that are not part of areas
dangling_points = self.__dangling_points(all_points)
pruned_geometry = bool(dangling_points)
while dangling_points:
for point in dangling_points:
all_points.remove(point)
print('Removed point= %s' % point)
dangling_line = list(point.lines)[0]
point.unset_line(dangling_line)
if dangling_line in all_lines:
all_lines.remove(dangling_line)
print('Removed line= %s' % dangling_line)
dangling_points = self.__dangling_points(all_points)
if pruned_geometry:
print('Remaining line segments: %i' % len(all_lines))
print('Remaining points: %i' % len(all_points))
# make line all_loops now
all_loops = []
line = all_lines[0]
this_loop = geometry.LineLoop()
while len(all_lines) > 0:
this_loop.append(line)
all_lines.remove(line)
if this_loop.closed == True:
all_loops.append(this_loop)
this_loop = geometry.LineLoop()
if all_lines:
line = all_lines[0]
continue
point = line.pt(1)
other_lines = point.lines - set([line])
if len(other_lines) > 1:
# note: one could exclude connected segment nodes
# make disconnected line all_loops, then have another
                # loop to connect those disconnected line all_loops
print('One point was connected to > 2 lines.')
print('Only import simple part all_loops, or surfaces.')
raise Exception('Import geometry is too complex')
next_line = list(other_lines)[0]
if line.pt(1) != next_line.pt(0):
next_line.reverse()
line = next_line
# find exterior loops
exterior_loops = []
for ind, loop in enumerate(all_loops):
other_loops = all_loops[ind+1:]
other_loops.extend(exterior_loops)
is_exterior = True
for other_loop in other_loops:
if loop.inside(other_loop):
is_exterior = False
break
if is_exterior:
# exterior must be clockwise
if loop.ccw:
loop.reverse()
exterior_loops.append(loop)
# remove the found part exterior loops from all_loops
for exterior_loop in exterior_loops:
all_loops.remove(exterior_loop)
# each part in parts is a list of line all_loops
# [exterior, hole1, hole2]
parts = [[exterior_loop] for exterior_loop in exterior_loops]
# now place the child hole loops after the part exterior loop
for part_loops in parts:
exterior_loop = part_loops[0]
# find child holes
for hole_loop in all_loops:
if hole_loop.inside(exterior_loop):
hole_loop.hole = True
# holes must be ccw
if not hole_loop.ccw:
hole_loop.reverse()
part_loops.append(hole_loop)
# remove child holes from loop list
for hole_loop in part_loops[1:]:
all_loops.remove(hole_loop)
# make parts
parts_list = []
for part_loops in parts:
this_part = partmodule.Part(self.__fea)
for ind, loop in enumerate(part_loops):
is_hole = loop.hole
start = loop[0].pt(0)
this_part.goto(start.x, start.y, is_hole)
for item in loop:
if isinstance(item, geometry.Line):
end = item.pt(1)
this_part.draw_line_to(end.x, end.y)
elif isinstance(item, geometry.Arc):
end = item.pt(1)
center = item.actr
this_part.draw_arc(end.x, end.y, center.x, center.y)
parts_list.append(this_part)
print('Parts created: %i' % len(parts_list))
return parts_list
| 39.321429
| 82
| 0.548819
|
671a2e7e071ec01724b5691461ac1b52c3e191bc
| 6,404
|
py
|
Python
|
NLP/GPT2/run_generation.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | null | null | null |
NLP/GPT2/run_generation.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | null | null | null |
NLP/GPT2/run_generation.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import oneflow as flow
import numpy as np
from model_config import GPT2Config
from model import GPT2LMHeadModel
from tokenizer import build_tokenizer
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
def set_seed(args):
np.random.seed(args.seed)
flow.manual_seed(args.seed)
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < flow.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
# todo: support top_p
# if top_p > 0.0:
# sorted_logits, sorted_indices = flow.sort(logits, descending=True)
# cumulative_probs = flow.cumsum(flow.softmax(sorted_logits, dim=-1), dim=-1)
# # Remove tokens with cumulative probability above the threshold
# sorted_indices_to_remove = cumulative_probs > top_p
# # Shift the indices to the right to keep also the first token above the threshold
# sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
# sorted_indices_to_remove[..., 0] = 0
# indices_to_remove = sorted_indices[sorted_indices_to_remove]
# logits[indices_to_remove] = filter_value
return logits
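# Worked example (illustrative, not part of the original script): with logits
# [[1.0, 3.0, 0.5, 2.0]] and top_k=2, flow.topk keeps 3.0 and 2.0, so the
# filtered row becomes [-inf, 3.0, -inf, 2.0]; top_p filtering is still a todo.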
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=1, top_p=0.0, device='cuda'):
context = flow.tensor(context, dtype=flow.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
past_key_values = None
with flow.no_grad():
for _ in trange(length):
outputs = model(generated, past_key_values=past_key_values, use_cache=True)
logits, past_key_values = outputs[:2]
next_token_logits = logits[:, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
probs = filtered_logits.softmax(-1)
next_token = probs.argmax(-1)
# next_token = flow.multinomial(flow.softmax(filtered_logits, dim=-1), num_samples=1)
generated = flow.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--vocab_file", default="gpt2-vocab.json", type=str)
parser.add_argument("--merges_file", default="gpt2-merges.txt", type=str)
parser.add_argument("--restore_file", default="gpt2_oneflow_model", type=str, help="Path to pre-trained model")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=1)
parser.add_argument("--top_p", type=float, default=0.9)
parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
args = parser.parse_args()
args.device = flow.device("cuda" if not args.no_cuda else "cpu")
set_seed(args)
tokenizer = build_tokenizer(vocab_file=args.vocab_file, merges_file=args.merges_file, tokenizer_type="GPT2BPETokenizer")
config = GPT2Config()
model = GPT2LMHeadModel(config)
if args.restore_file is not None:
model.load_state_dict(flow.load(args.restore_file))
model.lm_head.weight = model.transformer.wte.weight
model.to(args.device)
model.eval()
if args.length < 0 and config.max_position_embeddings > 0:
args.length = config.max_position_embeddings
elif 0 < config.max_position_embeddings < args.length:
args.length = config.max_position_embeddings # No generation bigger than model size
elif args.length < 0:
args.length = MAX_LENGTH # avoid infinite loop
print(args)
while True:
raw_text = args.prompt if args.prompt else input("Model prompt >>> ")
context_tokens = tokenizer.tokenize(raw_text)
out = sample_sequence(
model=model,
context=context_tokens,
length=args.length,
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
)
out = out[0, len(context_tokens):].tolist()
text = tokenizer.detokenize(out)
print(text)
if args.prompt:
break
return text
if __name__ == '__main__':
main()
| 41.856209
| 124
| 0.686914
|
e5107e1707993e6b0ea9a6e4a05a7c8e57202625
| 115
|
py
|
Python
|
pylint_beergarden/__init__.py
|
gershwinlabs/pylint-beergarden
|
1a581c4dbf73eb5b1cd3b945291d9c0aca0cd55c
|
[
"MIT"
] | null | null | null |
pylint_beergarden/__init__.py
|
gershwinlabs/pylint-beergarden
|
1a581c4dbf73eb5b1cd3b945291d9c0aca0cd55c
|
[
"MIT"
] | null | null | null |
pylint_beergarden/__init__.py
|
gershwinlabs/pylint-beergarden
|
1a581c4dbf73eb5b1cd3b945291d9c0aca0cd55c
|
[
"MIT"
] | null | null | null |
# vim: expandtab tabstop=4 shiftwidth=4
from pylint_beergarden import plugin, checker
register = plugin.register
| 19.166667
| 45
| 0.808696
|
00dc588a4688a086685c490e20bd1bd7c71b2a19
| 2,068
|
py
|
Python
|
homeassistant/components/binary_sensor/modbus.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | 37
|
2018-05-22T07:17:26.000Z
|
2022-03-03T13:14:46.000Z
|
homeassistant/components/binary_sensor/modbus.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | 125
|
2018-12-11T07:31:20.000Z
|
2021-07-27T08:20:03.000Z
|
homeassistant/components/binary_sensor/modbus.py
|
EmitKiwi/home-assistant
|
0999e2ddc476f4bddf710005168b082f03a7cdc0
|
[
"Apache-2.0"
] | 8
|
2018-05-30T20:05:26.000Z
|
2021-02-19T14:17:05.000Z
|
"""
Support for Modbus Coil sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.modbus/
"""
import logging
import voluptuous as vol
import homeassistant.components.modbus as modbus
from homeassistant.const import CONF_NAME, CONF_SLAVE
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COIL = 'coil'
CONF_COILS = 'coils'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COILS): [{
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int
}]
})
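# Illustrative configuration sketch matching the schema above (assumed, not
# taken from the original file); the names and numbers are made up:
#
#   binary_sensor:
#     - platform: modbus
#       coils:
#         - name: Pump running
#           slave: 1
#           coil: 100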
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Modbus binary sensors."""
sensors = []
for coil in config.get(CONF_COILS):
sensors.append(ModbusCoilSensor(
coil.get(CONF_NAME),
coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
add_devices(sensors)
class ModbusCoilSensor(BinarySensorDevice):
"""Modbus coil sensor."""
def __init__(self, name, slave, coil):
"""Initialize the modbus coil sensor."""
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._value = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the sensor."""
return self._value
def update(self):
"""Update the state of the sensor."""
result = modbus.HUB.read_coils(self._slave, self._coil, 1)
try:
self._value = result.bits[0]
except AttributeError:
_LOGGER.error(
'No response from modbus slave %s coil %s',
self._slave,
self._coil)
| 28.722222
| 74
| 0.663443
|
289c8317fafc084793c303577de5a0a539d2555d
| 4,464
|
py
|
Python
|
CybORG/CybORG/Shared/BaselineRewardCalculator.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 18
|
2021-08-20T15:07:55.000Z
|
2022-03-11T12:05:15.000Z
|
CybORG/CybORG/Shared/BaselineRewardCalculator.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 7
|
2021-11-09T06:46:58.000Z
|
2022-03-31T12:35:06.000Z
|
CybORG/CybORG/Shared/BaselineRewardCalculator.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 13
|
2021-08-17T00:26:31.000Z
|
2022-03-29T20:06:45.000Z
|
# Copyright DST Group. Licensed under the MIT license.
from CybORG.Shared.Actions.Action import Action
from CybORG.Shared.RewardCalculator import RewardCalculator
WIN_REWARD = 1
LOSE_REWARD = -1
REWARD_MAX_DECIMAL_PLACES = 3
REWARD_DIFF_SCALE_FACTOR = 1000
class BaselineRewardCalculator(RewardCalculator):
def __init__(self, agent_name: str):
super().__init__(agent_name)
self.previous_diff = 0
self.flat = True
def calculate_reward(self, current_state: dict, action: Action, agent_observations: dict, done: bool):
reward = 0
if done:
# check for win
win = True
for key, host in current_state.items():
if key != "success" and isinstance(host, dict):
for session in host.get("Sessions", []):
if "Agent" in session and session["Agent"] == "Red":
win = False
break
if not win:
break
# check for win or loss
if win:
reward += WIN_REWARD
else:
reward += LOSE_REWARD
current_diff = 0
for i in range(len(self.init_state)):
if self.init_state[i] != flat_fixed_state[i]:
current_diff -= 1
# Code for calculating reward with recursive methods.
# Note that self.init_state will need to be an observation
# rather than a flat fixed list for this to work
#
# current_diff = self.obs_diff(self.init_state.get_dict(),
# current_state.get_dict())
diff = (current_diff - self.previous_diff)
reward += diff / REWARD_DIFF_SCALE_FACTOR
self.previous_diff = current_diff
self.previous_state = flat_fixed_state
self.previous_obs = agent_observations
self.tick()
return round(reward, REWARD_MAX_DECIMAL_PLACES)
# Method to find number of differences between two observations as
# dictionaries
def obs_diff(self, init, current):
reward = 0
if init == current:
return reward
list_init = {}
list_current = {}
val_init = {}
val_current = {}
shared_keys = []
for k, v in current.items():
if type(v) is dict:
if k in init:
reward += self.obs_diff(init[k], current[k])
shared_keys.append(k)
else:
reward -= 1 + self.obs_size(v)
elif type(v) is list:
list_current[k] = v
else:
val_current[k] = v
for k, v in init.items():
if type(v) is dict:
if k not in shared_keys:
reward -= 1 + self.obs_size(init[k])
elif type(v) is list:
list_init[k] = v
else:
val_init[k] = v
for k, v in list_init.items():
if k in list_current:
for d_init in v:
for d_cur in list_current[k]:
if d_init == d_cur:
v.remove(d_init)
list_current[k].remove(d_cur)
for i in range(len(v)):
if i < len(list_current[k]):
reward += self.obs_diff(v[i], list_current[k][i])
else:
reward -= self.obs_size(v[i])
for i in range(len(v), len(list_current[k])):
reward -= self.obs_size(list_current[k][i])
list_current.pop(k)
else:
reward -= 1
for d in v:
reward -= self.obs_size(d)
for k, v in list_current.items():
reward -= 1
for d in v:
reward -= self.obs_size(d)
reward -= len(dict(val_init.items() ^ val_current.items()))
return reward
# Method to find the size of an observation as a dictionary
def obs_size(self, d):
count = 0
for k, v in d.items():
if type(v) is dict:
count += 1 + self.obs_size(v)
elif type(v) is list:
count += 1
for i in v:
count += self.obs_size(i)
else:
count += 1
return count
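    # Worked example (illustrative): obs_size({'a': 1, 'b': {'c': 2}}) counts
    # 1 for 'a' plus 1 for the nested dict 'b' plus 1 for 'c', giving 3.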
| 33.56391
| 106
| 0.499776
|
85714bb7a1ae08a253c7b81a878b4d2d5fb5b727
| 278
|
py
|
Python
|
paramiko_tutorial/files.py
|
Full-Stack-Cloud-Developer/paramiko-tutorial
|
e890a9770ef896fde43e93d683470b9ae736453c
|
[
"MIT"
] | 1
|
2020-11-25T12:46:10.000Z
|
2020-11-25T12:46:10.000Z
|
paramiko_tutorial/files.py
|
jbrdge/paramiko-tutorial
|
90151d4b432e69f089a41079a05387d8c235e697
|
[
"MIT"
] | null | null | null |
paramiko_tutorial/files.py
|
jbrdge/paramiko-tutorial
|
90151d4b432e69f089a41079a05387d8c235e697
|
[
"MIT"
] | null | null | null |
"""Find local files to be uploaded to remote host."""
import os
def fetch_local_files(local_file_dir):
"""Create list of file paths."""
local_files = os.walk(local_file_dir)
for root, dirs, files in local_files:
return [f'{root}/{file}' for file in files]
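# Illustrative call (hypothetical paths): fetch_local_files('./data') returns
# something like ['./data/a.txt', './data/b.txt'] for the files directly inside
# ./data; the early return means nested directories are not walked.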
| 27.8
| 53
| 0.683453
|
15daf5fc39f3766110be8a8f733f328fe07b3d76
| 490
|
py
|
Python
|
data/scripts/templates/object/tangible/ship/components/droid_interface/shared_ddi_sfs_imperial_2.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/ship/components/droid_interface/shared_ddi_sfs_imperial_2.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/ship/components/droid_interface/shared_ddi_sfs_imperial_2.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/droid_interface/shared_ddi_sfs_imperial_2.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","ddi_sfs_imperial_2_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 28.823529
| 98
| 0.746939
|
1d75f6c94f340178a26d4fd94c2c4311c301cd1b
| 5,120
|
py
|
Python
|
private_sharing/urls.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 57
|
2016-09-01T21:55:52.000Z
|
2022-03-27T22:15:32.000Z
|
private_sharing/urls.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 464
|
2015-03-23T18:08:28.000Z
|
2016-08-25T04:57:36.000Z
|
private_sharing/urls.py
|
danamlewis/open-humans
|
9b08310cf151f49032b66ddd005bbd47d466cc4e
|
[
"MIT"
] | 25
|
2017-01-24T16:23:27.000Z
|
2021-11-07T01:51:42.000Z
|
from django.urls import path, re_path
from django.views.generic import TemplateView
from . import views
app_name = "direct-sharing"
urlpatterns = [
path(
"projects/oauth2/create/",
views.CreateOAuth2DataRequestProjectView.as_view(),
name="create-oauth2",
),
path(
"projects/on-site/create/",
views.CreateOnSiteDataRequestProjectView.as_view(),
name="create-on-site",
),
re_path(
r"^projects/registered_datatypes/(?P<slug>[a-z0-9_-]+)/$",
views.SelectDatatypesView.as_view(),
name="select-datatypes",
),
re_path(
r"^projects/on-site/join/(?P<slug>[a-z0-9_-]+)/$",
views.JoinOnSiteDataRequestProjectView.as_view(),
name="join-on-site",
),
re_path(
r"^projects/on-site/authorize/(?P<slug>[a-z0-9_-]+)/$",
views.AuthorizeOnSiteDataRequestProjectView.as_view(),
name="authorize-on-site",
),
# Override /oauth2/authorize/ to specify our own context data
path(
"projects/oauth2/authorize/",
views.AuthorizeOAuth2ProjectView.as_view(),
name="authorize-oauth2",
),
path(
"authorize-inactive/",
TemplateView.as_view(template_name="private_sharing/authorize-inactive.html"),
name="authorize-inactive",
),
re_path(
r"^projects/leave/(?P<pk>[0-9]+)/$",
views.ProjectLeaveView.as_view(),
name="leave-project",
),
re_path(
r"^projects/oauth2/update/(?P<slug>[a-z0-9_-]+)/$",
views.UpdateOAuth2DataRequestProjectView.as_view(),
name="update-oauth2",
),
re_path(
r"^projects/on-site/update/(?P<slug>[a-z0-9_-]+)/$",
views.UpdateOnSiteDataRequestProjectView.as_view(),
name="update-on-site",
),
re_path(
r"^projects/oauth2/(?P<slug>[a-z0-9_-]+)/$",
views.OAuth2DataRequestProjectDetailView.as_view(),
name="detail-oauth2",
),
re_path(
r"^projects/on-site/(?P<slug>[a-z0-9_-]+)/$",
views.OnSiteDataRequestProjectDetailView.as_view(),
name="detail-on-site",
),
path(
"projects/manage/",
views.ManageDataRequestActivitiesView.as_view(),
name="manage-projects",
),
re_path(
r"^projects/message/(?P<slug>[a-z0-9_-]+)/$",
views.MessageProjectMembersView.as_view(),
name="message-members",
),
re_path(
r"^projects/remove-members/(?P<slug>[a-z0-9_-]+)/$",
views.RemoveProjectMembersView.as_view(),
name="remove-members",
),
re_path(
r"^projects/withdrawn-members/(?P<slug>[a-z0-9_-]+)/$",
views.DataRequestProjectWithdrawnView.as_view(),
name="withdrawn-members",
),
re_path(
r"^in-development/$", views.InDevelopmentView.as_view(), name="in-development"
),
# Documentation
path("overview/", views.OverviewView.as_view(), name="overview"),
path(
"approval/",
TemplateView.as_view(template_name="direct-sharing/approval.html"),
name="project-approval",
),
path(
"on-site-features/",
TemplateView.as_view(template_name="direct-sharing/on-site-features.html"),
name="on-site-features",
),
path(
"on-site-setup/",
TemplateView.as_view(template_name="direct-sharing/on-site-setup.html"),
name="on-site-setup",
),
path(
"on-site-data-access/",
TemplateView.as_view(template_name="direct-sharing/on-site-data-access.html"),
name="on-site-data-access",
),
path(
"on-site-messages/",
TemplateView.as_view(template_name="direct-sharing/on-site-messages.html"),
name="on-site-messages",
),
path(
"on-site-member-removal/",
TemplateView.as_view(
template_name="direct-sharing/on-site-member-removal.html"
),
name="on-site-member-removal",
),
path(
"on-site-data-upload/",
TemplateView.as_view(template_name="direct-sharing/on-site-data-upload.html"),
name="on-site-data-upload",
),
path(
"oauth2-features/",
TemplateView.as_view(template_name="direct-sharing/oauth2-features.html"),
name="oauth2-features",
),
path(
"oauth2-setup/",
TemplateView.as_view(template_name="direct-sharing/oauth2-setup.html"),
name="oauth2-setup",
),
path(
"oauth2-data-access/",
TemplateView.as_view(template_name="direct-sharing/oauth2-data-access.html"),
name="oauth2-data-access",
),
path(
"oauth2-messages/",
TemplateView.as_view(template_name="direct-sharing/oauth2-messages.html"),
name="oauth2-messages",
),
path(
"oauth2-member-removal/",
TemplateView.as_view(template_name="direct-sharing/oauth2-member-removal.html"),
name="oauth2-member-removal",
),
path(
"oauth2-data-upload/",
TemplateView.as_view(template_name="direct-sharing/oauth2-data-upload.html"),
name="oauth2-data-upload",
),
]
| 31.411043
| 88
| 0.604297
|
1aae79f49423c7b3ae9372c5d001c1edb7ffb5ab
| 6,197
|
py
|
Python
|
rl_player_train.py
|
ScarfZapdos/showdownbot
|
02c5ee6d8fb76fbaa5dc4f8b41e2ab6e29d6024d
|
[
"MIT"
] | null | null | null |
rl_player_train.py
|
ScarfZapdos/showdownbot
|
02c5ee6d8fb76fbaa5dc4f8b41e2ab6e29d6024d
|
[
"MIT"
] | null | null | null |
rl_player_train.py
|
ScarfZapdos/showdownbot
|
02c5ee6d8fb76fbaa5dc4f8b41e2ab6e29d6024d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import os
from poke_env.player.env_player import Gen8EnvSinglePlayer
from poke_env.player.random_player import RandomPlayer
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.backend import set_session, get_session
#DEEPLY INSPIRED BY POKE-ENV EXAMPLE
tf_config = tf.compat.v1.ConfigProto(
device_count={'GPU': 0},
intra_op_parallelism_threads=1,
allow_soft_placement=True)
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.6
tf.keras.backend.clear_session()
sess = tf.compat.v1.Session(config=tf_config)
graph = tf.compat.v1.get_default_graph()
# We define our RL player
# It needs a state embedder and a reward computer, hence these two methods
class SimpleRLPlayer(Gen8EnvSinglePlayer):
def embed_battle(self, battle):
# -1 indicates that the move does not have a base power
# or is not available
moves_base_power = -np.ones(4)
moves_dmg_multiplier = np.ones(4)
for i, move in enumerate(battle.available_moves):
moves_base_power[i] = (
move.base_power / 100
) # Simple rescaling to facilitate learning
if move.type and battle.opponent_active_pokemon:
moves_dmg_multiplier[i] = move.type.damage_multiplier(
battle.opponent_active_pokemon.type_1,
battle.opponent_active_pokemon.type_2,
)
        # We count the fraction of fainted pokemons in each team
remaining_mon_team = (
len([mon for mon in battle.team.values() if mon.fainted]) / 6
)
remaining_mon_opponent = (
len([mon for mon in battle.opponent_team.values() if mon.fainted]) / 6
)
# Final vector with 10 components
return np.concatenate(
[
moves_base_power,
moves_dmg_multiplier,
[remaining_mon_team, remaining_mon_opponent],
]
)
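    # Layout of the embedding above (illustrative reading of the code): four
    # rescaled base powers, four type-effectiveness multipliers, then the
    # fainted fraction for each team -- 10 numbers in total.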
def compute_reward(self, battle) -> float:
return self.reward_computing_helper(
battle, fainted_value=2, hp_value=1, victory_value=30
)
class MaxDamagePlayer(RandomPlayer):
def choose_move(self, battle):
# If the player can attack, it will
if battle.available_moves:
# Finds the best move among available ones
best_move = max(battle.available_moves, key=lambda move: move.base_power)
return self.create_order(best_move)
# If no attack is available, a random switch will be made
else:
return self.choose_random_move(battle)
NB_TRAINING_STEPS = 5000
NB_EVALUATION_EPISODES = 100
tf.random.set_seed(0)
np.random.seed(0)
# This is the function that will be used to train the dqn
def dqn_training(player, dqn, nb_steps):
global sess
global graph
with graph.as_default():
set_session(sess)
dqn.fit(player, nb_steps=nb_steps)
player.complete_current_battle()
def dqn_evaluation(player, dqn, nb_episodes):
# Reset battle statistics
global sess
global graph
with graph.as_default():
set_session(sess)
player.reset_battles()
dqn.test(player, nb_episodes=nb_episodes, visualize=False, verbose=False)
print(
"DQN Evaluation: %d victories out of %d episodes"
% (player.n_won_battles, nb_episodes)
)
if __name__ == "__main__":
env_player = SimpleRLPlayer(battle_format="gen8randombattle")
print(env_player.action_space)
opponent = RandomPlayer(battle_format="gen8randombattle")
second_opponent = MaxDamagePlayer(battle_format="gen8randombattle")
# Output dimension
n_action = len(env_player.action_space)
model = Sequential()
model.add(Dense(128, activation="elu", name='Input', input_shape=(1, 10)))
    # Our embedding has shape (1, 10), which affects our hidden layer
    # dimension and output dimension
    # Flattening resolves potential issues that would arise otherwise
model.add(Flatten(name='Flatten1'))
model.add(Dense(64, name='Hidden1', activation="elu"))
model.add(Dense(n_action, name='Hidden2', activation="linear"))
memory = SequentialMemory(limit=5000, window_length=1)
# Simple epsilon greedy
policy = LinearAnnealedPolicy(
EpsGreedyQPolicy(),
attr="eps",
value_max=1.0,
value_min=0.05,
value_test=0,
nb_steps=5000,
)
# Defining our DQN
dqn = DQNAgent(
model=model,
nb_actions=len(env_player.action_space),
policy=policy,
memory=memory,
nb_steps_warmup=750,
gamma=0.5,
target_model_update=1,
delta_clip=0.01,
enable_double_dqn=True,
)
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
dqn.compile(Adam(lr=0.00025), metrics=["mae"])
# Training
with graph.as_default():
env_player.play_against(
env_algorithm=dqn_training,
opponent=opponent,
env_algorithm_kwargs={"dqn": dqn, "nb_steps": NB_TRAINING_STEPS},
)
model.save("model_%d" % NB_TRAINING_STEPS)
# Evaluation
print("Results against random player:")
env_player.play_against(
env_algorithm=dqn_evaluation,
opponent=opponent,
env_algorithm_kwargs={"dqn": dqn, "nb_episodes": NB_EVALUATION_EPISODES},
)
print("\nResults against max player:")
env_player.play_against(
env_algorithm=dqn_evaluation,
opponent=second_opponent,
env_algorithm_kwargs={"dqn": dqn, "nb_episodes": NB_EVALUATION_EPISODES},
)
| 31.943299
| 86
| 0.648701
|
747390a1ab0557c66ea8a047e2e1c213ed918e00
| 21,530
|
py
|
Python
|
urllib3/util/selectors.py
|
lslebodn/urllib3
|
da86fb6c3f44281d808ceb14d8f2a01895cab6e5
|
[
"MIT"
] | null | null | null |
urllib3/util/selectors.py
|
lslebodn/urllib3
|
da86fb6c3f44281d808ceb14d8f2a01895cab6e5
|
[
"MIT"
] | null | null | null |
urllib3/util/selectors.py
|
lslebodn/urllib3
|
da86fb6c3f44281d808ceb14d8f2a01895cab6e5
|
[
"MIT"
] | null | null | null |
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple
from ..packages.six import integer_types
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError):  # Python < 3.3
monotonic = time.time
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
_DEFAULT_SELECTOR = None
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, integer_types):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Determine which function to use to wrap system calls because Python 3.5+
# already handles the case when system calls are interrupted.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all system calls automatically restart
and recalculate their timeouts. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in kwargs to be recalculated")
        # timeout for recalculation must be a keyword argument
timeout = kwargs.get("timeout", None)
# Based on the timeout, determine call expiry time
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno") and e.errno is not None:
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno.ETIMEDOUT, "Connection timed out")
if recalc_timeout:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
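# Call convention used throughout this module (illustrative): the first
# argument is the syscall to retry, the second says whether the 'timeout'
# keyword should shrink after an interrupt, e.g.
# _syscall_wrapper(self._wrap_select, True, self._readers, self._writers,
#                  timeout=timeout).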
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as e: # Platform-specific: Windows.
if e.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _wrap_select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
self._writers, timeout=timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since registry.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._wrap_control, True,
None, max_events, timeout=timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
def _wrap_control(self, changelist, max_events, timeout=None):
return self._kqueue.control(changelist, max_events, timeout)
if not hasattr(select, 'select'): # Platform-specific: AppEngine
HAS_SELECT = False
def _can_allocate(struct):
""" Checks that select structs can be allocated by the underlying
operating system, not just advertised by the select module. We don't
check select() because we'll be hopeful that most platforms that
don't have it available will not advertise it. (ie: GAE) """
try:
# select.poll() objects won't fail until used.
if struct == 'poll':
p = select.poll()
p.poll(0)
# All others will fail on allocation.
else:
getattr(select, struct)().close()
return True
except (OSError, AttributeError):
return False
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
def DefaultSelector():
""" This function serves as a first call for DefaultSelector to
detect if the select module is being monkey-patched incorrectly
by eventlet, greenlet, and preserve proper behavior. """
global _DEFAULT_SELECTOR
if _DEFAULT_SELECTOR is None:
if _can_allocate('kqueue'):
_DEFAULT_SELECTOR = KqueueSelector
elif _can_allocate('epoll'):
_DEFAULT_SELECTOR = EpollSelector
elif _can_allocate('poll'):
_DEFAULT_SELECTOR = PollSelector
elif hasattr(select, 'select'):
_DEFAULT_SELECTOR = SelectSelector
else: # Platform-specific: AppEngine
raise ValueError('Platform does not have a selector')
return _DEFAULT_SELECTOR()
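# Hedged usage sketch (not part of the backport): exercises the selector API above
# with a socketpair; assumes the module-level `socket` import this file already
# relies on, and that the platform supports socket.socketpair().
if __name__ == "__main__":
    rsock, wsock = socket.socketpair()
    try:
        selector = DefaultSelector()
        selector.register(rsock, EVENT_READ, data="readable end")
        wsock.send(b"ping")
        # select() returns (SelectorKey, events) pairs for the ready objects.
        for key, events in selector.select(timeout=1.0):
            if events & EVENT_READ:
                print(key.data, key.fileobj.recv(16))
        selector.unregister(rsock)
        selector.close()
    finally:
        rsock.close()
        wsock.close()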
| 36.306914
| 89
| 0.56614
|
76ef55080249585331b0153c7e9f49d33aca787b
| 1,024
|
py
|
Python
|
DifferentiableHOS/Plot.py
|
LSSTDESC/DifferentiableHOS
|
7c83f6c9e4a820a0420adef113e0f2435732fe3e
|
[
"MIT"
] | 1
|
2021-01-18T14:44:14.000Z
|
2021-01-18T14:44:14.000Z
|
DifferentiableHOS/Plot.py
|
LSSTDESC/DifferentiableHOS
|
7c83f6c9e4a820a0420adef113e0f2435732fe3e
|
[
"MIT"
] | 13
|
2021-02-19T09:59:41.000Z
|
2021-12-13T09:52:41.000Z
|
DifferentiableHOS/Plot.py
|
LSSTDESC/DifferentiableHOS
|
7c83f6c9e4a820a0420adef113e0f2435732fe3e
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
def plot_contours(fisher, pos, nstd=1., ax=None, **kwargs):
"""
Plot 2D parameter contours given a Hessian matrix of the likelihood
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:, order]
mat = fisher
cov = np.linalg.inv(mat)
sigma_marg = lambda i: np.sqrt(cov[i, i])
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
sz = max(width, height)
s1 = 1.5 * nstd * sigma_marg(0)
s2 = 1.5 * nstd * sigma_marg(1)
ax.set_xlim(pos[0] - s1, pos[0] + s1)
ax.set_ylim(pos[1] - s2, pos[1] + s2)
plt.draw()
return ellip
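# Hedged usage sketch (the 2x2 Fisher matrix and fiducial point below are made up):
# invert the Fisher matrix and draw the corresponding 1-sigma confidence ellipse.
if __name__ == "__main__":
    fisher = np.array([[40.0, 10.0],
                       [10.0, 20.0]])  # Hessian of the log-likelihood at the fiducial point
    fig, ax = plt.subplots()
    plot_contours(fisher, pos=(0.3, 0.8), nstd=1., ax=ax, alpha=0.4)
    ax.set_xlabel('parameter 1')
    ax.set_ylabel('parameter 2')
    plt.show()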
| 27.675676
| 78
| 0.610352
|
fb77a5ef6a73df2a997f5848847b70fb3f2f2127
| 1,150
|
py
|
Python
|
backend/util/string.py
|
artontech/ArtonFileManager
|
b099c5294ab731b0a0f1eb7dbe35397df4515863
|
[
"Apache-2.0"
] | 1
|
2020-11-17T12:45:47.000Z
|
2020-11-17T12:45:47.000Z
|
backend/util/string.py
|
artontech/ArtonFileManager
|
b099c5294ab731b0a0f1eb7dbe35397df4515863
|
[
"Apache-2.0"
] | null | null | null |
backend/util/string.py
|
artontech/ArtonFileManager
|
b099c5294ab731b0a0f1eb7dbe35397df4515863
|
[
"Apache-2.0"
] | null | null | null |
''' string '''
import base64
import os
def join(str1: str, str2: str) -> str:
''' join string '''
if str1 is None:
str1 = ''
if str2 is None:
str2 = ''
return str1 + str2
def str2bool(data: str, default: bool = None) -> bool:
''' trans to bool '''
l_str = str(data).lower()
if l_str == "true":
return True
if l_str == "false":
return False
return default
def str2base64(data: str, encoding: str = "utf-8") -> str:
''' trans to base64 '''
return base64.b64encode(data.encode(encoding)).decode()
def relative_path(path1: str, path2: str) -> str:
''' calc relative path '''
path1, path2 = path1.split(os.sep), path2.split(os.sep)
intersection = 0
for index in range(min(len(path1), len(path2))):
m, n = path1[index], path2[index]
if m != n:
intersection = index
break
def backward():
return (len(path1) - intersection - 1) * ('..' + os.sep)
def forward():
return os.sep.join(path2[intersection:])
out = backward() + forward()
return out
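# Hedged usage sketch: quick sanity checks for the helpers above
# (the example paths are made up).
if __name__ == "__main__":
    print(join("foo", None))  # 'foo'
    print(str2bool("True"), str2bool("maybe", default=False))  # True False
    print(str2base64("hello"))  # 'aGVsbG8='
    base = os.sep.join(["data", "projects", "alpha"])
    target = os.sep.join(["data", "assets", "img"])
    print(relative_path(base, target))  # '..' + os.sep + 'assets' + os.sep + 'img'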
| 25.555556
| 65
| 0.54087
|
211b67be1bc8853d3981c36517dbe6dde98e137f
| 18,076
|
py
|
Python
|
userbot/modules/gdrive.py
|
yonouf/UserBug
|
23c1d750e58a67eaaaf9a13b8700fe9dccf3bb7d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gdrive.py
|
yonouf/UserBug
|
23c1d750e58a67eaaaf9a13b8700fe9dccf3bb7d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/gdrive.py
|
yonouf/UserBug
|
23c1d750e58a67eaaaf9a13b8700fe9dccf3bb7d
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2020-01-01T11:36:37.000Z
|
2020-01-02T05:41:19.000Z
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
import asyncio
import json
import math
import os
import time
from pySmartDL import SmartDL
from telethon import events
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from apiclient.errors import ResumableUploadError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client import file, client, tools
from userbot import (G_DRIVE_CLIENT_ID, G_DRIVE_CLIENT_SECRET,
G_DRIVE_AUTH_TOKEN_DATA, GDRIVE_FOLDER_ID, BOTLOG_CHATID,
TEMP_DOWNLOAD_DIRECTORY, CMD_HELP, LOGS)
from userbot.events import register
from mimetypes import guess_type
import httplib2
import subprocess
from userbot.modules.upload_download import progress, humanbytes, time_formatter
# Path to token json file, it should be in same directory as script
G_DRIVE_TOKEN_FILE = "./auth_token.txt"
# Copy your credentials from the APIs Console
CLIENT_ID = G_DRIVE_CLIENT_ID
CLIENT_SECRET = G_DRIVE_CLIENT_SECRET
# Check https://developers.google.com/drive/scopes for all available scopes
OAUTH_SCOPE = "https://www.googleapis.com/auth/drive.file"
# Redirect URI for installed apps, can be left as is
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
# global variable to set Folder ID to upload to
parent_id = GDRIVE_FOLDER_ID
# global variable to indicate mimeType of directories in gDrive
G_DRIVE_DIR_MIME_TYPE = "application/vnd.google-apps.folder"
@register(pattern=r"^.gd(?: |$)(.*)", outgoing=True)
async def gdrive_upload_function(dryb):
""" For .gdrive command, upload files to google drive. """
await dryb.edit("Loading...")
input_str = dryb.pattern_match.group(1)
if CLIENT_ID is None or CLIENT_SECRET is None:
return
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
required_file_name = None
if "|" in input_str:
url, file_name = input_str.split("|")
url = url.strip()
# https://stackoverflow.com/a/761825/4723940
file_name = file_name.strip()
head, tail = os.path.split(file_name)
if head:
if not os.path.isdir(os.path.join(TEMP_DOWNLOAD_DIRECTORY, head)):
os.makedirs(os.path.join(TEMP_DOWNLOAD_DIRECTORY, head))
file_name = os.path.join(head, tail)
downloaded_file_name = TEMP_DOWNLOAD_DIRECTORY + "" + file_name
downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
downloader.start(blocking=False)
c_time = time.time()
display_message = None
while not downloader.isFinished():
status = downloader.get_status().capitalize()
total_length = downloader.filesize if downloader.filesize else None
downloaded = downloader.get_dl_size()
now = time.time()
diff = now - c_time
percentage = downloader.get_progress() * 100
speed = downloader.get_speed()
elapsed_time = round(diff) * 1000
progress_str = "[{0}{1}] {2}%".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░"
for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
estimated_total_time = downloader.get_eta(human=True)
try:
current_message = f"{status}...\
\nURL : {url}\
\nFile Name : {file_name}\
\n{progress_str}\
\n{humanbytes(downloaded)} of {humanbytes(total_length)}\
\nETA : {estimated_total_time}"
if round(diff %
10.00) == 0 and current_message != display_message:
await dryb.edit(current_message)
display_message = current_message
except Exception as e:
LOGS.info(str(e))
pass
if downloader.isSuccessful():
await dryb.edit(
"Downloaded to [here]({})\nNow Uploading to 𝐆𝐃𝐑𝐈𝐕𝐄."
.format(downloaded_file_name))
required_file_name = downloaded_file_name
else:
await dryb.edit("Incorrect URL\n{}".format(url))
elif input_str:
input_str = input_str.strip()
if os.path.exists(input_str):
required_file_name = input_str
await dryb.edit(
"Found [this]({})\nNow Uploading to 𝐆𝐃𝐑𝐈𝐕𝐄."
.format(input_str))
else:
await dryb.edit(
"File not found. Invalid path!")
return False
elif dryb.reply_to_msg_id:
try:
c_time = time.time()
downloaded_file_name = await dryb.client.download_media(
await dryb.get_reply_message(),
TEMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(progress(d, t, dryb, c_time, "Downloading...")))
except Exception as e:
await dryb.edit(str(e))
else:
required_file_name = downloaded_file_name
await dryb.edit(
"Downloaded to [here]({})\nNow Uploading to 𝐆𝐃𝐑𝐈𝐕𝐄."
.format(downloaded_file_name))
if required_file_name:
if G_DRIVE_AUTH_TOKEN_DATA is not None:
with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
t_file.write(G_DRIVE_AUTH_TOKEN_DATA)
# Check if token file exists, if not create it by requesting
# authorization code
if not os.path.isfile(G_DRIVE_TOKEN_FILE):
storage = await create_token_file(G_DRIVE_TOKEN_FILE, dryb)
http = authorize(G_DRIVE_TOKEN_FILE, storage)
# Authorize, get file parameters, upload file and print out result URL
# for download
http = authorize(G_DRIVE_TOKEN_FILE, None)
file_name, mime_type = file_ops(required_file_name)
# required_file_name will have the full path
# Sometimes API fails to retrieve starting URI, we wrap it.
try:
g_drive_link = await upload_file(http, required_file_name,
file_name, mime_type, dryb,
parent_id)
await dryb.edit(
f"This [Files]({required_file_name}) Uploaded to [𝐆𝐃𝐑𝐈𝐕𝐄]({g_drive_link})!"
)
except Exception as e:
await dryb.edit(
f"Uploading Error: {e}")
@register(pattern=r"^.ggd(?: |$)(.*)", outgoing=True)
async def upload_dir_to_gdrive(event):
await event.edit("Loading...")
if CLIENT_ID is None or CLIENT_SECRET is None:
return
input_str = event.pattern_match.group(1)
if os.path.isdir(input_str):
# TODO: remove redundant code
if G_DRIVE_AUTH_TOKEN_DATA is not None:
with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
t_file.write(G_DRIVE_AUTH_TOKEN_DATA)
# Check if token file exists, if not create it by requesting authorization code
storage = None
if not os.path.isfile(G_DRIVE_TOKEN_FILE):
storage = await create_token_file(G_DRIVE_TOKEN_FILE, event)
http = authorize(G_DRIVE_TOKEN_FILE, storage)
# Authorize, get file parameters, upload file and print out result URL for download
# first, create a sub-directory
dir_id = await create_directory(
http, os.path.basename(os.path.abspath(input_str)), parent_id)
await DoTeskWithDir(http, input_str, event, dir_id)
dir_link = "https://drive.google.com/folderview?id={}".format(dir_id)
await event.edit(f"Here is your [𝐆𝐃𝐑𝐈𝐕𝐄]({dir_link})")
else:
await event.edit(f"Directory {input_str} Not Exist")
@register(pattern=r"^.li(?: |$)(.*)", outgoing=True)
async def gdrive_search_list(event):
await event.edit("Loading...")
if CLIENT_ID is None or CLIENT_SECRET is None:
return
input_str = event.pattern_match.group(1).strip()
# TODO: remove redundant code
if G_DRIVE_AUTH_TOKEN_DATA is not None:
with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
t_file.write(G_DRIVE_AUTH_TOKEN_DATA)
# Check if token file exists, if not create it by requesting authorization code
storage = None
if not os.path.isfile(G_DRIVE_TOKEN_FILE):
storage = await create_token_file(G_DRIVE_TOKEN_FILE, event)
http = authorize(G_DRIVE_TOKEN_FILE, storage)
# Authorize, get file parameters, upload file and print out result URL for download
await event.edit(f"Searching for {input_str} in your 𝐆𝐃𝐑𝐈𝐕𝐄")
gsearch_results = await gdrive_search(http, input_str)
await event.edit(gsearch_results, link_preview=False)
@register(
pattern=
r"^.gsetf https?://drive\.google\.com/drive/u/\d/folders/([-\w]{25,})",
outgoing=True)
async def download(set):
"""For .gsetf command, allows you to set path"""
await set.edit("Loading...")
    global parent_id
    input_str = set.pattern_match.group(1)
    if input_str:
        parent_id = input_str
await set.edit(
"Custom Folder ID set successfully."
)
await set.delete()
else:
        await set.edit(
            "Use .gsetf <link to GDrive Folder> to set the folder to upload new files to."
        )
@register(pattern="^.gsetclear$", outgoing=True)
async def download(gclr):
"""For .gsetclear command, allows you clear ur curnt custom path"""
await gclr.reply("Loading...")
parent_id = GDRIVE_FOLDER_ID
await gclr.edit("Custom Folder ID cleared.")
@register(pattern="^.gfolder$", outgoing=True)
async def show_current_gdrove_folder(event):
if parent_id:
folder_link = f"https://drive.google.com/drive/folders/" + parent_id
await event.edit(
f"I'm currently uploading files on [𝐆𝐃𝐑𝐈𝐕𝐄]({folder_link})")
else:
await event.edit(
f"My userbot is currently uploading files to the root of my Google Drive storage.\
\nFind uploaded files [here](https://drive.google.com/drive/my-drive)"
)
# Get mime type and name of given file
def file_ops(file_path):
mime_type = guess_type(file_path)[0]
mime_type = mime_type if mime_type else "text/plain"
file_name = file_path.split("/")[-1]
return file_name, mime_type
async def create_token_file(token_file, event):
# Run through the OAuth flow and retrieve credentials
flow = OAuth2WebServerFlow(CLIENT_ID,
CLIENT_SECRET,
OAUTH_SCOPE,
redirect_uri=REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
async with event.client.conversation(BOTLOG_CHATID) as conv:
await conv.send_message(
f"Go to the following link in your browser: {authorize_url} and reply the code"
)
response = conv.wait_event(
events.NewMessage(outgoing=True, chats=BOTLOG_CHATID))
response = await response
code = response.message.message.strip()
credentials = flow.step2_exchange(code)
storage = Storage(token_file)
storage.put(credentials)
return storage
def authorize(token_file, storage):
# Get credentials
if storage is None:
storage = Storage(token_file)
credentials = storage.get()
# Create an httplib2.Http object and authorize it with our credentials
http = httplib2.Http()
credentials.refresh(http)
http = credentials.authorize(http)
return http
async def upload_file(http, file_path, file_name, mime_type, event, parent_id):
# Create Google Drive service instance
drive_service = build("drive", "v2", http=http, cache_discovery=False)
# File body description
media_body = MediaFileUpload(file_path, mimetype=mime_type, resumable=True)
body = {
"title": file_name,
"description": "Uploaded using custom Userbot",
"mimeType": mime_type,
}
if parent_id:
body["parents"] = [{"id": parent_id}]
# Permissions body description: anyone who has link can upload
# Other permissions can be found at https://developers.google.com/drive/v2/reference/permissions
permissions = {
"role": "reader",
"type": "anyone",
"value": None,
"withLink": True
}
# Insert a file
file = drive_service.files().insert(body=body, media_body=media_body)
response = None
display_message = ""
while response is None:
status, response = file.next_chunk()
await asyncio.sleep(1)
if status:
percentage = int(status.progress() * 100)
progress_str = "[{0}{1}] {2}%".format(
"".join(["█" for i in range(math.floor(percentage / 10))]),
"".join(["░"
for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
current_message = f"Uploading to 𝐆𝐃𝐑𝐈𝐕𝐄\nFile Name : {file_name}\n{progress_str}"
if display_message != current_message:
try:
await event.edit(current_message)
display_message = current_message
except Exception as e:
LOGS.info(str(e))
pass
file_id = response.get("id")
# Insert new permissions
drive_service.permissions().insert(fileId=file_id,
body=permissions).execute()
# Define file instance and get url for download
file = drive_service.files().get(fileId=file_id).execute()
download_url = file.get("webContentLink")
return download_url
async def create_directory(http, directory_name, parent_id):
drive_service = build("drive", "v2", http=http, cache_discovery=False)
permissions = {
"role": "reader",
"type": "anyone",
"value": None,
"withLink": True
}
file_metadata = {
"title": directory_name,
"mimeType": G_DRIVE_DIR_MIME_TYPE
}
if parent_id:
file_metadata["parents"] = [{"id": parent_id}]
file = drive_service.files().insert(body=file_metadata).execute()
file_id = file.get("id")
drive_service.permissions().insert(fileId=file_id,
body=permissions).execute()
LOGS.info("Created 𝐆𝐃𝐑𝐈𝐕𝐄 Folder :\nName : {}\nID : {} ".format(
file.get("title"), file_id))
return file_id
async def DoTeskWithDir(http, input_directory, event, parent_id):
list_dirs = os.listdir(input_directory)
if len(list_dirs) == 0:
return parent_id
r_p_id = None
for a_c_f_name in list_dirs:
current_file_name = os.path.join(input_directory, a_c_f_name)
if os.path.isdir(current_file_name):
current_dir_id = await create_directory(http, a_c_f_name,
parent_id)
r_p_id = await DoTeskWithDir(http, current_file_name, event,
current_dir_id)
else:
file_name, mime_type = file_ops(current_file_name)
# current_file_name will have the full path
g_drive_link = await upload_file(http, current_file_name,
file_name, mime_type, event,
parent_id)
r_p_id = parent_id
# TODO: there is a #bug here :(
return r_p_id
async def gdrive_list_file_md(service, file_id):
try:
file = service.files().get(fileId=file_id).execute()
# LOGS.info(file)
file_meta_data = {}
file_meta_data["title"] = file["title"]
mimeType = file["mimeType"]
file_meta_data["createdDate"] = file["createdDate"]
if mimeType == G_DRIVE_DIR_MIME_TYPE:
# is a dir.
file_meta_data["mimeType"] = "directory"
file_meta_data["previewURL"] = file["alternateLink"]
else:
# is a file.
file_meta_data["mimeType"] = file["mimeType"]
file_meta_data["md5Checksum"] = file["md5Checksum"]
file_meta_data["fileSize"] = str(humanbytes(int(file["fileSize"])))
file_meta_data["quotaBytesUsed"] = str(
humanbytes(int(file["quotaBytesUsed"])))
file_meta_data["previewURL"] = file["downloadUrl"]
return json.dumps(file_meta_data, sort_keys=True, indent=4)
except Exception as e:
return str(e)
async def gdrive_search(http, search_query):
if parent_id:
query = "'{}' in parents and (title contains '{}')".format(
parent_id, search_query)
else:
query = "title contains '{}'".format(search_query)
drive_service = build("drive", "v2", http=http, cache_discovery=False)
page_token = None
res = ""
while True:
try:
response = drive_service.files().list(
q=query,
spaces="drive",
fields="nextPageToken, items(id, title, mimeType)",
pageToken=page_token).execute()
for file in response.get("items", []):
file_title = file.get("title")
file_id = file.get("id")
if file.get("mimeType") == G_DRIVE_DIR_MIME_TYPE:
res += f"[FOLDER] {file_title}\nhttps://drive.google.com/drive/folders/{file_id}\n\n"
else:
res += f"{file_title}\nhttps://drive.google.com/uc?id={file_id}&export=download\n\n"
page_token = response.get("nextPageToken", None)
if page_token is None:
break
except Exception as e:
res += str(e)
break
msg = f"**GDrive Query**:\n{search_query}\n**Results**\n{res}"
return msg
| 40.895928
| 105
| 0.618721
|
a7e666dfa6daeed54685c2e7f80e02e16b0c2712
| 8,183
|
py
|
Python
|
SCSCons/PathList.py
|
Relintai/pandemonium_engine
|
3de05db75a396b497f145411f71eb363572b38ae
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
nuitka/build/inline_copy/lib/scons-4.3.0/SCons/PathList.py
|
lps1333/Nuitka
|
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
|
[
"Apache-2.0"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
nuitka/build/inline_copy/lib/scons-4.3.0/SCons/PathList.py
|
lps1333/Nuitka
|
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
|
[
"Apache-2.0"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Handle lists of directory paths.
These are the path lists that get set as CPPPATH, LIBPATH,
etc., with as much caching of data and efficiency as we can, while
still keeping the evaluation delayed so that we Do the Right Thing
(almost) regardless of how the variable is specified.
"""
import os
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0 # string with no '$'
TYPE_STRING_SUBST = 1 # string containing '$'
TYPE_OBJECT = 2 # other object
def node_conv(obj):
"""
This is the "string conversion" routine that we have our substitutions
use to return Nodes, not strings. This relies on the fact that an
EntryProxy object has a get() method that returns the underlying
Node that it wraps, which is a bit of architectural dependence
that we might need to break or modify in the future in response to
additional requirements.
"""
try:
get = obj.get
except AttributeError:
        if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence(obj):
result = obj
else:
result = str(obj)
else:
result = get()
return result
class _PathList:
"""
An actual PathList object.
"""
def __init__(self, pathlist):
"""
Initializes a PathList object, canonicalizing the input and
pre-processing it for quicker substitution later.
The stored representation of the PathList is a list of tuples
containing (type, value), where the "type" is one of the TYPE_*
variables defined above. We distinguish between:
strings that contain no '$' and therefore need no
delayed-evaluation string substitution (we expect that there
will be many of these and that we therefore get a pretty
big win from avoiding string substitution)
strings that contain '$' and therefore need substitution
(the hard case is things like '${TARGET.dir}/include',
which require re-evaluation for every target + source)
other objects (which may be something like an EntryProxy
that needs a method called to return a Node)
Pre-identifying the type of each element in the PathList up-front
and storing the type in the list of tuples is intended to reduce
the amount of calculation when we actually do the substitution
over and over for each target.
"""
if SCons.Util.is_String(pathlist):
pathlist = pathlist.split(os.pathsep)
elif not SCons.Util.is_Sequence(pathlist):
pathlist = [pathlist]
pl = []
for p in pathlist:
try:
found = '$' in p
except (AttributeError, TypeError):
type = TYPE_OBJECT
else:
if not found:
type = TYPE_STRING_NO_SUBST
else:
type = TYPE_STRING_SUBST
pl.append((type, p))
self.pathlist = tuple(pl)
def __len__(self): return len(self.pathlist)
def __getitem__(self, i): return self.pathlist[i]
def subst_path(self, env, target, source):
"""
Performs construction variable substitution on a pre-digested
PathList for a specific target and source.
"""
result = []
for type, value in self.pathlist:
if type == TYPE_STRING_SUBST:
value = env.subst(value, target=target, source=source,
conv=node_conv)
if SCons.Util.is_Sequence(value):
result.extend(SCons.Util.flatten(value))
elif value:
result.append(value)
elif type == TYPE_OBJECT:
value = node_conv(value)
if value:
result.append(value)
elif value:
result.append(value)
return tuple(result)
class PathListCache:
"""
A class to handle caching of PathList lookups.
This class gets instantiated once and then deleted from the namespace,
so it's used as a Singleton (although we don't enforce that in the
usual Pythonic ways). We could have just made the cache a dictionary
in the module namespace, but putting it in this class allows us to
use the same Memoizer pattern that we use elsewhere to count cache
hits and misses, which is very valuable.
Lookup keys in the cache are computed by the _PathList_key() method.
Cache lookup should be quick, so we don't spend cycles canonicalizing
all forms of the same lookup key. For example, 'x:y' and ['x',
'y'] logically represent the same list, but we don't bother to
split string representations and treat those two equivalently.
(Note, however, that we do, treat lists and tuples the same.)
The main type of duplication we're trying to catch will come from
looking up the same path list from two different clones of the
same construction environment. That is, given
env2 = env1.Clone()
both env1 and env2 will have the same CPPPATH value, and we can
cheaply avoid re-parsing both values of CPPPATH by using the
common value from this cache.
"""
def __init__(self):
self._memo = {}
def _PathList_key(self, pathlist):
"""
Returns the key for memoization of PathLists.
Note that we want this to be pretty quick, so we don't completely
canonicalize all forms of the same list. For example,
'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
represent the same list if you're executing from $ROOT, but
we're not going to bother splitting strings into path elements,
or massaging strings into Nodes, to identify that equivalence.
We just want to eliminate obvious redundancy from the normal
case of re-using exactly the same cloned value for a path.
"""
if SCons.Util.is_Sequence(pathlist):
pathlist = tuple(SCons.Util.flatten(pathlist))
return pathlist
@SCons.Memoize.CountDictCall(_PathList_key)
def PathList(self, pathlist):
"""
Returns the cached _PathList object for the specified pathlist,
creating and caching a new object as necessary.
"""
pathlist = self._PathList_key(pathlist)
try:
memo_dict = self._memo['PathList']
except KeyError:
memo_dict = {}
self._memo['PathList'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
result = _PathList(pathlist)
memo_dict[pathlist] = result
return result
PathList = PathListCache().PathList
del PathListCache
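# Hedged usage sketch (not part of SCons proper; it needs an SCons installation,
# exactly like the imports at the top of this module): lists and tuples with the
# same elements share one cache entry, and every element is pre-classified with
# the TYPE_* constants defined above.
if __name__ == "__main__":
    pl_a = PathList(['include', '${TARGET.dir}/include'])
    pl_b = PathList(('include', '${TARGET.dir}/include'))
    print(pl_a is pl_b)   # True: both flatten to the same memoization key
    print(pl_a.pathlist)  # ((0, 'include'), (1, '${TARGET.dir}/include')), i.e. NO_SUBST then SUBST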
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 36.368889
| 77
| 0.65245
|
f5f6806f33d4334e8f4039e613e5e6f4369bff14
| 3,004
|
py
|
Python
|
tensorflow/python/training/learning_rate_decay_test.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 23
|
2016-02-04T21:08:43.000Z
|
2022-01-14T13:22:33.000Z
|
tensorflow/python/training/learning_rate_decay_test.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 2
|
2016-05-31T16:38:55.000Z
|
2018-12-30T20:17:05.000Z
|
tensorflow/python/training/learning_rate_decay_test.py
|
deepakmuralidharan/tensorflow
|
f40e41f9c71ef2865f96f3db3cea2909797fe2a3
|
[
"Apache-2.0"
] | 20
|
2016-02-15T17:31:02.000Z
|
2020-01-12T08:18:48.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
def testContinuous(self):
with self.test_session():
step = 5
decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
expected = .05 * 0.96 ** (5.0 / 10.0)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testStaircase(self):
with self.test_session():
step = state_ops.variable_op([], dtypes.int32)
assign_100 = state_ops.assign(step, 100)
assign_1 = state_ops.assign(step, 1)
assign_2 = state_ops.assign(step, 2)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96**(100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
def testVariables(self):
with self.test_session():
step = variables.Variable(1)
assign_1 = step.assign(1)
assign_2 = step.assign(2)
assign_100 = step.assign(100)
decayed_lr = learning_rate_decay.exponential_decay(.1, step, 3, 0.96,
staircase=True)
variables.initialize_all_variables().run()
# No change to learning rate
assign_1.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
assign_2.op.run()
self.assertAllClose(decayed_lr.eval(), .1, 1e-6)
# Decayed learning rate
assign_100.op.run()
expected = .1 * 0.96**(100 // 3)
self.assertAllClose(decayed_lr.eval(), expected, 1e-6)
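# Hedged plain-Python reference (not TensorFlow API) for the schedule the tests
# above assert against; `staircase=True` floors the exponent to whole intervals.
def _reference_exponential_decay(lr, step, decay_steps, decay_rate, staircase=False):
  exponent = step // decay_steps if staircase else step / decay_steps
  return lr * decay_rate ** exponent  # e.g. 0.1 * 0.96 ** (100 // 3) ~= 0.026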
if __name__ == "__main__":
googletest.main()
| 37.55
| 80
| 0.665113
|
1224064900ea09b9ec50ee17ce1ec1a43214ecc8
| 3,816
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
bitcoin-pirate/BITP-Core
|
613a24d6e49ed3035a2144a50f39e7d03eeca20b
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
bitcoin-pirate/BITP-Core
|
613a24d6e49ed3035a2144a50f39e7d03eeca20b
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
bitcoin-pirate/BITP-Core
|
613a24d6e49ed3035a2144a50f39e7d03eeca20b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinpirateuser:\x00Documents:\x00bitcoinpirate:\x00bitcoinpirate:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinpirateuser/Documents/bitcoinpirate/bitcoinpirate/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['BitcoinPirate-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 62.557377
| 1,817
| 0.730346
|
b2b41fae203875643d41015acebcc26954848381
| 6,737
|
py
|
Python
|
practice/src/transformer2.py
|
hankyul2/ddackdae
|
5e9728578ab0f05c1f49e4a39e9753233a75e1da
|
[
"Apache-2.0"
] | null | null | null |
practice/src/transformer2.py
|
hankyul2/ddackdae
|
5e9728578ab0f05c1f49e4a39e9753233a75e1da
|
[
"Apache-2.0"
] | null | null | null |
practice/src/transformer2.py
|
hankyul2/ddackdae
|
5e9728578ab0f05c1f49e4a39e9753233a75e1da
|
[
"Apache-2.0"
] | null | null | null |
import copy
import math
import torch
import numpy as np
from einops import rearrange
from torch import nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, layer, norm, N):
super(Encoder, self).__init__()
self.layers = clone(layer, N)
self.norm = norm
def forward(self, src, src_mask):
for layer in self.layers:
src = layer(src, src_mask)
return self.norm(src)
class EncoderLayer(nn.Module):
def __init__(self, attn, ff, su):
super(EncoderLayer, self).__init__()
self.attn = attn
self.ff = ff
self.su = clone(su, 2)
def forward(self, src, src_mask):
src = self.su[0](src, lambda x: self.attn(x, x, x, src_mask))
return self.su[1](src, self.ff)
class Decoder(nn.Module):
def __init__(self, layer, norm, N):
super(Decoder, self).__init__()
self.layers = clone(layer, N)
self.norm = norm
def forward(self, src, tgt, src_mask, tgt_mask):
for layer in self.layers:
tgt = layer(src, tgt, src_mask, tgt_mask)
return self.norm(tgt)
class DecoderLayer(nn.Module):
def __init__(self, attn, attn2, ff, su):
super(DecoderLayer, self).__init__()
self.attn = attn
self.attn2 = attn2
self.ff = ff
self.su = clone(su, 3)
def forward(self, src, tgt, src_mask, tgt_mask):
tgt = self.su[0](tgt, lambda x: self.attn(x, x, x, tgt_mask))
        # cross-attention over the encoder output uses the second attention block
        tgt = self.su[1](tgt, lambda x: self.attn2(x, src, src, src_mask))
return self.su[2](tgt, self.ff)
class Transformer(nn.Module):
def __init__(self, src_embed, tgt_embed, encoder, decoder, generator):
super(Transformer, self).__init__()
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.encoder = encoder
self.decoder = decoder
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
src_rep = self.encode(src, src_mask)
tgt_rep = self.decode(src_rep, tgt, src_mask, tgt_mask)
return self.generator(tgt_rep)
def decode(self, src_rep, tgt, src_mask, tgt_mask):
return self.decoder(src_rep, self.tgt_embed(tgt), src_mask, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def get_transformer(src_vocab_size, tgt_vocab_size, N, d_model, h, d_ff, dropout=0.1):
c = copy.deepcopy
src_embed = Embedding(d_model, src_vocab_size)
tgt_embed = Embedding(d_model, tgt_vocab_size)
pe = PositionalEncoding(d_model, dropout=dropout)
src_embed = nn.Sequential(src_embed, c(pe))
tgt_embed = nn.Sequential(tgt_embed, c(pe))
attn = MultiheadAttention(d_model, h, dropout=dropout)
ff = FeedForward(d_model, d_ff, dropout=dropout)
norm = LayerNorm(d_model)
su = SublayerConnection(c(norm))
encoder = Encoder(EncoderLayer(c(attn), c(ff), c(su)), c(norm), N)
decoder = Decoder(DecoderLayer(c(attn), c(attn), c(ff), c(su)), c(norm), N)
generator = nn.Linear(d_model, tgt_vocab_size)
model = Transformer(src_embed, tgt_embed, encoder, decoder, generator)
for name, param in model.named_parameters():
if param.dim() > 1:
            nn.init.xavier_uniform_(param)
return model
def clone(layer, n):
return nn.ModuleList([copy.deepcopy(layer) for _ in range(n)])
class MultiheadAttention(nn.Module):
def __init__(self, d_model, h, dropout=0.1):
super(MultiheadAttention, self).__init__()
assert d_model % h == 0
self.h = h
self.d_k = d_model // h
self.w = clone(nn.Linear(d_model, d_model), 3)
self.dropout = nn.Dropout(p=dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask):
q, k, v = [rearrange(f(x), 'b s (h k) -> b h s k', h=self.h) for f, x in zip(self.w, [q, k, v])]
score = q @ k.transpose(-1, -2) / math.sqrt(self.d_k)
        score = score.masked_fill(mask, -1e9)
attn = F.softmax(score, dim=-1)
attn = self.dropout(attn)
w_v = attn @ v
c_v = rearrange(w_v, 'b h s k -> b s (h k)')
return self.out(c_v)
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff, dropout=0.1):
super(FeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
return self.w_2(self.dropout(F.gelu(self.w_1(x))))
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-9):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(d_model))
self.b_2 = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(dim=-1, keepdim=True)
std = x.std(dim=-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
def __init__(self, norm, dropout=0.1):
super(SublayerConnection, self).__init__()
self.norm = norm
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, layer):
return x + self.dropout(layer(self.norm(x)))
class Embedding(nn.Module):
def __init__(self, d_model, vocab_size):
super(Embedding, self).__init__()
self.embed = nn.Embedding(vocab_size, d_model)
self.d_model = d_model
def forward(self, x):
return self.embed(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_len=50000, dropout=0.1):
super(PositionalEncoding, self).__init__()
pe = torch.zeros(max_len, d_model)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000) / d_model))
pe[:, ::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe.unsqueeze(0))
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
return self.dropout(x + self.pe[:, :x.size(1)])
def get_sample_data():
src = torch.arange(1, 101, 1).view(10, 10)
tgt = torch.arange(2, 102, 1).view(10, 10)
src_mask, tgt_mask = get_mask(src, tgt)
return src, tgt, src_mask, tgt_mask
def get_mask(src, tgt, pad_idx=0):
src_mask = get_pad_mask(src, pad_idx)
    tgt_mask = get_pad_mask(tgt, pad_idx) | get_seq_mask(tgt)
    return src_mask, tgt_mask
def get_seq_mask(tgt):
    # mask strictly-future positions (True = masked), so each step may attend to itself and the past
    seq_len = tgt.size(1)
    return torch.from_numpy(np.triu(np.ones((seq_len, seq_len)), k=1)) == 1
def get_pad_mask(src, pad_idx):
return (src == pad_idx).unsqueeze(1).unsqueeze(1)
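# Hedged smoke test (the vocabulary size and hyper-parameters below are arbitrary):
# build a small model and run one forward pass on the toy batch above.
if __name__ == "__main__":
    src, tgt, src_mask, tgt_mask = get_sample_data()
    model = get_transformer(src_vocab_size=128, tgt_vocab_size=128,
                            N=2, d_model=32, h=4, d_ff=64)
    logits = model(src, tgt, src_mask, tgt_mask)
    print(logits.shape)  # torch.Size([10, 10, 128]) -> (batch, seq, tgt_vocab_size)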
| 31.046083
| 104
| 0.629212
|
3c95e61d32d1d5f2b303290e819aaca832ebf9df
| 1,712
|
py
|
Python
|
CIL-tutorial/walnut.py
|
paskino/CIL-work
|
20b7d6db2f74bcbb4b5dd524a5c3ae3fe7b1952d
|
[
"BSD-3-Clause"
] | null | null | null |
CIL-tutorial/walnut.py
|
paskino/CIL-work
|
20b7d6db2f74bcbb4b5dd524a5c3ae3fe7b1952d
|
[
"BSD-3-Clause"
] | null | null | null |
CIL-tutorial/walnut.py
|
paskino/CIL-work
|
20b7d6db2f74bcbb4b5dd524a5c3ae3fe7b1952d
|
[
"BSD-3-Clause"
] | 1
|
2021-07-01T11:27:58.000Z
|
2021-07-01T11:27:58.000Z
|
import numpy as np
import os
from cil.io import TXRMDataReader, TIFFWriter
from cil.processors import TransmissionAbsorptionConverter, Binner
from cil.plugins.tigre import FBP
from cil.utilities.display import show2D, show_geometry
base_dir = os.path.abspath(r"C:\Users\ofn77899\Data\walnut")
data_name = "valnut"
filename = os.path.join(base_dir, data_name, "valnut_2014-03-21_643_28/tomo-A/valnut_tomo-A.txrm")
is2D = False
data = TXRMDataReader(file_name=filename).read()
if is2D:
data = data.get_slice(vertical='centre')
else:
binner = Binner(roi={'horizontal': (None, None, 4),'vertical': (None, None, 4)})
data = binner(data)
# show_geometry(data.geometry)
data = TransmissionAbsorptionConverter()(data)
data.reorder(order='tigre')
ig = data.geometry.get_ImageGeometry()
fbp = FBP(ig, data.geometry)
recon = fbp(data)
from cil.io import NEXUSDataWriter
writer = NEXUSDataWriter()
writer.set_up(data=recon, file_name=os.path.abspath('noce.nxs'))
writer.write()
# show2D([data, recon], fix_range=(-0.01,0.06))
from ccpi.viewer import viewer2D, viewer3D
from ccpi.viewer.utils.conversion import Converter
import vtk
from functools import partial
v = viewer3D()
def clipping_plane(v, interactor, event):
if interactor.GetKeyCode() == "c":
planew = vtk.vtkImplicitPlaneWidget()
planew.SetInteractor(v.getInteractor())
plane = vtk.vtkPlane()
planew.GetPlane(plane)
planew.PlaceWidget()
v.volume_mapper.AddClippingPlane(plane)
v.plane = plane
v.planew = planew
v.style.AddObserver('KeyPressEvent', partial(clipping_plane, v), 1.0)
v.setInputData(Converter.numpy2vtkImage(recon.as_array()))
v.startRenderLoop()
| 27.612903
| 98
| 0.733061
|
719dab7a2a9a17ecf76aec878de83cf0dabdfc72
| 379
|
py
|
Python
|
setup.py
|
kirberich/textractor
|
fb55c27f2facbe8ff200bbf2cf30daaed7ab01b6
|
[
"MIT"
] | null | null | null |
setup.py
|
kirberich/textractor
|
fb55c27f2facbe8ff200bbf2cf30daaed7ab01b6
|
[
"MIT"
] | null | null | null |
setup.py
|
kirberich/textractor
|
fb55c27f2facbe8ff200bbf2cf30daaed7ab01b6
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='textractor',
version='1.0',
description='Minimalistic HTML text extraction library',
url='https://github.com/kirberich/textractor',
author='Robert Kirberich',
license='MIT',
py_modules=['textractor'],
zip_safe=False,
install_requires=[
'six',
'beautifulsoup4',
'lxml'
]
)
| 21.055556
| 60
| 0.627968
|
95b94e7327030cee33385ba574c8017ea793b5ef
| 3,775
|
py
|
Python
|
deepseries/model/wave2wave.py
|
EvilPsyCHo/Deep-Time-Series-Prediction
|
f6a6da060bb3f7d07f2a61967ee6007e9821064e
|
[
"Apache-2.0"
] | 334
|
2019-11-01T01:39:18.000Z
|
2022-03-31T08:10:17.000Z
|
deepseries/model/wave2wave.py
|
luxixiang/Deep-Time-Series-Prediction
|
f6a6da060bb3f7d07f2a61967ee6007e9821064e
|
[
"Apache-2.0"
] | 8
|
2019-12-30T08:01:32.000Z
|
2021-12-06T05:27:29.000Z
|
deepseries/model/wave2wave.py
|
luxixiang/Deep-Time-Series-Prediction
|
f6a6da060bb3f7d07f2a61967ee6007e9821064e
|
[
"Apache-2.0"
] | 57
|
2020-01-13T13:20:15.000Z
|
2022-03-31T08:10:20.000Z
|
# encoding: utf-8
"""
@author : zhirui zhou
@contact: evilpsycho42@gmail.com
@time : 2020/5/20 10:51
"""
import torch.nn as nn
import torch
from deepseries.nn.cnn import WaveNet
from deepseries.nn.comm import Embeddings, Concat
from deepseries.nn.loss import RMSE
from deepseries.log import get_logger
logger = get_logger(__name__)
class Wave2Wave(nn.Module):
def __init__(self, target_size, enc_cat_size=None, enc_num_size=None, dec_cat_size=None, dec_num_size=None,
residual_channels=32, share_embeds=False, skip_channels=32, num_blocks=3, num_layers=8,
dropout=.0, hidden_channels=128, loss_fn=RMSE(), debug=False, nonlinearity="Tanh"):
super(Wave2Wave, self).__init__()
self.debug = debug
self.enc_embeds = Embeddings(enc_cat_size, seq_last=True)
if share_embeds:
self.dec_embeds = self.enc_embeds
else:
self.dec_embeds = Embeddings(dec_cat_size, seq_last=True)
self.concat = Concat(dim=1)
self.dropout = nn.Dropout(dropout)
enc_input_channels = (self.enc_embeds.output_size +
target_size +
(enc_num_size if isinstance(enc_num_size, int) else 0))
dec_input_channels = (self.dec_embeds.output_size +
target_size +
(dec_num_size if isinstance(dec_num_size, int) else 0))
self.encoder = WaveNet(enc_input_channels, residual_channels, skip_channels, num_blocks, num_layers)
self.decoder = WaveNet(dec_input_channels, residual_channels, skip_channels, num_blocks, num_layers)
self.conv_output1 = nn.Conv1d(skip_channels, hidden_channels, kernel_size=1)
self.conv_output2 = nn.Conv1d(hidden_channels, target_size, kernel_size=1)
self.nonlinearity = getattr(nn, nonlinearity)()
self.loss_fn = loss_fn
def encode(self, x, num=None, cat=None):
x = self.concat(x, num, self.enc_embeds(cat))
x = self.dropout(x)
_, state = self.encoder.encode(x)
return state
def decode(self, x, state, num=None, cat=None):
        x = self.concat(x, num, self.dec_embeds(cat))
x = self.dropout(x)
skips, state = self.decoder.decode(x, state)
output = self.nonlinearity(self.conv_output1(skips))
output = self.conv_output2(output)
return output, state
def batch_loss(self, x, y, w=None):
state = self.encode(x['enc_x'], x['enc_num'], x['enc_cat'])
preds = []
for step in range(x['dec_len']):
pred, state = self.decode(x['dec_x'][:, :, [step]],
state,
x['dec_num'][:, :, [step]] if x['dec_num'] is not None else None,
x['dec_cat'][:, :, [step]] if x['dec_cat'] is not None else None)
preds.append(pred)
preds = torch.cat(preds, dim=2)
if self.debug:
message = f"batch loss predict mean: {preds.mean():.3f}, target mean: {y.mean():.3f}"
logger.info(message)
loss = self.loss_fn(preds, y, w)
del state
return loss
@torch.no_grad()
def predict(self, enc_x, dec_len, enc_num=None, enc_cat=None, dec_num=None, dec_cat=None):
state = self.encode(enc_x, enc_num, enc_cat)
preds = []
y = enc_x[:, :, [-1]]
for step in range(dec_len):
y, state = self.decode(y, state,
dec_num[:, :, [step]] if dec_num is not None else None,
dec_cat[:, :, [step]] if dec_cat is not None else None)
preds.append(y)
del state
return torch.cat(preds, dim=2)
| 43.390805
| 111
| 0.593642
|
a47033aab9506653cb2c6aa31f6a3f6c67255717
| 299
|
py
|
Python
|
2. mergesort in multi-threading/test.py
|
yedkk/parallel-computing-and-mapreduce
|
e10448ae318deca0dbd9e7a0e1cf8bfc7a66c323
|
[
"MIT"
] | null | null | null |
2. mergesort in multi-threading/test.py
|
yedkk/parallel-computing-and-mapreduce
|
e10448ae318deca0dbd9e7a0e1cf8bfc7a66c323
|
[
"MIT"
] | null | null | null |
2. mergesort in multi-threading/test.py
|
yedkk/parallel-computing-and-mapreduce
|
e10448ae318deca0dbd9e7a0e1cf8bfc7a66c323
|
[
"MIT"
] | null | null | null |
words = count_them(["different","same"],['C:\\Users\yedkk\ds410-hw-yedkk\hw3\part-00000', 'C:\\Users\yedkk\ds410-hw-yedkk\hw3\part-00001', 'C:\\Users\yedkk\ds410-hw-yedkk\hw3\part-00002', 'C:\\Users\yedkk\ds410-hw-yedkk\hw3\part-00003'])
d.visualize('ati_out.png')
print(words[-5000], words[-10000])
| 99.666667
| 237
| 0.70903
|
1ea039ee17dfe9410263211646390f7e98b8af05
| 5,828
|
py
|
Python
|
mvm_python.py
|
JamesSample/icpw
|
47562f601fc8fe23720267d083dabc540889565e
|
[
"MIT"
] | 1
|
2020-05-06T21:18:33.000Z
|
2020-05-06T21:18:33.000Z
|
mvm_python.py
|
JamesSample/icpw
|
47562f601fc8fe23720267d083dabc540889565e
|
[
"MIT"
] | 1
|
2020-02-05T16:50:23.000Z
|
2020-03-26T16:16:37.000Z
|
mvm_python.py
|
JamesSample/icpw
|
47562f601fc8fe23720267d083dabc540889565e
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: mvm_python.py
# Purpose: Download data from the MVM web API.
#
# Author: James Sample
#
# Created: 19/10/2018
# Copyright: (c) James Sample and NIVA, 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
""" Basic Python functions for downloading data from the Swedish water quality
database:
https://miljodata.slu.se/mvm/
"""
def convert_dates(time_string):
""" Modifed from here:
https://stackoverflow.com/a/28507530/505698
"""
import re
from datetime import datetime, timedelta, timezone
epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
ticks, offset = re.match(r'/Date\((\d+)([+-]\d{4})?\)/$', time_string).groups()
utc_dt = epoch + timedelta(milliseconds=int(ticks))
if offset:
offset = int(offset)
hours, minutes = divmod(abs(offset), 100)
if offset < 0:
hours, minutes = -hours, -minutes
dt = utc_dt.astimezone(timezone(timedelta(hours=hours, minutes=minutes)))
return datetime(dt.year, dt.month, dt.day)
else:
        return datetime(utc_dt.year, utc_dt.month, utc_dt.day)
def get_mvm_token(token_path):
""" Read valid MVM access token from .xlsx
"""
import pandas as pd
import datetime as dt
# Read Excel
df = pd.read_excel(token_path, sheet_name='Sheet1')
# Get most recent token
df.sort_values(by=['Expiry_Date'],
inplace=True,
ascending=False)
token = df['Token'][0]
exp_date = df['Expiry_Date'][0]
# Check valid
if exp_date < dt.datetime.now():
raise ValueError('The token file has no valid tokens.\n'
'Please log-in to MVM and update the tokens in the file:\n\n'
' https://miljodata.slu.se/mvm/')
return token
def query_mvm_station_data(site_id, st_yr, end_yr, token_path):
""" Download data for a specific site using the Miljödata MVM API.
Based on documentation here:
http://miljodata.slu.se/mvm/OpenAPI
Args:
site_id: Int. MD-MVM ID for site of interest
st_yr: Int. Start year of interest
end_yr: Int. End year of interest
        token_path: Raw str. Path to .xlsx containing tokens. See
                    example .xlsx for details
Returns:
Dataframe
"""
import requests
import pandas as pd
import numpy as np
# Get access token
token = get_mvm_token(token_path)
# Build url and get data
url = (r'http://miljodata.slu.se/mvm/ws/ObservationsService.svc/rest'
r'/GetSamplesBySite?token=%s&siteid=%s&fromYear=%s&toYear=%s' % (token,
site_id,
st_yr,
end_yr))
response = requests.get(url)
data = response.json()
# Dict for data of interest
data_dict = {'mvm_id':[],
'station_name':[],
'station_type':[],
'sample_id':[],
'sample_date':[],
'depth1':[],
'depth2':[],
'par_name':[],
'par_desc':[],
'unit':[],
'value':[],
'flag':[]}
# Loop over samples
for samp in data:
# Get sample data
stn_id = int(samp['SiteId'])
stn_name = samp['SiteName']
stn_type = samp['SiteType']
samp_id = int(samp['SampleId'])
date = convert_dates(samp['SampleDate'])
depth1 = samp['MinDepth']
if depth1:
depth1 = float(samp['MinDepth'].replace(',', '.'))
else:
depth1 = np.nan
depth2 = samp['MaxDepth']
if depth2:
depth2 = float(samp['MaxDepth'].replace(',', '.'))
else:
depth2 = np.nan
# Loop over pars
for par in samp['ObservationValues']:
# Get par data
par_name = par['PropertyAbbrevName']
par_desc = par['PropertyName']
par_unit = par['UnitOfMeasureName']
# Deal with LOD flags
par_value = par['ReportedValue']
if par_value:
par_value = par_value.replace(',', '.')
if par_value[0] in ('<', '>'):
flag = par_value[0]
par_value = par_value[1:]
else:
flag = np.nan
par_value = float(par_value)
else:
par_value = np.nan
flag = np.nan
# Add to dict
data_dict['mvm_id'].append(stn_id)
data_dict['station_name'].append(stn_name)
data_dict['station_type'].append(stn_type)
data_dict['sample_id'].append(samp_id)
data_dict['sample_date'].append(date)
data_dict['depth1'].append(depth1)
data_dict['depth2'].append(depth2)
data_dict['par_name'].append(par_name)
data_dict['par_desc'].append(par_desc)
data_dict['unit'].append(par_unit)
data_dict['value'].append(par_value)
data_dict['flag'].append(flag)
df = pd.DataFrame(data_dict)
df.sort_values(by=['sample_date', 'par_name'],
inplace=True)
df.reset_index(inplace=True, drop=True)
return df
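# Minimal usage sketch (illustrative values; requires a token .xlsx containing
# a valid, unexpired MVM token):
#
#     df = query_mvm_station_data(site_id=123, st_yr=2015, end_yr=2018,
#                                 token_path=r'mvm_tokens.xlsx')
#     print(df.head())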
| 33.494253
| 86
| 0.497598
|
fb085d03d58f8d54a2dc6855fc977bf8bb57a57e
| 2,939
|
py
|
Python
|
Award/models.py
|
Eugeneiregi/Awards
|
5cd6d51d59f170cd6ee4ee464f152484191884bd
|
[
"MIT"
] | null | null | null |
Award/models.py
|
Eugeneiregi/Awards
|
5cd6d51d59f170cd6ee4ee464f152484191884bd
|
[
"MIT"
] | 8
|
2020-06-11T10:03:53.000Z
|
2022-03-12T00:34:32.000Z
|
Award/models.py
|
Eugeneiregi/Awards
|
5cd6d51d59f170cd6ee4ee464f152484191884bd
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
class Profile(models.Model):
profile_picture = models.ImageField(upload_to='profile_pics/', blank=True)
prof_user = models.ForeignKey(User, on_delete=models.PROTECT, null=True)
bio = models.TextField(default="")
contact_info = models.CharField(max_length=200, blank=True)
profile_Id = models.IntegerField(default=0)
all_projects = models.ForeignKey('Project', on_delete=models.PROTECT, null=True)
def __str__(self):
return self.bio
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
def update_bio(self, bio):
self.bio = bio
self.save()
@classmethod
def get_profile_data(cls):
return Profile.objects.all()
class Meta:
db_table = 'profiles'
class Project(models.Model):
title = models.CharField(max_length=100)
details = models.TextField()
link = models.CharField(max_length=100)
user = models.ForeignKey(User, on_delete=models.PROTECT, null=True)
image = models.ImageField(upload_to='project_images', blank=True)
user_project_id = models.IntegerField(default=0)
design = models.IntegerField(choices=list(zip(range(0, 11), range(0, 11))), default=0)
usability = models.IntegerField(choices=list(zip(range(0, 11), range(0, 11))), default=0)
creativity = models.IntegerField(choices=list(zip(range(0, 11), range(0, 11))), default=0)
content = models.IntegerField(choices=list(zip(range(0, 11), range(0, 11))), default=0)
vote_submissions = models.IntegerField(default=0)
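    # choices=list(zip(range(0, 11), range(0, 11))) builds the pairs
    # [(0, 0), (1, 1), ..., (10, 10)], so the stored value and the displayed
    # label are the same 0-10 score.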
def __str__(self):
return self.title
@classmethod
def fetch_all_images(cls):
all_images = Project.objects.all()
return all_images
@classmethod
def get_single_project(cls, project):
project = cls.objects.get(id=project)
return project
@classmethod
def search_project_by_title(cls, search_term):
project = cls.objects.filter(title__icontains=search_term)
return project
class Meta:
db_table = 'projects'
ordering = ['-id']
class Rate(models.Model):
user = models.ForeignKey(User, on_delete=models.PROTECT)
post = models.ForeignKey(Project, on_delete=models.PROTECT, related_name='likes', null=True)
design = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
usability = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)], null=True)
creativity = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
content = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
def save_rate(self):
self.save()
def delete_rate(self):
self.delete()
class Meta:
db_table = 'ratings'
| 34.576471
| 104
| 0.699898
|
96be3af8e55400ee7ddc00de1dbd1d53417e6945
| 936
|
py
|
Python
|
matrix-python-project/utils/decrypt.py
|
hokaso/hocassian-media-matrix
|
2c2e5a4c72dfa43d2eed0f083f5b19238aea2765
|
[
"MIT"
] | 141
|
2021-06-27T03:18:54.000Z
|
2022-03-17T03:24:26.000Z
|
matrix-python-project/utils/decrypt.py
|
hokaso/hocassian-media-matrix
|
2c2e5a4c72dfa43d2eed0f083f5b19238aea2765
|
[
"MIT"
] | 1
|
2021-08-06T17:35:01.000Z
|
2021-08-06T17:35:01.000Z
|
matrix-python-project/utils/decrypt.py
|
hokaso/hocassian-media-matrix
|
2c2e5a4c72dfa43d2eed0f083f5b19238aea2765
|
[
"MIT"
] | 24
|
2021-06-29T01:58:59.000Z
|
2022-03-02T01:42:43.000Z
|
# -*- coding: utf-8 -*-
# AES demo - uses the AES symmetric-key encryption algorithm
import os, json, time, base64
from Crypto.Cipher import AES
# pad the string with NUL bytes until its length is a multiple of 16
def add_to_16(value):
while len(value) % 16 != 0:
value += '\0'
return str.encode(value)
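# e.g. add_to_16('abc') returns b'abc' followed by 13 NUL bytes (16 bytes total)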
# decryption routine
def decrypt():
with open("../key.txt", 'r') as f:
key = f.readline()
with open("list.txt", "r") as f0:
_info = f0.readlines()
info = [i.replace("\n", "") for i in _info]
for ikey in info:
with open(ikey + ".en", "r") as f1:
text = f1.read()
aes = AES.new(add_to_16(key), AES.MODE_ECB)
base64_decrypted = base64.decodebytes(text.encode(encoding='cp936'))
_decrypted_text = str(aes.decrypt(base64_decrypted), encoding='gbk').replace('\0', '')
decrypted_text = eval(_decrypted_text)
with open(ikey, 'wb') as f2:
f2.write(decrypted_text)
if __name__ == '__main__':
decrypt()
| 22.285714
| 94
| 0.58547
|
9126fb916a06464b0b12a1884880e97d4ddef2bd
| 4,809
|
py
|
Python
|
pymc3/step_methods/arraystep.py
|
ds7788/hello-world
|
f16466f4d4de8cd412771415a361738ba020e568
|
[
"Apache-2.0"
] | 1
|
2018-08-16T22:03:21.000Z
|
2018-08-16T22:03:21.000Z
|
pymc3/step_methods/arraystep.py
|
ds7788/hello-world
|
f16466f4d4de8cd412771415a361738ba020e568
|
[
"Apache-2.0"
] | null | null | null |
pymc3/step_methods/arraystep.py
|
ds7788/hello-world
|
f16466f4d4de8cd412771415a361738ba020e568
|
[
"Apache-2.0"
] | null | null | null |
from ..core import *
from .compound import CompoundStep
from ..vartypes import *
import numpy as np
from numpy.random import uniform
from numpy import log, isfinite
from enum import IntEnum, unique
__all__ = ['ArrayStep', 'ArrayStepShared', 'metrop_select', 'SamplerHist',
'Competence', 'Constant']
# TODO Add docstrings to ArrayStep
@unique
class Competence(IntEnum):
"""Enum for charaterizing competence classes of step methods.
Values include:
0: incompatible
1: compatible
2: preferred
3: ideal
"""
incompatible = 0
compatible = 1
preferred = 2
ideal = 3
class BlockedStep(object):
def __new__(cls, *args, **kwargs):
blocked = kwargs.get('blocked')
if blocked is None:
# Try to look up default value from class
blocked = getattr(cls, 'default_blocked', True)
kwargs['blocked'] = blocked
model = modelcontext(kwargs.get('model'))
# vars can either be first arg or a kwarg
if 'vars' not in kwargs and len(args) >= 1:
vars = args[0]
args = args[1:]
elif 'vars' in kwargs:
vars = kwargs.pop('vars')
else: # Assume all model variables
vars = model.vars
#get the actual inputs from the vars
vars = inputvars(vars)
if not blocked and len(vars) > 1:
# In this case we create a separate sampler for each var
# and append them to a CompoundStep
steps = []
for var in vars:
step = super(BlockedStep, cls).__new__(cls)
# If we don't return the instance we have to manually
# call __init__
step.__init__([var], *args, **kwargs)
steps.append(step)
return CompoundStep(steps)
else:
return super(BlockedStep, cls).__new__(cls)
@staticmethod
def competence(var):
return Competence.incompatible
@classmethod
def _competence(cls, vars):
return [cls.competence(var) for var in np.atleast_1d(vars)]
class ArrayStep(BlockedStep):
def __init__(self, vars, fs, allvars=False, blocked=True):
self.vars = vars
self.ordering = ArrayOrdering(vars)
self.fs = fs
self.allvars = allvars
self.blocked = blocked
def step(self, point):
bij = DictToArrayBijection(self.ordering, point)
inputs = list(map(bij.mapf, self.fs))
if self.allvars:
inputs += [point]
apoint = self.astep(bij.map(point), *inputs)
return bij.rmap(apoint)
class ArrayStepShared(BlockedStep):
"""Faster version of ArrayStep that requires the substep method that does not wrap the functions the step method uses.
Works by setting shared variables before using the step. This eliminates the mapping and unmapping overhead as well
as moving fewer variables around.
"""
def __init__(self, vars, shared, blocked=True):
"""
Parameters
----------
vars : list of sampling variables
shared : dict of theano variable -> shared variable
blocked : Boolean (default True)
"""
self.vars = vars
self.ordering = ArrayOrdering(vars)
self.shared = { str(var) : shared for var, shared in shared.items() }
self.blocked = blocked
def step(self, point):
for var, share in self.shared.items():
share.container.storage[0] = point[var]
bij = DictToArrayBijection(self.ordering, point)
apoint = self.astep(bij.map(point))
return bij.rmap(apoint)
def metrop_select(mr, q, q0):
# Perform rejection/acceptance step for Metropolis class samplers
# Compare acceptance ratio to uniform random number
if isfinite(mr) and log(uniform()) < mr:
# Accept proposed value
return q
else:
# Reject proposed value
return q0
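# Example: metrop_select(np.log(0.7), q, q0) returns the proposal q whenever
# log(uniform()) < log(0.7), i.e. the proposal is accepted with probability
# ~0.7; otherwise the current value q0 is returned.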
class SamplerHist(object):
def __init__(self):
self.metrops = []
def acceptr(self):
return np.minimum(np.exp(self.metrops), 1)
class Constant(ArrayStep):
"""
Dummy sampler that returns the current value at every iteration. Useful for
fixing parameters at a particular value.
Parameters
----------
vars : list
List of variables for sampler.
model : PyMC Model
Optional model for sampling step. Defaults to None (taken from context).
"""
def __init__(self, vars, model=None, **kwargs):
model = modelcontext(model)
self.model = model
vars = inputvars(vars)
super(Constant, self).__init__(vars, [model.fastlogp], **kwargs)
def astep(self, q0, logp):
return q0
| 28.96988
| 122
| 0.607403
|
b2706655401d1fd3621fc1a9adfdba984f20015f
| 3,296
|
py
|
Python
|
pyspheregl/demo/world_points.py
|
JosephOHagan/pyspheregl
|
4258f8bb69372f0cb33ea1d859aaa1d8195e0167
|
[
"BSD-2-Clause"
] | null | null | null |
pyspheregl/demo/world_points.py
|
JosephOHagan/pyspheregl
|
4258f8bb69372f0cb33ea1d859aaa1d8195e0167
|
[
"BSD-2-Clause"
] | null | null | null |
pyspheregl/demo/world_points.py
|
JosephOHagan/pyspheregl
|
4258f8bb69372f0cb33ea1d859aaa1d8195e0167
|
[
"BSD-2-Clause"
] | 3
|
2018-08-09T18:57:51.000Z
|
2019-11-23T08:27:33.000Z
|
import numpy as np
import pyglet
from pyglet.gl import *
# sphere stuff
from ..sim.sphere_sim import getshader, resource_file
from ..sim import sphere_sim
from ..sphere import sphere
from ..utils.graphics_utils import make_unit_quad_tile
from ..utils.shader import ShaderVBO, shader_from_file
from ..utils.np_vbo import VBuf, IBuf
from ..utils import transformations as tn
from ..touch.rotater import RotationHandler
from ..touch import rotater
import zipfile
def load_cities():
city_zip = resource_file("data/cities1000.zip")
lonlats_radians = []
with zipfile.ZipFile(city_zip) as z:
all_cities = z.open("cities1000.txt")
for line in all_cities:
fields = line.split("\t")
lat, lon = float(fields[4]), -float(fields[5])
lonlats_radians.append([np.radians(lon), np.radians(lat)])
return np.array(lonlats_radians, dtype=np.float32)
class WorldPoints(object):
def __init__(self):
self.viewer = sphere_sim.make_viewer(show_touches=True, draw_fn=self.draw,
tick_fn=self.tick,
touch_fn=self.touch)
self.rotater = RotationHandler(rotater.EQUATORIAL)
world_indices, world_verts, world_texs = make_unit_quad_tile(1)
world_texture = pyglet.image.load(resource_file("data/azworld.png"))
whole_shader = shader_from_file([getshader("sphere.vert"), getshader("user/whole_sphere.vert")], [getshader("user/whole_sphere_tex.frag")])
self.world_render = ShaderVBO(whole_shader, IBuf(world_indices),
buffers={"quad_vtx":VBuf(world_verts),},
textures={"tex":world_texture.texture})
pts = load_cities()
# point shader; simple coloured circles, with no spherical correction
point_shader = shader_from_file([getshader("sphere.vert"), getshader("user/point.vert")], [getshader("user/point.frag")])
self.point_vbo = ShaderVBO(point_shader, IBuf(np.arange(len(pts))),
buffers={"position":VBuf(pts), },
vars={"constant_size":1,
},
attribs={"color":(1,1,1,1)},
primitives=GL_POINTS)
self.viewer.start()
def touch(self, events):
for event in events:
xyz = sphere.polar_to_cart(*event.touch.lonlat)
if event.event=="DRAG":
self.rotater.drag(event.touch.id, xyz)
if event.event=="UP":
self.rotater.up(event.touch.id, xyz)
if event.event=="DOWN":
self.rotater.down(event.touch.id, xyz)
def draw(self):
glClearColor(0.0,0.0,0.0,1)
glClear(GL_COLOR_BUFFER_BIT)
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
glEnable(GL_POINT_SPRITE)
self.world_render.draw()
self.point_vbo.draw(vars={"quat":self.rotater.orientation})
def tick(self):
self.rotater.update(1/60.0)
pass
if __name__=="__main__":
p = WorldPoints()
| 37.033708
| 148
| 0.581311
|
858c644641578fab57269fe79cb1d579c6db5942
| 11,752
|
py
|
Python
|
froide/helper/email_parsing.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/helper/email_parsing.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/helper/email_parsing.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
"""
Original EmailParser Code by Ian Lewis:
http://www.ianlewis.org/en/parsing-email-attachments-python
Licensed under MIT
"""
import re
import time
from contextlib import closing
from datetime import datetime, timedelta
from email.header import decode_header
from email.message import EmailMessage
from email.parser import BytesParser as Parser
from email.utils import getaddresses, parseaddr, parsedate_tz
from io import BytesIO
from typing import Dict, List, Optional, Tuple
from urllib.parse import unquote
from django.utils.functional import cached_property
import pytz
from .email_utils import (
AuthenticityStatus,
check_dkim,
check_dmarc,
check_spf,
detect_auto_reply,
get_bounce_info,
)
from .text_utils import convert_html_to_text
# Restrict to max 3 consecutive newlines in email body
MULTI_NL_RE = re.compile("((?:\r?\n){,3})(?:\r?\n)*")
DISPO_SPLIT = re.compile(r"""((?:[^;"']|"[^"]*"|'[^']*')+)""")
# Reassemble regular-parameter section
# https://tools.ietf.org/html/rfc2231#7
DISPO_MULTI_VALUE = re.compile(r"(\w+)\*\d+$")
def split_with_quotes(dispo):
return [x.strip() for x in DISPO_SPLIT.split(dispo.strip()) if x and x != ";"]
def get_email_headers(message_bytes, headers=None):
p = Parser()
with closing(BytesIO(message_bytes)) as stream:
msgobj = p.parse(stream)
if headers is None:
headers = dict(msgobj)
return {k: [parse_header_field(x) for x in msgobj.get_all(k, [])] for k in headers}
class EmailAttachment(BytesIO):
content_type: str
size: int
name: Optional[str]
create_date: Optional[str]
mod_date: Optional[str]
read_date: Optional[str]
def parse_email_body(
msgobj: EmailMessage,
) -> Tuple[List[str], List[str], List[EmailAttachment]]:
body = []
html = []
attachments = []
for part in msgobj.walk():
attachment = parse_attachment(part)
if attachment:
attachments.append(attachment)
elif part.get_content_type() == "text/plain":
body.append(decode_message_part(part))
elif part.get_content_type() == "text/html":
html.append(decode_message_part(part))
return body, html, attachments
def decode_message_part(part):
charset = part.get_content_charset() or "ascii"
return str(part.get_payload(decode=True), charset, "replace")
def parse_main_headers(msgobj):
subject = parse_header_field(msgobj["Subject"])
tos = get_address_list(msgobj.get_all("To", []))
x_original_tos = get_address_list(msgobj.get_all("X-Original-To", []))
ccs = get_address_list(msgobj.get_all("Cc", []))
resent_tos = get_address_list(msgobj.get_all("resent-to", []))
resent_ccs = get_address_list(msgobj.get_all("resent-cc", []))
from_field = parseaddr(str(msgobj.get("From")))
from_field = (
parse_header_field(from_field[0]),
from_field[1].lower() if from_field[1] else from_field[1],
)
date = parse_date(str(msgobj.get("Date")))
return {
"message_id": msgobj.get("Message-Id"),
"date": date,
"subject": subject,
"from_": from_field,
"to": tos,
"x_original_to": x_original_tos,
"cc": ccs,
"resent_to": resent_tos,
"resent_cc": resent_ccs,
}
def parse_dispositions(dispo):
if not isinstance(dispo, str):
dispo = parse_header_field(dispo)
dispos = split_with_quotes(dispo)
dispo_name = dispos[0].lower()
dispo_dict = {}
for param in dispos[1:]:
if "=" not in param:
continue
name, value = param.split("=", 1)
name = name.lower().strip()
value = value.strip()
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
multi_name = DISPO_MULTI_VALUE.match(name)
if multi_name:
name = multi_name.group(1)
if name in dispo_dict:
dispo_dict[name] += value
else:
dispo_dict[name] = value
else:
dispo_dict[name] = value
for name, value in dispo_dict.items():
dispo_dict[name] = parse_header_field(value)
return dispo_name, dispo_dict
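# For example, parse_dispositions('attachment; filename="report.pdf"') returns
# ('attachment', {'filename': 'report.pdf'}).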
def parse_attachment(message_part):
content_disposition = message_part.get("Content-Disposition", None)
if not content_disposition:
return None
dispo_type, dispo_dict = parse_dispositions(content_disposition)
if not (
dispo_type == "attachment"
or (dispo_type == "inline" and "filename" in dispo_dict)
):
return None
content_type = message_part.get("Content-Type", None)
file_data = message_part.get_payload(decode=True)
if file_data is None:
payloads = message_part.get_payload()
file_data = "\n\n".join([p.as_string() for p in payloads]).encode("utf-8")
attachment = EmailAttachment(file_data)
attachment.content_type = message_part.get_content_type()
attachment.size = len(file_data)
attachment.name = None
attachment.create_date = None
attachment.mod_date = None
attachment.read_date = None
attachment.name = get_attachment_name(
attachment, dispo_dict, content_type=content_type
)
if "create-date" in dispo_dict:
attachment.create_date = dispo_dict["create-date"] # TODO: datetime
if "modification-date" in dispo_dict:
attachment.mod_date = dispo_dict["modification-date"] # TODO: datetime
if "read-date" in dispo_dict:
attachment.read_date = dispo_dict["read-date"] # TODO: datetime
return attachment
def get_attachment_name(attachment, dispo_dict, content_type=None):
name = None
if "filename" in dispo_dict:
name = dispo_dict["filename"]
if name is None and "filename*" in dispo_dict:
name = parse_extended_header_field(dispo_dict["filename*"])
if content_type:
_, content_dict = parse_dispositions(content_type)
if "name" in content_dict:
name = content_dict["name"]
if name is None and "name*" in content_dict:
name = parse_extended_header_field(content_dict["name*"])
if name is None and content_type == "message/rfc822":
attachment_bytes = attachment.getvalue()
attachment_headers = get_email_headers(attachment_bytes, ["Subject"])
subject = attachment_headers["Subject"]
if subject:
name = "%s.eml" % subject[0][:45]
return name
def parse_header_field(field):
if field is None:
return None
if isinstance(field, str):
# For Python 2
# see http://stackoverflow.com/questions/7331351/python-email-header-decoding-utf-8
field = re.sub(r"(=\?[\w-]+\?\w\?.*\?=)(?!$)", r"\1 ", field)
field = field.replace("\n ", "")
try:
decodefrag = decode_header(field)
except UnicodeEncodeError:
# Python 2 failure
if isinstance(field, str):
return field
return try_decoding(field)
if decodefrag and isinstance(decodefrag[0][0], bytes) and b"=?" in decodefrag[0][0]:
# Likely failed to decode!
# Python expects encoded words in individual lines
# https://github.com/python/cpython/blob/a8d5e2f255f1a20fc8af7dc16a7cb708e014952a/Lib/email/header.py#L86
# But encoded words may have been split up!
# Let's remove newlines that are not preceded by
# encoded word terminator and try again
field = re.sub(r"(?<!\?\=)\n ", "=20", field)
decodefrag = decode_header(field)
fragments = []
for s, enc in decodefrag:
decoded = None
if enc or not isinstance(s, str):
decoded = try_decoding(s, encoding=enc)
else:
decoded = s
fragments.append(decoded.strip(" "))
field = " ".join(fragments)
return field.replace("\n\t", " ").replace("\n", "").replace("\r", "")
def parse_extended_header_field(field):
"""
https://tools.ietf.org/html/rfc5987#section-3.2
"""
try:
fname_encoding, fname_lang, fname = field.split("'")
except ValueError:
return str(field)
return unquote(fname, encoding=fname_encoding)
def try_decoding(encoded, encoding=None):
decoded = None
if encoding and encoding != "unknown-8bit":
try:
decoded = encoded.decode(encoding, errors="replace")
except (UnicodeDecodeError, LookupError):
pass
if decoded is None:
# Try common encodings
for enc in ("utf-8", "latin1"):
try:
decoded = encoded.decode(enc)
break
except UnicodeDecodeError:
continue
if decoded is None:
# Fall back to ascii and replace
decoded = encoded.decode("ascii", errors="replace")
return decoded
def get_address_list(values):
values = [parse_header_field(value) for value in values]
address_list = getaddresses(values)
fixed = []
for addr in address_list:
fixed.append((parse_header_field(addr[0]), addr[1]))
return fixed
def parse_date(date_str):
date_tuple = parsedate_tz(date_str)
if date_tuple is None:
return None
date = datetime.fromtimestamp(time.mktime(date_tuple[:9]))
offset = date_tuple[9]
if offset is not None:
date = date - timedelta(seconds=offset)
return pytz.utc.localize(date)
EmailField = Tuple[str, str]
class ParsedEmail(object):
message_id: str = ""
date: datetime = None
subject: str
body: str
html: Optional[str]
from_: EmailField
to: List[EmailField] = []
x_original_to: List[EmailField] = []
cc: List[EmailField] = []
resent_to: List[EmailField] = []
resent_cc: List[EmailField] = []
attachments: List[EmailAttachment] = []
def __init__(self, msgobj, **kwargs):
self.msgobj: EmailMessage = msgobj
for k, v in kwargs.items():
setattr(self, k, v)
@cached_property
def bounce_info(self):
return self.get_bounce_info()
def get_bounce_info(self):
return get_bounce_info(self.body, msgobj=self.msgobj, date=self.date)
@cached_property
def is_auto_reply(self):
return self.detect_auto_reply()
def detect_auto_reply(self):
return detect_auto_reply(self.from_, subject=self.subject, msgobj=self.msgobj)
def is_direct_recipient(self, email_address):
return any(email.lower() == email_address.lower() for name, email in self.to)
@cached_property
def fails_authenticity(self):
checks = self.get_authenticity_checks()
return [c for c in checks if c.failed]
    def get_authenticity_checks(self) -> List[AuthenticityStatus]:
if hasattr(self, "_authenticity_checks"):
return self._authenticity_checks
checks = []
status = check_spf(self.msgobj)
if status:
checks.append(status)
status = check_dmarc(self.msgobj)
if status:
checks.append(status)
status = check_dkim(self.msgobj)
if status:
checks.append(status)
self._authenticity_checks = checks
return checks
def fix_email_body(body):
return MULTI_NL_RE.sub("\\1", body)
def parse_email(bytesfile: BytesIO) -> ParsedEmail:
p = Parser()
msgobj = p.parse(bytesfile)
body, html, attachments = parse_email_body(msgobj)
body = "\n".join(body).strip()
html = "\n".join(html).strip()
if not body and html:
body = convert_html_to_text(html)
body = fix_email_body(body)
email_info = parse_main_headers(msgobj)
email_info.update({"body": body, "html": html, "attachments": attachments})
return ParsedEmail(msgobj, **email_info)
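# Minimal usage sketch (illustrative single-part message, no attachments):
#
#     raw = (b"From: Alice <a@example.org>\r\nTo: b@example.org\r\n"
#            b"Subject: Hello\r\n\r\nHi there")
#     parsed = parse_email(BytesIO(raw))
#     parsed.subject  # 'Hello'
#     parsed.from_    # ('Alice', 'a@example.org')
#     parsed.body     # 'Hi there'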
| 31.007916
| 113
| 0.646273
|
5a5ddc6c3e8a40ca8b0cbc723191a01cf09b8a05
| 4,064
|
py
|
Python
|
custom_components/ge_home/devices/base.py
|
bendavis/ha_components
|
cc19732fd5d234881f74c90f636658ca7b04cb47
|
[
"MIT"
] | null | null | null |
custom_components/ge_home/devices/base.py
|
bendavis/ha_components
|
cc19732fd5d234881f74c90f636658ca7b04cb47
|
[
"MIT"
] | null | null | null |
custom_components/ge_home/devices/base.py
|
bendavis/ha_components
|
cc19732fd5d234881f74c90f636658ca7b04cb47
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
from typing import Dict, List, Optional
from gehomesdk import GeAppliance
from gehomesdk.erd import ErdCode, ErdCodeType, ErdApplianceType
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from ..const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class ApplianceApi:
"""
API class to represent a single physical device.
    Since a physical device can have many entities, we'll pool common elements here
"""
APPLIANCE_TYPE = None # type: Optional[ErdApplianceType]
def __init__(self, coordinator: DataUpdateCoordinator, appliance: GeAppliance):
if not appliance.initialized:
raise RuntimeError("Appliance not ready")
self._appliance = appliance
self._loop = appliance.client.loop
self._hass = coordinator.hass
self.coordinator = coordinator
self.initial_update = False
self._entities = {} # type: Optional[Dict[str, Entity]]
@property
def hass(self) -> HomeAssistant:
return self._hass
@property
def loop(self) -> Optional[asyncio.AbstractEventLoop]:
if self._loop is None:
self._loop = self._appliance.client.loop
return self._loop
@property
def appliance(self) -> GeAppliance:
return self._appliance
@appliance.setter
def appliance(self, value: GeAppliance):
self._appliance = value
@property
def available(self) -> bool:
        # Note: "online" is available here because we're using the GE coordinator.
        # We didn't want to deal with the circular references required to get
        # the type hints working.
return self.appliance.available and self.coordinator.online
@property
def serial_number(self) -> str:
return self.appliance.get_erd_value(ErdCode.SERIAL_NUMBER)
@property
def model_number(self) -> str:
return self.appliance.get_erd_value(ErdCode.MODEL_NUMBER)
@property
def sw_version(self) -> str:
appVer = self.try_get_erd_value(ErdCode.APPLIANCE_SW_VERSION)
wifiVer = self.try_get_erd_value(ErdCode.WIFI_MODULE_SW_VERSION)
return 'Appliance=' + str(appVer or 'Unknown') + '/Wifi=' + str(wifiVer or 'Unknown')
@property
def name(self) -> str:
appliance_type = self.appliance.appliance_type
if appliance_type is None or appliance_type == ErdApplianceType.UNKNOWN:
appliance_type = "Appliance"
else:
appliance_type = appliance_type.name.replace("_", " ").title()
return f"GE {appliance_type} {self.serial_number}"
@property
def device_info(self) -> Dict:
"""Device info dictionary."""
return {
"identifiers": {(DOMAIN, self.serial_number)},
"name": self.name,
"manufacturer": "GE",
"model": self.model_number,
"sw_version": self.sw_version
}
@property
def entities(self) -> List[Entity]:
return list(self._entities.values())
def get_all_entities(self) -> List[Entity]:
"""Create Entities for this device."""
from ..entities import GeErdSensor, GeErdSwitch
entities = [
GeErdSensor(self, ErdCode.CLOCK_TIME),
GeErdSwitch(self, ErdCode.SABBATH_MODE),
]
return entities
def build_entities_list(self) -> None:
"""Build the entities list, adding anything new."""
from ..entities import GeErdEntity
entities = [
e for e in self.get_all_entities()
if not isinstance(e, GeErdEntity) or e.erd_code in self.appliance.known_properties
]
for entity in entities:
if entity.unique_id not in self._entities:
self._entities[entity.unique_id] = entity
def try_get_erd_value(self, code: ErdCodeType):
try:
return self.appliance.get_erd_value(code)
except:
return None
| 32.253968
| 94
| 0.656988
|
69f6c22cb075dcba695f64c5a667da858d2dbf88
| 4,293
|
py
|
Python
|
task-automation/AMITaggingFromEc2py.py
|
witaloandrade/botools
|
b37ef276fc7f1882e5c20a992c20c13e80078fe8
|
[
"MIT"
] | null | null | null |
task-automation/AMITaggingFromEc2py.py
|
witaloandrade/botools
|
b37ef276fc7f1882e5c20a992c20c13e80078fe8
|
[
"MIT"
] | null | null | null |
task-automation/AMITaggingFromEc2py.py
|
witaloandrade/botools
|
b37ef276fc7f1882e5c20a992c20c13e80078fe8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""
Witalo Andrade - 2021-07-15
This script will copy old BillingId/Owner tag values to new values.
How to run it: python3 ./<NAME>.py
"""
import os
import boto3
from time import sleep
from botocore.exceptions import ClientError
import logging
import re
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.ERROR)
logger = logging.getLogger()
#Static most used regions
aws_regions = ["us-east-1","us-west-2","ap-southeast-1","ap-southeast-2","eu-central-1","eu-west-1"]
def pullAwsCreds(file='~/.aws/credentials'):
"""
This function will return a list with all aws profiles from user default aws cred file.
"""
path = file
full_path = os.path.expanduser(path)
with open(full_path, 'r') as f:
lines = f.readlines()
f.close()
return [line[1:-2] for line in lines if line.startswith('[') ]
def addTags(aws_accounts):
"""
    This function iterates over EC2 instances and applies the tags returned by regexLowerCaseTags.
"""
for account in aws_accounts:
print('Working on aws credential :', account)
for each_reg in aws_regions:
session=boto3.Session(profile_name=account,region_name=each_reg)
client = session.client('ec2')
response = client.describe_instances(
Filters=[ # add any filters if needed, used for dev
#{
# 'Name': 'tag:Name',
# 'Values': ['xyz']
#}
]
)
for reservation in response['Reservations']:
for instance_description in reservation['Instances']:
print('Listing :', instance_description['InstanceId'])
sleep(2)
to_tag = regexLowerCaseTags(instance_description['Tags'])
if to_tag:
logger.info('Func returned new tags: %s for %s ', to_tag, instance_description['InstanceId'] )
try:
print("Tagging ec2 {} - {}".format(instance_description['InstanceId'], to_tag, end='\n\n'))
client.create_tags(Resources=[instance_description['InstanceId']], DryRun=False, Tags=to_tag)
except ClientError as e:
if e.response['Error'].get('Code') == 'DryRunOperation':
print("DryRun executed, ec2 would have been tagged.",end='\n\n')
else:
                                logger.error('Failed to tag ec2 in account %s with error: %s', account, e)
except Exception as e:
print(e)
def lowerCaseTags(tags):
new_tags = []
for t in tags:
if t['Key'] == 'BillingId':
new_tags.append({'Key':'billingid', 'Value': t['Value']})
elif t['Key'] == 'Owner':
new_tags.append({'Key':'owner', 'Value': t['Value']})
if len(new_tags) > 0:
return new_tags
def regexLowerCaseTags(tags):
"""
    This function returns the new tag values derived from the existing tags.
    Regular expressions are used to pick the best candidate values and to skip invalid ones.
"""
new_tags = []
billKeyRegex = re.compile(r'bill', re.IGNORECASE)
billValueRegex = re.compile(r'\D+-\d+', re.IGNORECASE)
ownerRegex = re.compile(r'owner', re.IGNORECASE)
emailRegex = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b')
for t in tags:
if bool(billKeyRegex.search(t['Key'])) == True and bool(billValueRegex.search(t['Value'])) == True:
print('Found {}'.format(t))
b = {'Key':'billingid', 'Value': t['Value']}
elif bool(ownerRegex.search(t['Key'])) == True and bool(emailRegex.search(t['Value'])) == True:
print('Found {}'.format(t))
o = {'Key':'owner', 'Value': emailRegex.search(t['Value']).group()}
try:
new_tags.append(b)
except Exception as e:
logger.info('Billing not found')
try:
new_tags.append(o)
except Exception as e:
logger.info('Owner not found')
#print('Result is :', new_tags)
if len(new_tags) > 0:
return new_tags
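# Example (illustrative tag values):
#     regexLowerCaseTags([{'Key': 'BillingID', 'Value': 'PROJ-123'},
#                         {'Key': 'OwnerEmail', 'Value': 'Jane <jane@example.com>'}])
# returns [{'Key': 'billingid', 'Value': 'PROJ-123'},
#          {'Key': 'owner', 'Value': 'jane@example.com'}]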
def main():
addTags(pullAwsCreds())
if __name__ == '__main__':
main()
| 38.330357
| 121
| 0.569299
|
0d8c21969cb6a759e7ccc7dc7d78c05395f52aaf
| 5,255
|
py
|
Python
|
python/examples/depthwise_conv/depthwise_conv_2d_bench.py
|
yzhang93/iree-llvm-sandbox
|
1566f318f27585b60464516a63b49e5bc09f8fcb
|
[
"Apache-2.0"
] | null | null | null |
python/examples/depthwise_conv/depthwise_conv_2d_bench.py
|
yzhang93/iree-llvm-sandbox
|
1566f318f27585b60464516a63b49e5bc09f8fcb
|
[
"Apache-2.0"
] | null | null | null |
python/examples/depthwise_conv/depthwise_conv_2d_bench.py
|
yzhang93/iree-llvm-sandbox
|
1566f318f27585b60464516a63b49e5bc09f8fcb
|
[
"Apache-2.0"
] | null | null | null |
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# This file contains simple test cases that combine various codegen options.
from ..core.experts import *
from ..core.harness import *
from ..core.transforms import *
from .definitions import *
fun_name = 'depthwise_conv_2d_nhwc_hwc'
op_name = 'linalg.depthwise_conv_2d_nhwc_hwc'
################################################################################
# Compilation strategies.
################################################################################
# Note: `\` char at the end of next line prevents formatter reflows, keep it.
all_names = [ \
"DepthWiseConv2DExpert"
]
all_experts = [
# Note: `\` char at the end of next line prevents formatter reflows, keep it.
e.print_ir(after_all=False, at_begin=False, llvm=False) for e in [ \
DoubleTileAndDecompose(
fun_name=fun_name,
op_name=op_name,
# N H W C KH, KW
tile_sizes=[1, 8, 14, 32],
peel=[0, 1, 2],
# N H W C KH, KW
tile_sizes2=[1, 1, 7, 32, 1, 3],
peel2=[0, 1, 2])
.then(Vectorize(fun_name, ''))
.then(Bufferize())
.then(LowerVectors())
.then(LowerToLLVM())
]
]
################################################################################
# Problem instantiation
################################################################################
keys = ['N', 'H', 'W', 'C', 'KH', 'KW', 'strides', 'dilations']
# CHECK-NOT: FAILURE
def main():
n_iters = 1000
# N H W C KH KW st dil
microbenchmark_problem_size_list = [
[1, 16, 16, 32, 3, 3, [1, 1], [1, 1]],
[1, 16, 16, 32, 3, 3, [1, 2], [1, 2]],
[1, 16, 16, 32, 3, 3, [2, 1], [1, 2]],
[1, 16, 16, 32, 3, 3, [2, 2], [2, 2]],
[1, 16, 16, 32, 3, 3, [2, 3], [3, 2]],
[1, 16, 16, 32, 3, 3, [3, 2], [2, 3]],
]
benchmark_problem_size_list = [
####################################################
# /* H W KH KW PH PW S D G */
####################################################
# b->Args({112, 112, 3, 3, 2, 2, 1, 1, 32});
# b->Args({ 56, 56, 3, 3, 2, 2, 1, 1, 128});
# b->Args({ 56, 56, 3, 3, 2, 2, 2, 1, 128});
# b->Args({ 28, 28, 3, 3, 2, 2, 1, 1, 256});
# b->Args({ 28, 28, 3, 3, 2, 2, 2, 1, 256});
# b->Args({ 14, 14, 3, 3, 2, 2, 1, 1, 512});
# b->Args({ 14, 14, 3, 3, 2, 2, 2, 1, 512});
####################################################
# N H W C KH KW st dil
####################################################
[1, 112, 112, 32, 3, 3, [1, 1], [1, 1]],
[1, 56, 56, 128, 3, 3, [1, 1], [1, 1]],
[1, 56, 56, 128, 3, 3, [2, 2], [1, 1]],
[1, 28, 28, 256, 3, 3, [1, 1], [1, 1]],
[1, 28, 28, 256, 3, 3, [2, 2], [1, 1]],
[1, 14, 14, 512, 3, 3, [1, 1], [1, 1]],
[1, 14, 14, 512, 3, 3, [2, 2], [1, 1]],
[1, 7, 7, 1024, 3, 3, [1, 1], [1, 1]],
]
# Specify default configuration and parse command line.
args = test_argparser(
"depthwise conv 2d benchmark",
default_n_iters=1000,
default_problem_sizes_list=benchmark_problem_size_list,
default_expert_list=all_names,
default_dynamic_at_compile_time_list=[
[], # case 1: static at compile time
# case 2: partially dynamic at compile time
['H', 'W'],
# case 3: partially dynamic at compile time
['C'],
# case 4: fully dynamic at compile time (except KH, KW)
['N', 'H', 'W', 'C'],
],
default_spec_list=[])
for dynamic_at_compile_time in args.dynamic_at_compile_time_list:
def numpy_kernel(args, sizes, types):
problem = DepthwiseConvolutionProblem('NHWC',
'HWC',
strides=sizes['strides'],
dilations=sizes['dilations'])
problem.reference_np(*args)
def pytorch_kernel(args, sizes, types):
problem = DepthwiseConvolutionProblem('NHWC',
'HWC',
strides=sizes['strides'],
dilations=sizes['dilations'])
problem.reference_pt(*args)
test_harness(lambda sizes, t: DepthwiseConvolutionProblem(
'NHWC', 'HWC', strides=sizes['strides'], dilations=sizes['dilations']),
[[np.float32] * 3],
test_sizes(keys, args.problem_sizes_list),
test_experts(all_experts, all_names, args.expert_list),
n_iters=args.n_iters,
dynamic_at_compile_time_sizes=set(
dynamic_at_compile_time).intersection(keys),
function_name=fun_name,
dump_ir_to_file='/tmp/abcd.mlir',
dump_obj_to_file='/tmp/abcd.o',
numpy_benchmark=numpy_kernel,
pytorch_benchmark=pytorch_kernel,
dump_data_to_file=args.dump_data)
if __name__ == '__main__':
main()
| 38.357664
| 81
| 0.442055
|
648a04658683e4dfd71ff4e6f2a620d5a400b2da
| 4,028
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_12/rule_4.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_12/rule_4.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_12/rule_4.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurantlessthan20, obj[15]: Restaurant20to50, obj[16]: Direction_same, obj[17]: Distance
# {"feature": "Coupon", "instances": 85, "metric_value": 0.9879, "depth": 1}
if obj[3]>1:
# {"feature": "Coffeehouse", "instances": 62, "metric_value": 0.9072, "depth": 2}
if obj[13]<=1.0:
# {"feature": "Income", "instances": 35, "metric_value": 0.9947, "depth": 3}
if obj[11]<=6:
# {"feature": "Occupation", "instances": 31, "metric_value": 0.9992, "depth": 4}
if obj[10]>3:
# {"feature": "Bar", "instances": 26, "metric_value": 0.9829, "depth": 5}
if obj[12]<=2.0:
# {"feature": "Education", "instances": 23, "metric_value": 0.9321, "depth": 6}
if obj[9]<=2:
# {"feature": "Coupon_validity", "instances": 14, "metric_value": 0.5917, "depth": 7}
if obj[4]>0:
# {"feature": "Weather", "instances": 8, "metric_value": 0.8113, "depth": 8}
if obj[1]<=0:
# {"feature": "Time", "instances": 7, "metric_value": 0.5917, "depth": 9}
if obj[2]<=2:
return 'True'
elif obj[2]>2:
# {"feature": "Gender", "instances": 2, "metric_value": 1.0, "depth": 10}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[1]>0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[9]>2:
# {"feature": "Distance", "instances": 9, "metric_value": 0.9183, "depth": 7}
if obj[17]>1:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.971, "depth": 8}
if obj[0]>1:
# {"feature": "Gender", "instances": 3, "metric_value": 0.9183, "depth": 9}
if obj[5]<=0:
return 'False'
elif obj[5]>0:
return 'True'
else: return 'True'
elif obj[0]<=1:
return 'True'
else: return 'True'
elif obj[17]<=1:
return 'False'
else: return 'False'
else: return 'False'
elif obj[12]>2.0:
return 'False'
else: return 'False'
elif obj[10]<=3:
return 'False'
else: return 'False'
elif obj[11]>6:
return 'True'
else: return 'True'
elif obj[13]>1.0:
# {"feature": "Passanger", "instances": 27, "metric_value": 0.6052, "depth": 3}
if obj[0]>0:
# {"feature": "Income", "instances": 26, "metric_value": 0.5159, "depth": 4}
if obj[11]<=4:
return 'True'
elif obj[11]>4:
# {"feature": "Restaurantlessthan20", "instances": 11, "metric_value": 0.8454, "depth": 5}
if obj[14]<=2.0:
return 'True'
elif obj[14]>2.0:
# {"feature": "Time", "instances": 5, "metric_value": 0.971, "depth": 6}
if obj[2]>1:
# {"feature": "Gender", "instances": 3, "metric_value": 0.9183, "depth": 7}
if obj[5]>0:
return 'True'
elif obj[5]<=0:
return 'False'
else: return 'False'
elif obj[2]<=1:
return 'False'
else: return 'False'
else: return 'False'
else: return 'True'
elif obj[0]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Education", "instances": 23, "metric_value": 0.8281, "depth": 2}
if obj[9]>1:
return 'False'
elif obj[9]<=1:
# {"feature": "Occupation", "instances": 10, "metric_value": 0.971, "depth": 3}
if obj[10]>2:
# {"feature": "Distance", "instances": 7, "metric_value": 0.5917, "depth": 4}
if obj[17]<=2:
return 'True'
elif obj[17]>2:
return 'False'
else: return 'False'
elif obj[10]<=2:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
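# Illustrative check: a row with Coupon (obj[3]) == 0 and Education (obj[9]) == 2
# takes the "Education > 1" branch and is classified 'False'.
if __name__ == "__main__":
    sample = [0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]
    print(findDecision(sample))  # 'False'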
| 37.296296
| 378
| 0.542205
|
35c4094b846663cdbc4b77e0ba392b8ebfae8185
| 901
|
py
|
Python
|
animal_shelters/migrations/0014_auto_20210716_1333.py
|
KrzysztofCalus/Animal_shelters_app
|
982689c719377d94bac22145fb6fc4bfb81821a2
|
[
"CC0-1.0"
] | null | null | null |
animal_shelters/migrations/0014_auto_20210716_1333.py
|
KrzysztofCalus/Animal_shelters_app
|
982689c719377d94bac22145fb6fc4bfb81821a2
|
[
"CC0-1.0"
] | null | null | null |
animal_shelters/migrations/0014_auto_20210716_1333.py
|
KrzysztofCalus/Animal_shelters_app
|
982689c719377d94bac22145fb6fc4bfb81821a2
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-07-16 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('animal_shelters', '0013_auto_20210715_1935'),
]
operations = [
migrations.AlterField(
model_name='owner',
name='about',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='owner',
name='capacity',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='owner',
name='donations',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='owner',
name='regulations',
field=models.TextField(blank=True, null=True),
),
]
| 26.5
| 69
| 0.564928
|
6322cae22e46b1890a4644a369af6810818abe11
| 495
|
py
|
Python
|
cat_blog/users/tests/test_tasks.py
|
turamant/cat_blog
|
0a04978db78d805f6468626ab23454ffa52b2411
|
[
"MIT"
] | null | null | null |
cat_blog/users/tests/test_tasks.py
|
turamant/cat_blog
|
0a04978db78d805f6468626ab23454ffa52b2411
|
[
"MIT"
] | 4
|
2021-03-30T14:29:55.000Z
|
2021-06-10T19:56:22.000Z
|
cat_blog/users/tests/test_tasks.py
|
turamant/cat_blog
|
0a04978db78d805f6468626ab23454ffa52b2411
|
[
"MIT"
] | null | null | null |
import pytest
from celery.result import EagerResult
from cat_blog.users.tasks import get_users_count
from cat_blog.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_user_count(settings):
"""A basic test to execute the get_users_count Celery task."""
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
| 29.117647
| 66
| 0.789899
|
a7cba21667e624e970fb95c8372f19d0fd0e5e2c
| 4,582
|
py
|
Python
|
test/functional/mempool_reorg.py
|
Carbon-Reduction-Initiative/CARI
|
ff7e8d4d67628aa1a833d338704b37c3d8188aaa
|
[
"MIT"
] | 2
|
2021-04-23T20:50:52.000Z
|
2021-11-08T20:09:46.000Z
|
test/functional/mempool_reorg.py
|
Carbon-Reduction-Initiative/CARI
|
ff7e8d4d67628aa1a833d338704b37c3d8188aaa
|
[
"MIT"
] | null | null | null |
test/functional/mempool_reorg.py
|
Carbon-Reduction-Initiative/CARI
|
ff7e8d4d67628aa1a833d338704b37c3d8188aaa
|
[
"MIT"
] | 5
|
2020-11-06T10:06:07.000Z
|
2022-01-26T17:19:20.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool re-org scenarios.
Test re-org scenarios with a mempool that contains transactions
that spend (directly or indirectly) coinbase transactions.
"""
from test_framework.test_framework import CariTestFramework
from test_framework.util import *
import time
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(CariTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-checkmempool"]] * 2
def run_test(self):
# Start with a 200 block chain
assert_equal(self.nodes[0].getblockcount(), 200)
# Mine four blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].generate(4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = create_tx(self.nodes[0], coinbase_txids[1], node1_address, 249.99)
spend_102_raw = create_tx(self.nodes[0], coinbase_txids[2], node0_address, 249.99)
spend_103_raw = create_tx(self.nodes[0], coinbase_txids[3], node0_address, 249.99)
# Create a transaction which is time-locked to two blocks in the future
timelock_tx = self.nodes[0].createrawtransaction([{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 249.99})
# Set the time lock
timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1)
timelock_tx = timelock_tx[:-8] + hex(self.nodes[0].getblockcount() + 2)[2:] + "000000"
timelock_tx = self.nodes[0].signrawtransaction(timelock_tx)["hex"]
# This will raise an exception because the timelock transaction is too immature to spend
assert_raises_rpc_error(-26, "non-final", self.nodes[0].sendrawtransaction, timelock_tx)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].generate(1)
# Time-locked transaction is still too immature to spend
assert_raises_rpc_error(-26,'non-final', self.nodes[0].sendrawtransaction, timelock_tx)
# Create 102_1 and 103_1:
spend_102_1_raw = create_tx(self.nodes[0], spend_102_id, node1_address, 249.98)
spend_103_1_raw = create_tx(self.nodes[0], spend_103_id, node1_address, 249.98)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
last_block = self.nodes[0].generate(1)
# Time-locked transaction can now be spent
#timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id})
self.sync_all()
for node in self.nodes:
node.invalidateblock(last_block[0])
# Time-locked transaction is now too immature and has been removed from the mempool
# spend_103_1 has been re-orged out of the chain and is back in the mempool
assert_equal(set(self.nodes[0].getrawmempool()), {spend_101_id, spend_102_1_id, spend_103_1_id})
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
self.sync_all()
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| 48.231579
| 123
| 0.690965
|
f72538c76f49ccac9bcd5d18c35fad8c0e5bdbe6
| 691
|
py
|
Python
|
backtracking/0216_combination_sum_3.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
backtracking/0216_combination_sum_3.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
backtracking/0216_combination_sum_3.py
|
MartinMa28/Algorithms_review
|
3f2297038c00f5a560941360ca702e6868530f34
|
[
"MIT"
] | null | null | null |
class Solution:
def __init__(self):
self.combs = []
def _backtrack(self, candidates, cur, target, k):
if len(cur) == k and sum(cur) == target:
self.combs.append(cur[:])
return
if sum(cur) > target:
return
elif len(cur) < k:
for idx, candi in enumerate(candidates):
cur.append(candi)
self._backtrack(candidates[idx + 1:], cur, target, k)
# backtracking
cur.pop()
def combinationSum3(self, k: int, n: int) -> list:
self._backtrack(range(1, 10), [], n, k)
return self.combs
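# Illustrative check: the only 3-number combination from 1-9 that sums to 7 is
# [1, 2, 4].
if __name__ == "__main__":
    print(Solution().combinationSum3(3, 7))  # [[1, 2, 4]]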
| 30.043478
| 69
| 0.474674
|
3fae588403a5e2909f831ba1636738eee83128c4
| 2,965
|
py
|
Python
|
ggpacket.py
|
twasilczyk/gg-server
|
e267eb9845f8c743b153279b7ae716d60fe1f611
|
[
"MIT"
] | 1
|
2018-01-16T04:01:06.000Z
|
2018-01-16T04:01:06.000Z
|
ggpacket.py
|
twasilczyk/gg-server
|
e267eb9845f8c743b153279b7ae716d60fe1f611
|
[
"MIT"
] | null | null | null |
ggpacket.py
|
twasilczyk/gg-server
|
e267eb9845f8c743b153279b7ae716d60fe1f611
|
[
"MIT"
] | null | null | null |
import struct
import time
import ggproto
import packets_pb2
def readUIN(data):
return int(data[2:])
def writeUIN(uin):
uin = str(uin)
return struct.pack("<BB", 0, len(uin)) + uin
class GGPacket:
def __init__(self, pid):
self.pid = pid
def body(self):
return ""
def __str__(self):
b = self.body()
header = struct.pack("<II", self.pid, len(b))
return header + b
class Notify105Last:
def ParseFromString(self, data):
self.blist = []
while (len(data) > 0):
(_, uin_len) = struct.unpack("=BB", data[:2])
data = data[2:]
uin = data[:uin_len]
data = data[(uin_len + 1):]
self.blist.append(int(uin))
class Ping:
def __init__(self):
pass
def parsePacket(p_type, p_data):
if (p_type == ggproto.LOGIN105):
p = packets_pb2.GG105Login()
p.ParseFromString(p_data)
return p
elif (p_type == ggproto.NOTIFY105_LAST):
p = Notify105Last()
p.ParseFromString(p_data)
return p
elif (p_type == ggproto.PING):
return Ping()
else:
return None
class Welcome(GGPacket):
def __init__(self):
GGPacket.__init__(self, ggproto.WELCOME)
def body(self):
return struct.pack("<I", 0x01020304)
class Login110OK(GGPacket):
def __init__(self, uin):
GGPacket.__init__(self, ggproto.LOGIN110_OK)
self.uin = uin
def body(self):
p = packets_pb2.GG110LoginOK()
p.dummy1 = 1
p.dummyhash = "nej2844d8d43s2dMNea2584sdf1sf418"
p.uin = self.uin
p.server_time = int(time.time())
return p.SerializeToString()
class IMToken(GGPacket):
def __init__(self):
GGPacket.__init__(self, ggproto.IMTOKEN)
def body(self):
p = packets_pb2.GG110Imtoken()
p.imtoken = "1234567890123456789012345678901234567890"
return p.SerializeToString()
class Status80(GGPacket):
def __init__(self, uin):
GGPacket.__init__(self, ggproto.STATUS80)
self.uin = uin
def body(self):
return struct.pack("<IIIIhBBII", self.uin, ggproto.STATUS_AVAIL, 0, 0, 0, 255, 0, 0, 0)
class NotifyReply80(GGPacket):
def __init__(self, blist):
GGPacket.__init__(self, ggproto.NOTIFY_REPLY80)
self.blist = blist
def body(self):
p = ""
for uin in self.blist:
# XXX: copy-pasta
p = p + struct.pack("<IIIIhBBII", uin, ggproto.STATUS_AVAIL, 0, 0, 0, 255, 0, 0, 0)
return p
class UserData(GGPacket):
def __init__(self, blist):
GGPacket.__init__(self, ggproto.USER_DATA)
self.blist = blist
def body(self):
p = struct.pack("<II", 3, len(self.blist))
for uin in self.blist:
p = p + struct.pack("<II", uin, 0)
return p
class Pong110(GGPacket):
def __init__(self):
GGPacket.__init__(self, ggproto.PONG110)
def body(self):
p = packets_pb2.GG110Pong()
p.server_time = int(time.time())
return p.SerializeToString()
class MagicNotification(GGPacket):
def __init__(self):
GGPacket.__init__(self, ggproto.MAGIC_NOTIFICATION)
def body(self):
p = packets_pb2.GG110MagicNotification()
p.dummy1 = 2
p.seq = 1
p.dummy2 = 1
p.dummy3 = 1
p.uin = writeUIN(1234567)
p.dummy4 = ""
return p.SerializeToString()
| 23.164063
| 89
| 0.693761
|
435808ecd070b23c6baf2ccf88b907be018ba667
| 12,172
|
py
|
Python
|
venv/lib/python3.8/site-packages/mercurial/lock.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | 4
|
2021-02-05T10:57:39.000Z
|
2022-02-25T04:43:23.000Z
|
venv/lib/python3.8/site-packages/mercurial/lock.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/mercurial/lock.py
|
JesseDavids/mqtta
|
389eb4f06242d4473fe1bcff7fc6a22290e0d99c
|
[
"Apache-2.0"
] | null | null | null |
# lock.py - simple advisory locking scheme for mercurial
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import contextlib
import errno
import os
import signal
import socket
import time
import warnings
from .i18n import _
from .pycompat import getattr
from . import (
encoding,
error,
pycompat,
util,
)
from .utils import procutil
def _getlockprefix():
"""Return a string which is used to differentiate pid namespaces
It's useful to detect "dead" processes and remove stale locks with
confidence. Typically it's just hostname. On modern linux, we include an
extra Linux-specific pid namespace identifier.
"""
result = encoding.strtolocal(socket.gethostname())
if pycompat.sysplatform.startswith(b'linux'):
try:
result += b'/%x' % os.stat(b'/proc/self/ns/pid').st_ino
except OSError as ex:
if ex.errno not in (errno.ENOENT, errno.EACCES, errno.ENOTDIR):
raise
return result
@contextlib.contextmanager
def _delayedinterrupt():
"""Block signal interrupt while doing something critical
This makes sure that the code block wrapped by this context manager won't
be interrupted.
For Windows developers: It appears not possible to guard time.sleep()
from CTRL_C_EVENT, so please don't use time.sleep() to test if this is
working.
"""
assertedsigs = []
blocked = False
orighandlers = {}
def raiseinterrupt(num):
if num == getattr(signal, 'SIGINT', None) or num == getattr(
signal, 'CTRL_C_EVENT', None
):
raise KeyboardInterrupt
else:
raise error.SignalInterrupt
def catchterm(num, frame):
if blocked:
assertedsigs.append(num)
else:
raiseinterrupt(num)
try:
# save handlers first so they can be restored even if a setup is
# interrupted between signal.signal() and orighandlers[] =.
for name in [
b'CTRL_C_EVENT',
b'SIGINT',
b'SIGBREAK',
b'SIGHUP',
b'SIGTERM',
]:
num = getattr(signal, name, None)
if num and num not in orighandlers:
orighandlers[num] = signal.getsignal(num)
try:
for num in orighandlers:
signal.signal(num, catchterm)
except ValueError:
pass # in a thread? no luck
blocked = True
yield
finally:
# no simple way to reliably restore all signal handlers because
# any loops, recursive function calls, except blocks, etc. can be
# interrupted. so instead, make catchterm() raise interrupt.
blocked = False
try:
for num, handler in orighandlers.items():
signal.signal(num, handler)
except ValueError:
pass # in a thread?
# re-raise interrupt exception if any, which may be shadowed by a new
# interrupt occurred while re-raising the first one
if assertedsigs:
raiseinterrupt(assertedsigs[0])
def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
"""return an acquired lock or raise an a LockHeld exception
This function is responsible to issue warnings and or debug messages about
the held lock while trying to acquires it."""
def printwarning(printer, locker):
"""issue the usual "waiting on lock" message through any channel"""
# show more details for new-style locks
if b':' in locker:
host, pid = locker.split(b":", 1)
msg = _(
b"waiting for lock on %s held by process %r on host %r\n"
) % (
pycompat.bytestr(l.desc),
pycompat.bytestr(pid),
pycompat.bytestr(host),
)
else:
msg = _(b"waiting for lock on %s held by %r\n") % (
l.desc,
pycompat.bytestr(locker),
)
printer(msg)
l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
debugidx = 0 if (warntimeout and timeout) else -1
warningidx = 0
if not timeout:
warningidx = -1
elif warntimeout:
warningidx = warntimeout
delay = 0
while True:
try:
l._trylock()
break
except error.LockHeld as inst:
if delay == debugidx:
printwarning(ui.debug, inst.locker)
if delay == warningidx:
printwarning(ui.warn, inst.locker)
if timeout <= delay:
raise error.LockHeld(
errno.ETIMEDOUT, inst.filename, l.desc, inst.locker
)
time.sleep(1)
delay += 1
l.delay = delay
if l.delay:
if 0 <= warningidx <= l.delay:
ui.warn(_(b"got lock after %d seconds\n") % l.delay)
else:
ui.debug(b"got lock after %d seconds\n" % l.delay)
if l.acquirefn:
l.acquirefn()
return l
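# Illustrative sketch only (not part of this module): a typical caller of
# trylock() above.  The lock name and timeouts are hypothetical example values;
# the point is that a debug line is emitted on the first failed attempt, a
# user-visible warning after `warntimeout` seconds, and LockHeld is raised once
# `timeout` seconds have elapsed.
def _example_trylock(ui, vfs):
    l = trylock(ui, vfs, b'example.lock', timeout=600, warntimeout=10,
                desc=b'example lock')
    try:
        pass  # work that must be serialized across processes
    finally:
        l.release()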
class lock(object):
"""An advisory lock held by one process to control access to a set
of files. Non-cooperating processes or incorrectly written scripts
can ignore Mercurial's locking scheme and stomp all over the
repository, so don't do that.
Typically used via localrepository.lock() to lock the repository
store (.hg/store/) or localrepository.wlock() to lock everything
else under .hg/."""
# lock is symlink on platforms that support it, file on others.
# symlink is used because create of directory entry and contents
# are atomic even over nfs.
# old-style lock: symlink to pid
# new-style lock: symlink to hostname:pid
_host = None
def __init__(
self,
vfs,
fname,
timeout=-1,
releasefn=None,
acquirefn=None,
desc=None,
signalsafe=True,
dolock=True,
):
self.vfs = vfs
self.f = fname
self.held = 0
self.timeout = timeout
self.releasefn = releasefn
self.acquirefn = acquirefn
self.desc = desc
if signalsafe:
self._maybedelayedinterrupt = _delayedinterrupt
else:
self._maybedelayedinterrupt = util.nullcontextmanager
self.postrelease = []
self.pid = self._getpid()
if dolock:
self.delay = self.lock()
if self.acquirefn:
self.acquirefn()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
success = all(a is None for a in (exc_type, exc_value, exc_tb))
self.release(success=success)
def __del__(self):
if self.held:
warnings.warn(
"use lock.release instead of del lock",
category=DeprecationWarning,
stacklevel=2,
)
# ensure the lock will be removed
# even if recursive locking did occur
self.held = 1
self.release()
def _getpid(self):
# wrapper around procutil.getpid() to make testing easier
return procutil.getpid()
def lock(self):
timeout = self.timeout
while True:
try:
self._trylock()
return self.timeout - timeout
except error.LockHeld as inst:
if timeout != 0:
time.sleep(1)
if timeout > 0:
timeout -= 1
continue
raise error.LockHeld(
errno.ETIMEDOUT, inst.filename, self.desc, inst.locker
)
def _trylock(self):
if self.held:
self.held += 1
return
if lock._host is None:
lock._host = _getlockprefix()
lockname = b'%s:%d' % (lock._host, self.pid)
retry = 5
while not self.held and retry:
retry -= 1
try:
with self._maybedelayedinterrupt():
self.vfs.makelock(lockname, self.f)
self.held = 1
except (OSError, IOError) as why:
if why.errno == errno.EEXIST:
locker = self._readlock()
if locker is None:
continue
locker = self._testlock(locker)
if locker is not None:
raise error.LockHeld(
errno.EAGAIN,
self.vfs.join(self.f),
self.desc,
locker,
)
else:
raise error.LockUnavailable(
why.errno, why.strerror, why.filename, self.desc
)
if not self.held:
# use empty locker to mean "busy for frequent lock/unlock
# by many processes"
raise error.LockHeld(
errno.EAGAIN, self.vfs.join(self.f), self.desc, b""
)
def _readlock(self):
"""read lock and return its value
Returns None if no lock exists, pid for old-style locks, and host:pid
for new-style locks.
"""
try:
return self.vfs.readlock(self.f)
except (OSError, IOError) as why:
if why.errno == errno.ENOENT:
return None
raise
def _lockshouldbebroken(self, locker):
if locker is None:
return False
try:
host, pid = locker.split(b":", 1)
except ValueError:
return False
if host != lock._host:
return False
try:
pid = int(pid)
except ValueError:
return False
if procutil.testpid(pid):
return False
return True
def _testlock(self, locker):
if not self._lockshouldbebroken(locker):
return locker
# if locker dead, break lock. must do this with another lock
# held, or can race and break valid lock.
try:
with lock(self.vfs, self.f + b'.break', timeout=0):
locker = self._readlock()
if not self._lockshouldbebroken(locker):
return locker
self.vfs.unlink(self.f)
except error.LockError:
return locker
def testlock(self):
"""return id of locker if lock is valid, else None.
If old-style lock, we cannot tell what machine locker is on.
With new-style lock, if locker is on this machine, we can
see if locker is alive. If locker is on this machine but
not alive, we can safely break lock.
The lock file is only deleted when None is returned.
"""
locker = self._readlock()
return self._testlock(locker)
def release(self, success=True):
"""release the lock and execute callback function if any
If the lock has been acquired multiple times, the actual release is
delayed to the last release call."""
if self.held > 1:
self.held -= 1
elif self.held == 1:
self.held = 0
if self._getpid() != self.pid:
# we forked, and are not the parent
return
try:
if self.releasefn:
self.releasefn()
finally:
try:
self.vfs.unlink(self.f)
except OSError:
pass
# The postrelease functions typically assume the lock is not held
# at all.
for callback in self.postrelease:
callback(success)
# Prevent double usage and help clear cycles.
self.postrelease = None
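# Illustrative sketch only (not part of this module): lock objects are normally
# obtained through localrepository.lock()/wlock(), but used directly the class
# behaves as a context manager.  The vfs and lock name are hypothetical.
def _example_direct_lock(vfs):
    with lock(vfs, b'example.lock', timeout=5, desc=b'example lock'):
        pass  # the lock file exists here and is removed on exit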
def release(*locks):
for lock in locks:
if lock is not None:
lock.release()
| 30.582915
| 78
| 0.552498
|
a67e32ba36d9f5b94b86acb9e52573d6ccd245fa
| 10,703
|
py
|
Python
|
odoo/addons/website_sale/models/sale_order.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/website_sale/models/sale_order.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/website_sale/models/sale_order.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.addons.web.http import request
from openerp.tools.translate import _
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):
res = dict()
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purposes.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null', copy=False),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null', copy=False),
}
def _get_errors(self, cr, uid, order, context=None):
return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
for so in self.browse(cr, uid, ids, context=context):
domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
if line_id:
domain += [('id', '=', line_id)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, line_id=None, context=None):
so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
pricelist=so.pricelist_id.id,
product=product_id,
partner_id=so.partner_id.id,
fiscal_position=so.fiscal_position.id,
qty=qty,
context=context
)['value']
if line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
values['name'] = line.name
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
values['name'] = product.description_sale and "%s\n%s" % (product.display_name, product.description_sale) or product.display_name
values['product_id'] = product_id
values['order_id'] = order_id
if values.get('tax_id') != None:
values['tax_id'] = [(6, 0, values['tax_id'])]
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
sol = self.pool.get('sale.order.line')
quantity = 0
for so in self.browse(cr, uid, ids, context=context):
if so.state != 'draft':
request.session['sale_order_id'] = None
raise osv.except_osv(_('Error!'), _('It is forbidden to modify a sale order which is not in draft status'))
if line_id != False:
line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
if line_ids:
line_id = line_ids[0]
# Create line if no line with product_id can be located
if not line_id:
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=1, context=context)
line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty != None:
quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
# Remove zero or negative lines
if quantity <= 0:
sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
else:
# update line
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=quantity, line_id=line_id, context=context)
values['product_uom_qty'] = quantity
sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
return {'line_id': line_id, 'quantity': quantity}
def _cart_accessories(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
s -= set(l.product_id.id for l in order.order_line)
product_ids = random.sample(s, min(len(s),3))
return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
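# Illustrative sketch only (assumed, not shipped code): a website_sale
# controller typically combines website.sale_get_order() (defined below) with
# sale_order._cart_update() (defined above) to put one unit of a product in the
# visitor's cart.  product_id is a hypothetical parameter.
def _example_add_to_cart(product_id):
    order = request.website.sale_get_order(force_create=True)
    return order._cart_update(product_id=product_id, add_qty=1)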
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default Pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
type='many2one', relation='res.currency', string='Default Currency'),
}
def sale_product_domain(self, cr, uid, ids, context=None):
return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
# check for change of partner_id ie after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value']
if values.get('fiscal_position'):
order_lines = map(int,sale_order.order_line)
values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [],
values['fiscal_position'], [[6, 0, order_lines]], context=context)['value'])
values['partner_id'] = partner.id
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
if flag_pricelist or values.get('fiscal_position', False) != fiscal_position:
update_pricelist = True
# update the pricelist
if update_pricelist:
values = {'pricelist_id': pricelist_id}
values.update(sale_order.onchange_pricelist_id(pricelist_id, None)['value'])
sale_order.write(values)
for line in sale_order.order_line:
if line.exists():
sale_order._cart_update(product_id=line.product_id.id, line_id=line.id, add_qty=0)
# update browse record
if (code and code != sale_order.pricelist_id.code) or sale_order.partner_id.id != partner.id:
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order.id, context=context)
return sale_order
def sale_get_transaction(self, cr, uid, ids, context=None):
transaction_obj = self.pool.get('payment.transaction')
tx_id = request.session.get('sale_transaction_id')
if tx_id:
tx_ids = transaction_obj.search(cr, SUPERUSER_ID, [('id', '=', tx_id), ('state', 'not in', ['cancel'])], context=context)
if tx_ids:
return transaction_obj.browse(cr, SUPERUSER_ID, tx_ids[0], context=context)
else:
request.session['sale_transaction_id'] = False
return False
def sale_reset(self, cr, uid, ids, context=None):
request.session.update({
'sale_order_id': False,
'sale_transaction_id': False,
'sale_order_code_pricelist_id': False,
})
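# Illustrative sketch only (assumed, not shipped code): applying a pricelist
# coupon amounts to re-fetching the current order with the code, after which
# sale_get_order() above switches the pricelist and recomputes every cart line.
# coupon_code is a hypothetical parameter.
def _example_apply_coupon(coupon_code):
    order = request.website.sale_get_order(code=coupon_code)
    return order and order.pricelist_id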
| 48.429864
| 141
| 0.613286
|
e73da25b738650b85ba56a05411845edcd2b5f27
| 5,935
|
py
|
Python
|
nerfies/schedules.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 716
|
2021-02-03T08:37:54.000Z
|
2022-03-31T19:40:45.000Z
|
nerfies/schedules.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 53
|
2021-02-04T21:07:44.000Z
|
2022-03-31T15:58:21.000Z
|
nerfies/schedules.py
|
oahzxl/nerfies
|
9e8b007d8fc7059f6a42e07233c76c4f356d8439
|
[
"Apache-2.0"
] | 107
|
2021-02-03T09:57:48.000Z
|
2022-03-29T09:19:33.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Annealing Schedules."""
import abc
import collections.abc
import copy
import math
from typing import Any, Iterable, List, Tuple, Union
from jax import numpy as jnp
def from_tuple(x):
schedule_type, *args = x
return SCHEDULE_MAP[schedule_type](*args)
def from_dict(d):
d = copy.copy(dict(d))
schedule_type = d.pop('type')
return SCHEDULE_MAP[schedule_type](**d)
def from_config(schedule):
if isinstance(schedule, Schedule):
return schedule
if isinstance(schedule, Tuple) or isinstance(schedule, List):
return from_tuple(schedule)
if isinstance(schedule, collections.abc.Mapping):
return from_dict(schedule)
raise ValueError(f'Unknown type {type(schedule)}.')
class Schedule(abc.ABC):
"""An interface for generic schedules.."""
@abc.abstractmethod
def get(self, step):
"""Get the value for the given step."""
raise NotImplementedError
def __call__(self, step):
return self.get(step)
class ConstantSchedule(Schedule):
"""Linearly scaled scheduler."""
def __init__(self, value):
super().__init__()
self.value = value
def get(self, step):
"""Get the value for the given step."""
return jnp.full_like(step, self.value, dtype=jnp.float32)
class LinearSchedule(Schedule):
"""Linearly scaled scheduler."""
def __init__(self, initial_value, final_value, num_steps):
super().__init__()
self.initial_value = initial_value
self.final_value = final_value
self.num_steps = num_steps
def get(self, step):
"""Get the value for the given step."""
if self.num_steps == 0:
return jnp.full_like(step, self.final_value, dtype=jnp.float32)
alpha = jnp.minimum(step / self.num_steps, 1.0)
return (1.0 - alpha) * self.initial_value + alpha * self.final_value
class ExponentialSchedule(Schedule):
"""Exponentially decaying scheduler."""
def __init__(self, initial_value, final_value, num_steps, eps=1e-10):
super().__init__()
if initial_value <= final_value:
raise ValueError('Final value must be less than initial value.')
self.initial_value = initial_value
self.final_value = final_value
self.num_steps = num_steps
self.eps = eps
def get(self, step):
"""Get the value for the given step."""
if step >= self.num_steps:
return jnp.full_like(step, self.final_value, dtype=jnp.float32)
final_value = max(self.final_value, self.eps)
base = final_value / self.initial_value
exponent = step / (self.num_steps - 1)
if step >= self.num_steps:
return jnp.full_like(step, self.final_value, dtype=jnp.float32)
return self.initial_value * base**exponent
class CosineEasingSchedule(Schedule):
"""Schedule that eases slowsly using a cosine."""
def __init__(self, initial_value, final_value, num_steps):
super().__init__()
self.initial_value = initial_value
self.final_value = final_value
self.num_steps = num_steps
def get(self, step):
"""Get the value for the given step."""
alpha = jnp.minimum(step / self.num_steps, 1.0)
scale = self.final_value - self.initial_value
x = min(max(alpha, 0.0), 1.0)
return (self.initial_value
+ scale * 0.5 * (1 + math.cos(jnp.pi * x + jnp.pi)))
class StepSchedule(Schedule):
"""Schedule that eases slowsly using a cosine."""
def __init__(self,
initial_value,
decay_interval,
decay_factor,
max_decays,
final_value=None):
super().__init__()
self.initial_value = initial_value
self.decay_factor = decay_factor
self.decay_interval = decay_interval
self.max_decays = max_decays
if final_value is None:
final_value = self.initial_value * self.decay_factor**self.max_decays
self.final_value = final_value
def get(self, step):
"""Get the value for the given step."""
phase = step // self.decay_interval
if phase >= self.max_decays:
return self.final_value
else:
return self.initial_value * self.decay_factor**phase
class PiecewiseSchedule(Schedule):
"""A piecewise combination of multiple schedules."""
def __init__(
self, schedules: Iterable[Tuple[int, Union[Schedule, Iterable[Any]]]]):
self.schedules = [from_config(s) for ms, s in schedules]
milestones = jnp.array([ms for ms, s in schedules])
self.milestones = jnp.cumsum(milestones)[:-1]
def get(self, step):
idx = jnp.searchsorted(self.milestones, step, side='right')
schedule = self.schedules[idx]
base_idx = self.milestones[idx - 1] if idx >= 1 else 0
return schedule.get(step - base_idx)
class DelayedSchedule(Schedule):
"""Delays the start of the base schedule."""
def __init__(self, base_schedule: Schedule, delay_steps, delay_mult):
self.base_schedule = from_config(base_schedule)
self.delay_steps = delay_steps
self.delay_mult = delay_mult
def get(self, step):
delay_rate = (
self.delay_mult
+ (1 - self.delay_mult)
* jnp.sin(0.5 * jnp.pi * jnp.clip(step / self.delay_steps, 0, 1)))
return delay_rate * self.base_schedule(step)
SCHEDULE_MAP = {
'constant': ConstantSchedule,
'linear': LinearSchedule,
'exponential': ExponentialSchedule,
'cosine_easing': CosineEasingSchedule,
'step': StepSchedule,
'piecewise': PiecewiseSchedule,
'delayed': DelayedSchedule,
}
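# Illustrative usage sketch (assumed, not part of the original file): schedules
# can be instantiated directly or, more commonly, built from lightweight
# configs via from_config()/from_dict()/from_tuple().  The numbers below are
# arbitrary example values.
if __name__ == '__main__':
    lr = from_config({'type': 'exponential',
                      'initial_value': 1e-3,
                      'final_value': 1e-5,
                      'num_steps': 1000})
    warmup = from_config(('piecewise', [
        (100, ('linear', 0.0, 1e-3, 100)),  # 100-step linear warm-up
        (900, ('constant', 1e-3)),          # then hold the value
    ]))
    print(lr(0), lr(500), warmup(50), warmup(500))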
| 29.527363
| 77
| 0.693513
|
6d325f33051561006bca2e2b619761bd0a157753
| 10,688
|
py
|
Python
|
vyper/parser/lll_node.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
vyper/parser/lll_node.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
vyper/parser/lll_node.py
|
mpcnat/vyper
|
731263c9bea826a167639989350688833f68c182
|
[
"MIT"
] | null | null | null |
import re
from vyper.types import (
ceil32,
BaseType,
NodeType,
NullType
)
from vyper.opcodes import (
comb_opcodes
)
from vyper.utils import valid_lll_macros
# Set default string representation for ints in LLL output.
AS_HEX_DEFAULT = True
# Terminal color types
OKBLUE = '\033[94m'
OKMAGENTA = '\033[35m'
OKLIGHTMAGENTA = '\033[95m'
OKLIGHTBLUE = '\033[94m'
ENDC = '\033[0m'
class NullAttractor():
def __add__(self, other):
return NullAttractor()
def __repr__(self):
return 'None'
__radd__ = __add__
__mul__ = __add__
# Data structure for LLL parse tree
class LLLnode():
repr_show_gas = False
def __init__(self, value, args=None, typ=None, location=None, pos=None, annotation='', mutable=True, add_gas_estimate=0):
if args is None:
args = []
self.value = value
self.args = args
self.typ = typ
assert isinstance(self.typ, NodeType) or self.typ is None, repr(self.typ)
self.location = location
self.pos = pos
self.annotation = annotation
self.mutable = mutable
self.add_gas_estimate = add_gas_estimate
self.as_hex = AS_HEX_DEFAULT
# Determine this node's valency (1 if it pushes a value on the stack,
# 0 otherwise), check that the number and valencies of its
# children are correct, and find an upper bound on gas consumption.
# Numbers
if isinstance(self.value, int):
self.valency = 1
self.gas = 5
elif isinstance(self.value, str):
# Opcodes and pseudo-opcodes (e.g. clamp)
if self.value.upper() in comb_opcodes:
_, ins, outs, gas = comb_opcodes[self.value.upper()]
self.valency = outs
if len(self.args) != ins:
raise Exception("Number of arguments mismatched: %r %r" % (self.value, self.args))
# We add 2 per stack height at push time and take it back
# at pop time; this makes `break` easier to handle
self.gas = gas + 2 * (outs - ins)
for arg in self.args:
if arg.valency == 0:
raise Exception("Can't have a zerovalent argument to an opcode or a pseudo-opcode! %r" % arg)
self.gas += arg.gas
# Dynamic gas cost: 8 gas for each byte of logging data
if self.value.upper()[0:3] == 'LOG' and isinstance(self.args[1].value, int):
self.gas += self.args[1].value * 8
# Dynamic gas cost: non-zero-valued call
if self.value.upper() == 'CALL' and self.args[2].value != 0:
self.gas += 34000
# Dynamic gas cost: filling sstore (ie. not clearing)
elif self.value.upper() == 'SSTORE' and self.args[1].value != 0:
self.gas += 15000
# Dynamic gas cost: calldatacopy
elif self.value.upper() in ('CALLDATACOPY', 'CODECOPY'):
size = 34000
if isinstance(self.args[2].value, int):
size = self.args[2].value
elif isinstance(self.args[2], LLLnode) and len(self.args[2].args) > 0:
size = self.args[2].args[-1].value
self.gas += ceil32(size) // 32 * 3
# Gas limits in call
if self.value.upper() == 'CALL' and isinstance(self.args[0].value, int):
self.gas += self.args[0].value
# If statements
elif self.value == 'if':
if len(self.args) == 3:
self.gas = self.args[0].gas + max(self.args[1].gas, self.args[2].gas) + 3
if self.args[1].valency != self.args[2].valency:
raise Exception("Valency mismatch between then and else clause: %r %r" % (self.args[1], self.args[2]))
if len(self.args) == 2:
self.gas = self.args[0].gas + self.args[1].gas + 17
if self.args[1].valency:
raise Exception("2-clause if statement must have a zerovalent body: %r" % self.args[1])
if not self.args[0].valency:
raise Exception("Can't have a zerovalent argument as a test to an if statement! %r" % self.args[0])
if len(self.args) not in (2, 3):
raise Exception("If can only have 2 or 3 arguments")
self.valency = self.args[1].valency
# With statements: with <var> <initial> <statement>
elif self.value == 'with':
if len(self.args) != 3:
raise Exception("With statement must have 3 arguments")
if len(self.args[0].args) or not isinstance(self.args[0].value, str):
raise Exception("First argument to with statement must be a variable")
if not self.args[1].valency:
raise Exception("Second argument to with statement (initial value) cannot be zerovalent: %r" % self.args[1])
self.valency = self.args[2].valency
self.gas = sum([arg.gas for arg in self.args]) + 5
# Repeat statements: repeat <index_memloc> <startval> <rounds> <body>
elif self.value == 'repeat':
if len(self.args[2].args) or not isinstance(self.args[2].value, int) or self.args[2].value <= 0:
raise Exception("Number of times repeated must be a constant nonzero positive integer: %r" % self.args[2])
if not self.args[0].valency:
raise Exception("First argument to repeat (memory location) cannot be zerovalent: %r" % self.args[0])
if not self.args[1].valency:
raise Exception("Second argument to repeat (start value) cannot be zerovalent: %r" % self.args[1])
if self.args[3].valency:
raise Exception("Third argument to repeat (clause to be repeated) must be zerovalent: %r" % self.args[3])
self.valency = 0
if self.args[1].value == 'mload' or self.args[1].value == 'sload':
rounds = self.args[2].value
else:
rounds = abs(self.args[2].value - self.args[1].value)
self.gas = rounds * (self.args[3].gas + 50) + 30
# Seq statements: seq <statement> <statement> ...
elif self.value == 'seq':
self.valency = self.args[-1].valency if self.args else 0
self.gas = sum([arg.gas for arg in self.args]) + 30
# Multi statements: multi <expr> <expr> ...
elif self.value == 'multi':
for arg in self.args:
if not arg.valency:
raise Exception("Multi expects all children to not be zerovalent: %r" % arg)
self.valency = sum([arg.valency for arg in self.args])
self.gas = sum([arg.gas for arg in self.args])
# LLL brackets (don't bother gas counting)
elif self.value == 'lll':
self.valency = 1
self.gas = NullAttractor()
# Stack variables
else:
self.valency = 1
self.gas = 5
elif self.value is None and isinstance(self.typ, NullType):
self.valency = 1
self.gas = 5
else:
raise Exception("Invalid value for LLL AST node: %r" % self.value)
assert isinstance(self.args, list)
self.gas += self.add_gas_estimate
def to_list(self):
return [self.value] + [a.to_list() for a in self.args]
@property
def repr_value(self):
if isinstance(self.value, int) and self.as_hex:
return hex(self.value)
if not isinstance(self.value, str):
return str(self.value)
return self.value
@staticmethod
def _colorise_keywords(val):
if val.lower() in valid_lll_macros: # highlight macro
return OKLIGHTMAGENTA + val + ENDC
elif val.upper() in comb_opcodes.keys():
return OKMAGENTA + val + ENDC
return val
def repr(self):
if not len(self.args):
if self.annotation:
return '%r ' % self.repr_value + OKLIGHTBLUE + '<%s>' % self.annotation + ENDC
else:
return str(self.repr_value)
# x = repr(self.to_list())
# if len(x) < 80:
# return x
o = ''
if self.annotation:
o += '/* %s */ \n' % self.annotation
if self.repr_show_gas and self.gas:
o += OKBLUE + "{" + ENDC + str(self.gas) + OKBLUE + "} " + ENDC # add gas for info.
o += '[' + self._colorise_keywords(self.repr_value)
prev_lineno = self.pos[0] if self.pos else None
arg_lineno = None
annotated = False
has_inner_newlines = False
for arg in self.args:
o += ',\n '
arg_lineno = arg.pos[0] if arg.pos else None
if arg_lineno is not None and arg_lineno != prev_lineno and self.value in ('seq', 'if'):
o += '# Line %d\n ' % (arg_lineno)
prev_lineno = arg_lineno
annotated = True
arg_repr = arg.repr()
if '\n' in arg_repr:
has_inner_newlines = True
sub = arg_repr.replace('\n', '\n ').strip(' ')
o += self._colorise_keywords(sub)
output = o.rstrip(' ') + ']'
output_on_one_line = re.sub(r',\n *', ', ', output).replace('\n', '')
if (len(output_on_one_line) < 80 or len(self.args) == 1) and not annotated and not has_inner_newlines:
return output_on_one_line
else:
return output
def __repr__(self):
return self.repr()
@classmethod
def from_list(cls, obj, typ=None, location=None, pos=None, annotation=None, mutable=True, add_gas_estimate=0):
if isinstance(typ, str):
typ = BaseType(typ)
if isinstance(obj, LLLnode):
if obj.pos is None:
obj.pos = pos
if obj.location is None:
obj.location = location
return obj
elif not isinstance(obj, list):
return cls(obj, [], typ, location, pos, annotation, mutable, add_gas_estimate=add_gas_estimate)
else:
return cls(obj[0], [cls.from_list(o, pos=pos) for o in obj[1:]], typ, location, pos, annotation, mutable, add_gas_estimate=add_gas_estimate)
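# Illustrative usage sketch (assumed, not part of the original module): LLL
# fragments are normally built from nested Python lists with LLLnode.from_list;
# a gas upper bound is accumulated at construction time and repr() pretty-prints
# the tree.  The opcodes and the 'int128' type below are example values.
if __name__ == '__main__':
    node = LLLnode.from_list(
        ['seq',
         ['mstore', 0, ['add', 3, 4]],
         ['mload', 0]],
        typ='int128',
        annotation='store 3 + 4, then load it back')
    print(node)       # pretty-printed LLL tree
    print(node.gas)   # upper bound on gas for this fragment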
| 44.719665
| 152
| 0.544068
|
cbcdb6909198784a7079e1ce5f3995fffb5b72f0
| 598
|
py
|
Python
|
src/sentry/testutils/middleware.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | 1
|
2019-10-17T17:46:16.000Z
|
2019-10-17T17:46:16.000Z
|
src/sentry/testutils/middleware.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/testutils/middleware.py
|
learninto/sentry
|
4f9f564841498b3af49c1677d6b61f3e47b01923
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from sentry.middleware.sudo import SudoMiddleware as BaseSudoMiddleware
class BrokenRequestMiddleware(object):
def process_request(self, request):
raise ImportError("request")
class BrokenResponseMiddleware(object):
def process_response(self, request, response):
raise ImportError("response")
class BrokenViewMiddleware(object):
def process_view(self, request, func, args, kwargs):
raise ImportError("view")
class SudoMiddleware(BaseSudoMiddleware):
def has_sudo_privileges(self, request):
return True
| 24.916667
| 71
| 0.757525
|
d8f3ee92b81b82e85b49144b1a59b7a4c93ecd5d
| 1,588
|
py
|
Python
|
src/baxter_tools/__init__.py
|
birlrobotics/baxter_tools
|
0561030eb8cacd0e241d8f282d1f238466d44504
|
[
"BSD-3-Clause"
] | null | null | null |
src/baxter_tools/__init__.py
|
birlrobotics/baxter_tools
|
0561030eb8cacd0e241d8f282d1f238466d44504
|
[
"BSD-3-Clause"
] | null | null | null |
src/baxter_tools/__init__.py
|
birlrobotics/baxter_tools
|
0561030eb8cacd0e241d8f282d1f238466d44504
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013-2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .smoketests import SmokeTest
| 54.758621
| 77
| 0.785894
|
67c99cf8281b54455de7bb1998602f66105a3994
| 256
|
py
|
Python
|
solutions/976_largest_perimeter_triangle.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/976_largest_perimeter_triangle.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
solutions/976_largest_perimeter_triangle.py
|
YiqunPeng/leetcode_pro
|
7e6376984f9baec49a5e827d98330fe3d1b656f0
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def largestPerimeter(self, nums: List[int]) -> int:
nums.sort()
for i in range(len(nums)-1, 1, -1):
if nums[i-1] + nums[i-2] > nums[i]:
return nums[i-1] + nums[i-2] + nums[i]
return 0
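# Quick illustrative check (not part of the original snippet): after sorting,
# the first triple, scanning from the largest side downward, that satisfies the
# triangle inequality gives the largest perimeter.
if __name__ == '__main__':
    assert Solution().largestPerimeter([2, 1, 2]) == 5
    assert Solution().largestPerimeter([1, 2, 1]) == 0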
| 32
| 55
| 0.5
|
23362a8c6eacc40514591784e5e13606c819b9fc
| 17,684
|
py
|
Python
|
evalml/tests/model_understanding_tests/prediction_explanations_tests/test_user_interface.py
|
freddyaboulton/evalml
|
53242f9d7397a1af3f8c48d498a023042790d4c3
|
[
"BSD-3-Clause"
] | 454
|
2020-09-25T15:36:06.000Z
|
2022-03-30T04:48:49.000Z
|
evalml/tests/model_understanding_tests/prediction_explanations_tests/test_user_interface.py
|
freddyaboulton/evalml
|
53242f9d7397a1af3f8c48d498a023042790d4c3
|
[
"BSD-3-Clause"
] | 2,175
|
2020-09-25T17:05:45.000Z
|
2022-03-31T19:54:54.000Z
|
evalml/tests/model_understanding_tests/prediction_explanations_tests/test_user_interface.py
|
freddyaboulton/evalml
|
53242f9d7397a1af3f8c48d498a023042790d4c3
|
[
"BSD-3-Clause"
] | 66
|
2020-09-25T18:46:27.000Z
|
2022-03-02T18:33:30.000Z
|
import copy
import json
from itertools import product
import numpy as np
import pandas as pd
import pytest
from evalml.model_understanding.prediction_explanations._user_interface import (
_BinarySHAPTable,
_make_json_serializable,
_make_rows,
_make_text_table,
_MultiClassSHAPTable,
_RegressionSHAPTable,
)
make_rows_test_cases = [
({"a": [0.2], "b": [0.1]}, 3, [["a", "1.20", "++"], ["b", "1.10", "+"]]),
(
{
"a": [0.3],
"b": [-0.9],
"c": [0.5],
"d": [0.33],
"e": [-0.67],
"f": [-0.2],
"g": [0.71],
},
4,
[
["g", "1.71", "++++"],
["c", "1.50", "+++"],
["e", "0.33", "----"],
["b", "0.10", "-----"],
],
),
(
{"a": [1.0], "f": [-1.0], "e": [0.0]},
5,
[["a", "2.00", "+++++"], ["e", "1.00", "+"], ["f", "0.00", "-----"]],
),
]
@pytest.mark.parametrize(
"test_case,include_shap_values,include_string_features",
product(make_rows_test_cases, [True, False], [True, False]),
)
def test_make_rows_and_make_table(
test_case, include_shap_values, include_string_features
):
values, top_k, answer = test_case
pipeline_features = pd.DataFrame(
{name: value[0] + 1 for name, value in values.items()}, index=[5]
)
if include_string_features:
pipeline_features["a"] = ["foo-feature"]
pipeline_features["b"] = [np.datetime64("2020-08-14")]
if include_shap_values:
new_answer = copy.deepcopy(answer)
for row in new_answer:
row.append("{:.2f}".format(values[row[0]][0]))
else:
new_answer = copy.deepcopy(answer)
if include_string_features:
filtered_answer = []
for row in new_answer:
filtered_answer.append(row)
val = row[1]
if row[0] == "a":
val = "foo-feature"
elif row[0] == "b":
val = "2020-08-14 00:00:00"
filtered_answer[-1][1] = val
new_answer = filtered_answer
assert (
_make_rows(
values,
values,
pipeline_features,
pipeline_features,
top_k,
include_shap_values,
)
== new_answer
)
table = _make_text_table(
values, values, pipeline_features, pipeline_features, top_k, include_shap_values
).splitlines()
if include_shap_values:
assert "SHAP Value" in table[0]
# Subtracting two because a header and a line under the header are included in the table.
assert len(table) - 2 == len(new_answer)
@pytest.mark.parametrize(
"value,answer",
[
(np.int64(3), 3),
(np.float32(3.2), 3.2),
(np.str_("foo"), "foo"),
(np.bool_(True), True),
],
)
def test_make_json_serializable(value, answer):
value = _make_json_serializable(value)
if answer != "foo":
np.testing.assert_almost_equal(value, answer)
else:
assert value == answer
json.dumps(value)
regression = {
"a": [6.500],
"b": [1.770],
"c": [0.570],
"d": [-0.090],
"e": [-0.290],
"f": [-1.910],
"foo": [0.01],
"bar": [-0.02],
}
regression_normalized = {
"a": [0.6214],
"b": [0.1692],
"bar": [-0.0019],
"c": [0.0544],
"d": [-0.0086],
"e": [-0.0277],
"f": [-0.8],
"foo": [0.0001],
}
regression_pipeline_features = pd.DataFrame(
{
"a": 7.5,
"b": 2.77,
"c": 1.57,
"d": 0.91,
"e": 0.71,
"f": -0.21,
"foo": -20,
"bar": -30,
},
index=[31],
)
regression_original_features = pd.DataFrame(
{
"a": 0.75,
"b": 0.277,
"c": 0.57,
"d": 1.91,
"e": 1.71,
"f": -1.21,
"foo": -20,
"bar": -40,
},
index=[31],
)
regression_table = """Feature Name Feature Value Contribution to Prediction
=========================================================
a 7.50 ++++
b 2.77 +
f -0.21 -----""".splitlines()
regression_table_shap = """Feature Name Feature Value Contribution to Prediction SHAP Value
======================================================================
a 7.50 ++++ 6.50
b 2.77 + 1.77
f -0.21 ----- -1.91""".splitlines()
regression_dict = {
"explanations": [
{
"feature_names": ["a", "b", "f"],
"feature_values": [7.5, 2.77, -0.21],
"qualitative_explanation": ["++++", "+", "-----"],
"quantitative_explanation": [None, None, None],
"drill_down": {},
"class_name": None,
"expected_value": [0],
}
]
}
regression_dict_shap = {
"explanations": [
{
"feature_names": ["a", "b", "f"],
"feature_values": [7.5, 2.77, -0.21],
"qualitative_explanation": ["++++", "+", "-----"],
"quantitative_explanation": [6.50, 1.77, -1.91],
"drill_down": {},
"class_name": None,
"expected_value": [0],
}
]
}
binary = [
{"a": [0], "b": [0], "c": [0], "d": [0], "e": [0], "f": [0], "foo": [-1]},
{
"a": [1.180],
"b": [0.0],
"c": [1.120],
"d": [-0.560],
"e": [-2.600],
"f": [-0.900],
"foo": [-1],
},
]
binary_normalized = [
{
"a": [0.0],
"b": [0.0],
"c": [0.0],
"d": [0.0],
"e": [0.0],
"f": [0.0],
"foo": [-1.0],
},
{
"a": [0.16],
"b": [0.0],
"c": [0.15],
"d": [-0.08],
"e": [-0.35],
"f": [-0.12],
"foo": [-0.14],
},
]
binary_pipeline_features = pd.DataFrame(
{"a": 2.18, "b": 2.12, "c": 1.0, "d": -1.56, "e": -1.8, "f": -1.9, "foo": -20},
index=[23],
)
binary_original_features = pd.DataFrame(
{"a": 1.18, "b": 1.12, "c": 2.0, "d": -2.56, "e": -2.8, "f": -2.9, "foo": -30},
index=[23],
)
binary_table = """Feature Name Feature Value Contribution to Prediction
=========================================================
a 2.18 +
c 1.00 +
e -1.80 --""".splitlines()
binary_table_shap = """Feature Name Feature Value Contribution to Prediction SHAP Value
======================================================================
a 2.18 + 1.18
c 1.00 + 1.12
e -1.80 -- -2.60""".splitlines()
binary_dict = {
"explanations": [
{
"feature_names": ["a", "c", "e"],
"feature_values": [2.180, 1.0, -1.80],
"qualitative_explanation": ["+", "+", "--"],
"quantitative_explanation": [None, None, None],
"drill_down": {},
"class_name": "1",
"expected_value": [0],
}
]
}
binary_dict_shap = {
"explanations": [
{
"feature_names": ["a", "c", "e"],
"feature_values": [2.180, 1.0, -1.80],
"qualitative_explanation": ["+", "+", "--"],
"quantitative_explanation": [1.180, 1.120, -2.60],
"drill_down": {},
"class_name": "1",
"expected_value": [0],
}
]
}
multiclass = [
{"a": [0], "b": [0], "c": [0], "d": [0.11], "e": [0.18], "f": [0], "foo": [-1]},
{
"a": [1.180],
"b": [1.120],
"c": [0.000],
"d": [-2.560],
"e": [-2.800],
"f": [-2.900],
"foo": [-1],
},
{
"a": [0.680],
"b": [0.000],
"c": [0.000],
"d": [-2.040],
"e": [-1.840],
"f": [-2.680],
"foo": [-1],
},
]
multiclass_normalized = [
{
"a": [0.0],
"b": [0.0],
"c": [0.0],
"d": [0.07],
"e": [0.08],
"f": [0.0],
"foo": [-1.0],
},
{
"a": [0.102],
"b": [0.097],
"c": [0.0],
"d": [-0.221],
"e": [-0.242],
"f": [-0.251],
"foo": [-0.0865],
},
{
"a": [0.08],
"b": [0.0],
"c": [0.0],
"d": [-0.25],
"e": [-0.22],
"f": [-0.33],
"foo": [-0.12],
},
]
multiclass_pipeline_features = pd.DataFrame(
{"a": 2.18, "b": 2.12, "c": 1.0, "d": -1.56, "e": -1.8, "f": -1.9, "foo": 30},
index=[10],
)
multiclass_original_features = pd.DataFrame(
{"a": 1.18, "b": 1.12, "c": 2.0, "d": -2.56, "e": -4.8, "f": -5.9, "foo": 40},
index=[10],
)
multiclass_table = """Class: 0
Feature Name Feature Value Contribution to Prediction
=========================================================
e -1.80 +
d -1.56 +
foo 30.00 -----
Class: 1
Feature Name Feature Value Contribution to Prediction
=========================================================
d -1.56 --
e -1.80 --
f -1.90 --
Class: 2
Feature Name Feature Value Contribution to Prediction
=========================================================
e -1.80 --
d -1.56 --
f -1.90 --""".splitlines()
multiclass_table_shap = """Class: 0
Feature Name Feature Value Contribution to Prediction SHAP Value
======================================================================
e -1.80 + 0.18
d -1.56 + 0.11
foo 30.00 ----- -1.00
Class: 1
Feature Name Feature Value Contribution to Prediction SHAP Value
======================================================================
d -1.56 -- -2.56
e -1.80 -- -2.80
f -1.90 -- -2.90
Class: 2
Feature Name Feature Value Contribution to Prediction SHAP Value
======================================================================
e -1.80 -- -1.84
d -1.56 -- -2.04
f -1.90 -- -2.68""".splitlines()
multiclass_dict = {
"explanations": [
{
"feature_names": ["e", "d", "foo"],
"feature_values": [-1.8, -1.56, 30],
"qualitative_explanation": ["+", "+", "-----"],
"quantitative_explanation": [None, None, None],
"drill_down": {},
"class_name": "0",
"expected_value": 0,
},
{
"feature_names": ["d", "e", "f"],
"feature_values": [-1.56, -1.8, -1.9],
"qualitative_explanation": ["--", "--", "--"],
"quantitative_explanation": [None, None, None],
"drill_down": {},
"class_name": "1",
"expected_value": 1,
},
{
"feature_names": ["e", "d", "f"],
"feature_values": [-1.8, -1.56, -1.9],
"qualitative_explanation": ["--", "--", "--"],
"quantitative_explanation": [None, None, None],
"drill_down": {},
"class_name": "2",
"expected_value": 2,
},
]
}
multiclass_dict_shap = {
"explanations": [
{
"feature_names": ["e", "d", "foo"],
"feature_values": [-1.8, -1.56, 30],
"qualitative_explanation": ["+", "+", "-----"],
"quantitative_explanation": [0.18, 0.11, -1],
"drill_down": {},
"class_name": "0",
"expected_value": 0,
},
{
"feature_names": ["d", "e", "f"],
"feature_values": [-1.56, -1.8, -1.9],
"qualitative_explanation": ["--", "--", "--"],
"quantitative_explanation": [-2.56, -2.8, -2.9],
"drill_down": {},
"class_name": "1",
"expected_value": 1,
},
{
"feature_names": ["e", "d", "f"],
"feature_values": [-1.8, -1.56, -1.9],
"qualitative_explanation": ["--", "--", "--"],
"quantitative_explanation": [-1.84, -2.04, -2.68],
"drill_down": {},
"class_name": "2",
"expected_value": 2,
},
]
}
@pytest.mark.parametrize(
"values,normalized_values,pipeline_features,original_features,include_shap,expected_values, output_format,answer",
[
(
regression,
regression_normalized,
regression_pipeline_features,
regression_original_features,
False,
[0],
"text",
regression_table,
),
(
regression,
regression_normalized,
regression_pipeline_features,
regression_original_features,
True,
[0],
"text",
regression_table_shap,
),
(
regression,
regression_normalized,
regression_pipeline_features,
regression_original_features,
False,
[0],
"dict",
regression_dict,
),
(
regression,
regression_normalized,
regression_pipeline_features,
regression_original_features,
True,
[0],
"dict",
regression_dict_shap,
),
(
binary,
binary_normalized,
binary_pipeline_features,
binary_original_features,
False,
[0],
"text",
binary_table,
),
(
binary,
binary_normalized,
binary_pipeline_features,
binary_original_features,
True,
[0],
"text",
binary_table_shap,
),
(
binary,
binary_normalized,
binary_pipeline_features,
binary_original_features,
False,
[0],
"dict",
binary_dict,
),
(
binary,
binary_normalized,
binary_pipeline_features,
binary_original_features,
True,
[0],
"dict",
binary_dict_shap,
),
(
multiclass,
multiclass_normalized,
multiclass_pipeline_features,
multiclass_original_features,
False,
[0, 1, 2],
"text",
multiclass_table,
),
(
multiclass,
multiclass_normalized,
multiclass_pipeline_features,
multiclass_original_features,
True,
[0, 1, 2],
"text",
multiclass_table_shap,
),
(
multiclass,
multiclass_normalized,
multiclass_pipeline_features,
multiclass_original_features,
False,
[0, 1, 2],
"dict",
multiclass_dict,
),
(
multiclass,
multiclass_normalized,
multiclass_pipeline_features,
multiclass_original_features,
True,
[0, 1, 2],
"dict",
multiclass_dict_shap,
),
],
)
def test_make_single_prediction_table(
values,
normalized_values,
pipeline_features,
original_features,
include_shap,
expected_values,
output_format,
answer,
):
class_names = ["0", "1", "2"]
if isinstance(values, list):
if len(values) > 2:
table_maker = _MultiClassSHAPTable(
top_k=3,
include_shap_values=include_shap,
include_expected_value=False,
class_names=class_names,
provenance={},
)
else:
table_maker = _BinarySHAPTable(
class_names=class_names,
top_k=3,
include_shap_values=include_shap,
include_expected_value=False,
provenance={},
)
else:
table_maker = _RegressionSHAPTable(
top_k=3,
include_shap_values=include_shap,
include_expected_value=False,
provenance={},
)
table_maker = (
table_maker.make_text if output_format == "text" else table_maker.make_dict
)
table = table_maker(
aggregated_shap_values=values,
aggregated_normalized_values=normalized_values,
shap_values=values,
normalized_values=normalized_values,
pipeline_features=pipeline_features,
original_features=pipeline_features,
expected_value=expected_values,
)
# Making sure the content is the same, regardless of formatting.
if output_format == "text":
for index, (row_table, row_answer) in enumerate(
zip(table.splitlines(), answer)
):
assert row_table.strip().split() == row_answer.strip().split()
else:
assert table == answer
| 26.998473
| 118
| 0.417892
|
599b48d82b505fbcafd05e8e6288511fe975f658
| 9,051
|
py
|
Python
|
contrib/python/numpy/numpy/core/shape_base.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 60
|
2017-08-05T21:47:56.000Z
|
2022-03-08T21:46:29.000Z
|
contrib/python/numpy/numpy/core/shape_base.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 48
|
2020-05-20T08:26:54.000Z
|
2022-03-29T21:19:52.000Z
|
contrib/python/numpy/numpy/core/shape_base.py
|
HeyLey/catboost
|
f472aed90604ebe727537d9d4a37147985e10ec2
|
[
"Apache-2.0"
] | 31
|
2020-05-25T21:14:56.000Z
|
2022-03-25T08:57:43.000Z
|
from __future__ import division, absolute_import, print_function
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack',
'stack']
from . import numeric as _nx
from .numeric import asanyarray, newaxis
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or sequence of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([ 1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[ 3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[ 0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1, 1)
elif len(ary.shape) == 1:
result = ary[newaxis,:]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[ 3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape)
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0:
result = ary.reshape(1, 1, 1)
elif len(ary.shape) == 1:
result = ary[newaxis,:, newaxis]
elif len(ary.shape) == 2:
result = ary[:,:, newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
Take a sequence of arrays and stack them vertically to make a single
array. Rebuild arrays divided by `vsplit`.
Parameters
----------
tup : sequence of ndarrays
Tuple containing arrays to be stacked. The arrays must have the same
shape along all but the first axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays along an existing axis.
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=0)`` if `tup` contains arrays that
are at least 2-dimensional.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
Take a sequence of arrays and stack them horizontally to make
a single array. Rebuild arrays divided by `hsplit`.
Parameters
----------
tup : sequence of ndarrays
All arrays must have the same shape along all but the second axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=1)``
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def stack(arrays, axis=0):
"""
Join a sequence of arrays along a new axis.
The `axis` parameter specifies the index of the new axis in the dimensions
of the result. For example, if ``axis=0`` it will be the first dimension
and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1, 2, 3],
[2, 3, 4]])
>>> np.stack((a, b), axis=-1)
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = set(arr.shape for arr in arrays)
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
if not -result_ndim <= axis < result_ndim:
msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)
raise IndexError(msg)
if axis < 0:
axis += result_ndim
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis)
| 25.786325
| 79
| 0.54204
|
b186e4947ad4ffefd3ec4fdc0971a6081e9e068a
| 548
|
py
|
Python
|
test_cases/general/array/of/path/py/test_generate/__init__.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 11
|
2019-06-26T05:56:41.000Z
|
2021-03-28T16:44:16.000Z
|
test_cases/general/array/of/path/py/test_generate/__init__.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 4
|
2019-10-18T14:43:59.000Z
|
2020-04-02T19:12:07.000Z
|
test_cases/general/array/of/path/py/test_generate/__init__.py
|
Parquery/mapry
|
93515307f9eba8447fe64b0ac7cc68b2d07205a7
|
[
"MIT"
] | 3
|
2019-06-17T07:39:03.000Z
|
2020-04-01T14:01:23.000Z
|
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
"""defines some object graph."""
import pathlib
import typing
class SomeGraph:
"""defines some object graph."""
def __init__(
self,
array_of_paths: typing.List[pathlib.Path]) -> None:
"""
initializes an instance of SomeGraph with the given values.
:param array_of_paths: tests an array of paths.
"""
self.array_of_paths = array_of_paths
# File automatically generated by mapry. DO NOT EDIT OR APPEND!
| 20.296296
| 67
| 0.65146
|
ad2c19c7613ac72dbe8324af1f3c7a8052b05216
| 42,215
|
py
|
Python
|
mmdet3d/models/dense_heads/fcos_mono3d_head.py
|
amirulhaq/mmdetection3d
|
88855ae872aa1411ab124fd1702edc3e2105c3dd
|
[
"Apache-2.0"
] | 29
|
2021-09-29T13:31:12.000Z
|
2022-03-15T13:31:25.000Z
|
mmdet3d/models/dense_heads/fcos_mono3d_head.py
|
amirulhaq/mmdetection3d
|
88855ae872aa1411ab124fd1702edc3e2105c3dd
|
[
"Apache-2.0"
] | 3
|
2021-12-13T01:21:12.000Z
|
2022-02-24T01:46:14.000Z
|
mmdet3d/models/dense_heads/fcos_mono3d_head.py
|
amirulhaq/mmdetection3d
|
88855ae872aa1411ab124fd1702edc3e2105c3dd
|
[
"Apache-2.0"
] | 1
|
2021-06-22T23:43:29.000Z
|
2021-06-22T23:43:29.000Z
|
import numpy as np
import torch
from mmcv.cnn import Scale, normal_init
from mmcv.runner import force_fp32
from torch import nn as nn
from mmdet3d.core import box3d_multiclass_nms, limit_period, xywhr2xyxyr
from mmdet.core import multi_apply
from mmdet.models.builder import HEADS, build_loss
from .anchor_free_mono3d_head import AnchorFreeMono3DHead
INF = 1e8
@HEADS.register_module()
class FCOSMono3DHead(AnchorFreeMono3DHead):
"""Anchor-free head used in FCOS3D.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: True.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
norm_on_bbox (bool): If true, normalize the regression targets
with FPN strides. Default: True.
centerness_on_reg (bool): If true, position centerness on the
regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
Default: True.
centerness_alpha: Parameter used to adjust the intensity attenuation
from the center to the periphery. Default: 2.5.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
loss_dir (dict): Config of direction classification loss.
loss_attr (dict): Config of attribute classification loss.
loss_centerness (dict): Config of centerness loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
centerness_branch (tuple[int]): Channels for centerness branch.
Default: (64, ).
""" # noqa: E501
def __init__(self,
num_classes,
in_channels,
regress_ranges=((-1, 48), (48, 96), (96, 192), (192, 384),
(384, INF)),
center_sampling=True,
center_sample_radius=1.5,
norm_on_bbox=True,
centerness_on_reg=True,
centerness_alpha=2.5,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
loss_dir=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_attr=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
centerness_branch=(64, ),
**kwargs):
self.regress_ranges = regress_ranges
self.center_sampling = center_sampling
self.center_sample_radius = center_sample_radius
self.norm_on_bbox = norm_on_bbox
self.centerness_on_reg = centerness_on_reg
self.centerness_alpha = centerness_alpha
self.centerness_branch = centerness_branch
super().__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_dir=loss_dir,
loss_attr=loss_attr,
norm_cfg=norm_cfg,
**kwargs)
self.loss_centerness = build_loss(loss_centerness)
def _init_layers(self):
"""Initialize layers of the head."""
super()._init_layers()
self.conv_centerness_prev = self._init_branch(
conv_channels=self.centerness_branch,
conv_strides=(1, ) * len(self.centerness_branch))
self.conv_centerness = nn.Conv2d(self.centerness_branch[-1], 1, 1)
self.scales = nn.ModuleList([
nn.ModuleList([Scale(1.0) for _ in range(3)]) for _ in self.strides
]) # only for offset, depth and size regression
def init_weights(self):
"""Initialize weights of the head."""
super().init_weights()
for m in self.conv_centerness_prev:
if isinstance(m.conv, nn.Conv2d):
normal_init(m.conv, std=0.01)
normal_init(self.conv_centerness, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * bbox_code_size.
dir_cls_preds (list[Tensor]): Box scores for direction class
predictions on each scale level, each is a 4D-tensor,
the channel number is num_points * 2. (bin = 2).
attr_preds (list[Tensor]): Attribute scores for each scale
level, each is a 4D-tensor, the channel number is
num_points * num_attrs.
centernesses (list[Tensor]): Centerness for each scale level,
each is a 4D-tensor, the channel number is num_points * 1.
"""
return multi_apply(self.forward_single, feats, self.scales,
self.strides)
def forward_single(self, x, scale, stride):
"""Forward features of a single scale levle.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
stride (int): The corresponding stride for feature maps, only
used to normalize the bbox prediction when self.norm_on_bbox
is True.
Returns:
tuple: scores for each class, bbox and direction class \
predictions, centerness predictions of input feature maps.
"""
cls_score, bbox_pred, dir_cls_pred, attr_pred, cls_feat, reg_feat = \
super().forward_single(x)
if self.centerness_on_reg:
clone_reg_feat = reg_feat.clone()
for conv_centerness_prev_layer in self.conv_centerness_prev:
clone_reg_feat = conv_centerness_prev_layer(clone_reg_feat)
centerness = self.conv_centerness(clone_reg_feat)
else:
clone_cls_feat = cls_feat.clone()
for conv_centerness_prev_layer in self.conv_centerness_prev:
clone_cls_feat = conv_centerness_prev_layer(clone_cls_feat)
centerness = self.conv_centerness(clone_cls_feat)
# scale the bbox_pred of different level
# only apply to offset, depth and size prediction
scale_offset, scale_depth, scale_size = scale[0:3]
clone_bbox_pred = bbox_pred.clone()
bbox_pred[:, :2] = scale_offset(clone_bbox_pred[:, :2]).float()
bbox_pred[:, 2] = scale_depth(clone_bbox_pred[:, 2]).float()
bbox_pred[:, 3:6] = scale_size(clone_bbox_pred[:, 3:6]).float()
bbox_pred[:, 2] = bbox_pred[:, 2].exp()
bbox_pred[:, 3:6] = bbox_pred[:, 3:6].exp() + 1e-6 # avoid size=0
assert self.norm_on_bbox is True, 'Setting norm_on_bbox to False '\
'has not been thoroughly tested for FCOS3D.'
if self.norm_on_bbox:
if not self.training:
# Note that this line is conducted only when testing
bbox_pred[:, :2] *= stride
return cls_score, bbox_pred, dir_cls_pred, attr_pred, centerness
@staticmethod
def add_sin_difference(boxes1, boxes2):
"""Convert the rotation difference to difference in sine function.
Args:
boxes1 (torch.Tensor): Original Boxes in shape (NxC), where C>=7
and the 7th dimension is rotation dimension.
boxes2 (torch.Tensor): Target boxes in shape (NxC), where C>=7 and
the 7th dimension is rotation dimension.
Returns:
tuple[torch.Tensor]: ``boxes1`` and ``boxes2`` whose 7th \
dimensions are changed.
"""
rad_pred_encoding = torch.sin(boxes1[..., 6:7]) * torch.cos(
boxes2[..., 6:7])
rad_tg_encoding = torch.cos(boxes1[..., 6:7]) * torch.sin(boxes2[...,
6:7])
boxes1 = torch.cat(
[boxes1[..., :6], rad_pred_encoding, boxes1[..., 7:]], dim=-1)
boxes2 = torch.cat([boxes2[..., :6], rad_tg_encoding, boxes2[..., 7:]],
dim=-1)
return boxes1, boxes2
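    # Worked equation for add_sin_difference above: the regression loss later
    # compares sin(r_p)*cos(r_t) against cos(r_p)*sin(r_t), and
    #   sin(r_p)*cos(r_t) - cos(r_p)*sin(r_t) = sin(r_p - r_t),
    # so SmoothL1 on these encodings effectively penalises |sin(r_p - r_t)|.
    # Since |sin(x)| == |sin(x + pi)|, boxes rotated by pi are indistinguishable
    # to this term, which is why a separate direction classifier is needed.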
@staticmethod
def get_direction_target(reg_targets,
dir_offset=0,
num_bins=2,
one_hot=True):
"""Encode direction to 0 ~ num_bins-1.
Args:
reg_targets (torch.Tensor): Bbox regression targets.
dir_offset (int): Direction offset.
num_bins (int): Number of bins to divide 2*PI.
one_hot (bool): Whether to encode as one hot.
Returns:
torch.Tensor: Encoded direction targets.
"""
rot_gt = reg_targets[..., 6]
offset_rot = limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
dir_cls_targets = torch.floor(offset_rot /
(2 * np.pi / num_bins)).long()
dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
if one_hot:
dir_targets = torch.zeros(
*list(dir_cls_targets.shape),
num_bins,
dtype=reg_targets.dtype,
device=dir_cls_targets.device)
            dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0)
dir_cls_targets = dir_targets
return dir_cls_targets
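    # Worked example for get_direction_target with the defaults above
    # (dir_offset=0, num_bins=2, so each bin spans pi; limit_period wraps the
    # angle into [0, 2*pi) first). Values are made up for illustration:
    #   rot_gt =  0.3 -> offset_rot ~ 0.30 -> floor(0.30 / pi) = 0 -> bin 0
    #   rot_gt =  3.5 -> offset_rot ~ 3.50 -> floor(3.50 / pi) = 1 -> bin 1
    #   rot_gt = -0.2 -> offset_rot ~ 6.08 -> floor(6.08 / pi) = 1 -> bin 1
    # With one_hot=True these become the rows [1, 0] / [0, 1] via scatter_.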
@force_fp32(
apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds', 'attr_preds',
'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
dir_cls_preds,
attr_preds,
centernesses,
gt_bboxes,
gt_labels,
gt_bboxes_3d,
gt_labels_3d,
centers2d,
depths,
attr_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * bbox_code_size.
dir_cls_preds (list[Tensor]): Box scores for direction class
predictions on each scale level, each is a 4D-tensor,
the channel number is num_points * 2. (bin = 2)
attr_preds (list[Tensor]): Attribute scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_attrs.
centernesses (list[Tensor]): Centerness for each scale level, each
is a 4D-tensor, the channel number is num_points * 1.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_3d (list[Tensor]): 3D boxes ground truth with shape of
(num_gts, code_size).
gt_labels_3d (list[Tensor]): same as gt_labels
centers2d (list[Tensor]): 2D centers on the image with shape of
(num_gts, 2).
depths (list[Tensor]): Depth ground truth with shape of
(num_gts, ).
attr_labels (list[Tensor]): Attributes indices of each box.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert len(cls_scores) == len(bbox_preds) == len(centernesses) == len(
attr_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels_3d, bbox_targets_3d, centerness_targets, attr_targets = \
self.get_targets(
all_level_points, gt_bboxes, gt_labels, gt_bboxes_3d,
gt_labels_3d, centers2d, depths, attr_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds, dir_cls_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, sum(self.group_reg_dims))
for bbox_pred in bbox_preds
]
flatten_dir_cls_preds = [
dir_cls_pred.permute(0, 2, 3, 1).reshape(-1, 2)
for dir_cls_pred in dir_cls_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_dir_cls_preds = torch.cat(flatten_dir_cls_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels_3d = torch.cat(labels_3d)
flatten_bbox_targets_3d = torch.cat(bbox_targets_3d)
flatten_centerness_targets = torch.cat(centerness_targets)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((flatten_labels_3d >= 0)
& (flatten_labels_3d < bg_class_ind)).nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores,
flatten_labels_3d,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_dir_cls_preds = flatten_dir_cls_preds[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
if self.pred_attrs:
flatten_attr_preds = [
attr_pred.permute(0, 2, 3, 1).reshape(-1, self.num_attrs)
for attr_pred in attr_preds
]
flatten_attr_preds = torch.cat(flatten_attr_preds)
flatten_attr_targets = torch.cat(attr_targets)
pos_attr_preds = flatten_attr_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets_3d = flatten_bbox_targets_3d[pos_inds]
pos_centerness_targets = flatten_centerness_targets[pos_inds]
if self.pred_attrs:
pos_attr_targets = flatten_attr_targets[pos_inds]
bbox_weights = pos_centerness_targets.new_ones(
len(pos_centerness_targets), sum(self.group_reg_dims))
equal_weights = pos_centerness_targets.new_ones(
pos_centerness_targets.shape)
code_weight = self.train_cfg.get('code_weight', None)
if code_weight:
assert len(code_weight) == sum(self.group_reg_dims)
bbox_weights = bbox_weights * bbox_weights.new_tensor(
code_weight)
if self.use_direction_classifier:
pos_dir_cls_targets = self.get_direction_target(
pos_bbox_targets_3d, self.dir_offset, one_hot=False)
if self.diff_rad_by_sin:
pos_bbox_preds, pos_bbox_targets_3d = self.add_sin_difference(
pos_bbox_preds, pos_bbox_targets_3d)
loss_offset = self.loss_bbox(
pos_bbox_preds[:, :2],
pos_bbox_targets_3d[:, :2],
weight=bbox_weights[:, :2],
avg_factor=equal_weights.sum())
loss_depth = self.loss_bbox(
pos_bbox_preds[:, 2],
pos_bbox_targets_3d[:, 2],
weight=bbox_weights[:, 2],
avg_factor=equal_weights.sum())
loss_size = self.loss_bbox(
pos_bbox_preds[:, 3:6],
pos_bbox_targets_3d[:, 3:6],
weight=bbox_weights[:, 3:6],
avg_factor=equal_weights.sum())
loss_rotsin = self.loss_bbox(
pos_bbox_preds[:, 6],
pos_bbox_targets_3d[:, 6],
weight=bbox_weights[:, 6],
avg_factor=equal_weights.sum())
loss_velo = None
if self.pred_velo:
loss_velo = self.loss_bbox(
pos_bbox_preds[:, 7:9],
pos_bbox_targets_3d[:, 7:9],
weight=bbox_weights[:, 7:9],
avg_factor=equal_weights.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
# direction classification loss
loss_dir = None
# TODO: add more check for use_direction_classifier
if self.use_direction_classifier:
loss_dir = self.loss_dir(
pos_dir_cls_preds,
pos_dir_cls_targets,
equal_weights,
avg_factor=equal_weights.sum())
# attribute classification loss
loss_attr = None
if self.pred_attrs:
loss_attr = self.loss_attr(
pos_attr_preds,
pos_attr_targets,
pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
else:
# need absolute due to possible negative delta x/y
loss_offset = pos_bbox_preds[:, :2].sum()
loss_depth = pos_bbox_preds[:, 2].sum()
loss_size = pos_bbox_preds[:, 3:6].sum()
loss_rotsin = pos_bbox_preds[:, 6].sum()
loss_velo = None
if self.pred_velo:
loss_velo = pos_bbox_preds[:, 7:9].sum()
loss_centerness = pos_centerness.sum()
loss_dir = None
if self.use_direction_classifier:
loss_dir = pos_dir_cls_preds.sum()
loss_attr = None
if self.pred_attrs:
loss_attr = pos_attr_preds.sum()
loss_dict = dict(
loss_cls=loss_cls,
loss_offset=loss_offset,
loss_depth=loss_depth,
loss_size=loss_size,
loss_rotsin=loss_rotsin,
loss_centerness=loss_centerness)
if loss_velo is not None:
loss_dict['loss_velo'] = loss_velo
if loss_dir is not None:
loss_dict['loss_dir'] = loss_dir
if loss_attr is not None:
loss_dict['loss_attr'] = loss_attr
return loss_dict
@force_fp32(
apply_to=('cls_scores', 'bbox_preds', 'dir_cls_preds', 'attr_preds',
'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
dir_cls_preds,
attr_preds,
centernesses,
img_metas,
cfg=None,
rescale=None):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_points * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W)
dir_cls_preds (list[Tensor]): Box scores for direction class
predictions on each scale level, each is a 4D-tensor,
the channel number is num_points * 2. (bin = 2)
attr_preds (list[Tensor]): Attribute scores for each scale level
Has shape (N, num_points * num_attrs, H, W)
centernesses (list[Tensor]): Centerness for each scale level with
shape (N, num_points * 1, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
The first item is an (n, 5) tensor, where the first 4 columns \
are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
5-th column is a score between 0 and 1. The second item is a \
(n,) tensor where each item is the predicted class label of \
the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds) == len(dir_cls_preds) == \
len(centernesses) == len(attr_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
if self.use_direction_classifier:
dir_cls_pred_list = [
dir_cls_preds[i][img_id].detach()
for i in range(num_levels)
]
else:
dir_cls_pred_list = [
cls_scores[i][img_id].new_full(
[2, *cls_scores[i][img_id].shape[1:]], 0).detach()
for i in range(num_levels)
]
if self.pred_attrs:
attr_pred_list = [
attr_preds[i][img_id].detach() for i in range(num_levels)
]
else:
attr_pred_list = [
cls_scores[i][img_id].new_full(
[self.num_attrs, *cls_scores[i][img_id].shape[1:]],
self.attr_background_label).detach()
for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
input_meta = img_metas[img_id]
det_bboxes = self._get_bboxes_single(
cls_score_list, bbox_pred_list, dir_cls_pred_list,
attr_pred_list, centerness_pred_list, mlvl_points, input_meta,
cfg, rescale)
result_list.append(det_bboxes)
return result_list
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
dir_cls_preds,
attr_preds,
centernesses,
mlvl_points,
input_meta,
cfg,
rescale=False):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for a single scale level
Has shape (num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for a single scale
level with shape (num_points * bbox_code_size, H, W).
dir_cls_preds (list[Tensor]): Box scores for direction class
predictions on a single scale level with shape \
(num_points * 2, H, W)
attr_preds (list[Tensor]): Attribute scores for each scale level
Has shape (N, num_points * num_attrs, H, W)
centernesses (list[Tensor]): Centerness for a single scale level
with shape (num_points, H, W).
mlvl_points (list[Tensor]): Box reference for a single scale level
with shape (num_total_points, 2).
input_meta (dict): Metadata of input image.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
tuples[Tensor]: Predicted 3D boxes, scores, labels and attributes.
"""
view = np.array(input_meta['cam_intrinsic'])
scale_factor = input_meta['scale_factor']
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_centers2d = []
mlvl_bboxes = []
mlvl_scores = []
mlvl_dir_scores = []
mlvl_attr_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, dir_cls_pred, attr_pred, centerness, \
points in zip(cls_scores, bbox_preds, dir_cls_preds,
attr_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
dir_cls_pred = dir_cls_pred.permute(1, 2, 0).reshape(-1, 2)
dir_cls_score = torch.max(dir_cls_pred, dim=-1)[1]
attr_pred = attr_pred.permute(1, 2, 0).reshape(-1, self.num_attrs)
attr_score = torch.max(attr_pred, dim=-1)[1]
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2,
0).reshape(-1,
sum(self.group_reg_dims))
bbox_pred = bbox_pred[:, :self.bbox_code_size]
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
dir_cls_pred = dir_cls_pred[topk_inds, :]
centerness = centerness[topk_inds]
dir_cls_score = dir_cls_score[topk_inds]
attr_score = attr_score[topk_inds]
# change the offset to actual center predictions
bbox_pred[:, :2] = points - bbox_pred[:, :2]
if rescale:
bbox_pred[:, :2] /= bbox_pred[:, :2].new_tensor(scale_factor)
pred_center2d = bbox_pred[:, :3].clone()
bbox_pred[:, :3] = self.pts2Dto3D(bbox_pred[:, :3], view)
mlvl_centers2d.append(pred_center2d)
mlvl_bboxes.append(bbox_pred)
mlvl_scores.append(scores)
mlvl_dir_scores.append(dir_cls_score)
mlvl_attr_scores.append(attr_score)
mlvl_centerness.append(centerness)
mlvl_centers2d = torch.cat(mlvl_centers2d)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_dir_scores = torch.cat(mlvl_dir_scores)
# change local yaw to global yaw for 3D nms
if mlvl_bboxes.shape[0] > 0:
dir_rot = limit_period(mlvl_bboxes[..., 6] - self.dir_offset, 0,
np.pi)
mlvl_bboxes[..., 6] = (
dir_rot + self.dir_offset +
np.pi * mlvl_dir_scores.to(mlvl_bboxes.dtype))
cam_intrinsic = mlvl_centers2d.new_zeros((4, 4))
cam_intrinsic[:view.shape[0], :view.shape[1]] = \
mlvl_centers2d.new_tensor(view)
mlvl_bboxes[:, 6] = torch.atan2(
mlvl_centers2d[:, 0] - cam_intrinsic[0, 2],
cam_intrinsic[0, 0]) + mlvl_bboxes[:, 6]
mlvl_bboxes_for_nms = xywhr2xyxyr(input_meta['box_type_3d'](
mlvl_bboxes, box_dim=self.bbox_code_size,
origin=(0.5, 0.5, 0.5)).bev)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
mlvl_attr_scores = torch.cat(mlvl_attr_scores)
mlvl_centerness = torch.cat(mlvl_centerness)
# no scale_factors in box3d_multiclass_nms
# Then we multiply it from outside
mlvl_nms_scores = mlvl_scores * mlvl_centerness[:, None]
results = box3d_multiclass_nms(mlvl_bboxes, mlvl_bboxes_for_nms,
mlvl_nms_scores, cfg.score_thr,
cfg.max_per_img, cfg, mlvl_dir_scores,
mlvl_attr_scores)
bboxes, scores, labels, dir_scores, attrs = results
attrs = attrs.to(labels.dtype) # change data type to int
bboxes = input_meta['box_type_3d'](
bboxes, box_dim=self.bbox_code_size, origin=(0.5, 0.5, 0.5))
        # Note that the predictions use origin (0.5, 0.5, 0.5)
        # because the ground truth centers2d are the gravity centers of objects.
        # v0.10.0 fixed the in-place operation on the input tensor of cam_box3d,
        # so here we also need to pass origin=(0.5, 0.5, 0.5).
if not self.pred_attrs:
attrs = None
return bboxes, scores, labels, attrs
@staticmethod
def pts2Dto3D(points, view):
"""
Args:
points (torch.Tensor): points in 2D images, [N, 3], \
3 corresponds with x, y in the image and depth.
            view (np.ndarray): camera intrinsic, [3, 3]
Returns:
torch.Tensor: points in 3D space. [N, 3], \
3 corresponds with x, y, z in 3D space.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[1] == 3
points2D = points[:, :2]
depths = points[:, 2].view(-1, 1)
unnorm_points2D = torch.cat([points2D * depths, depths], dim=1)
viewpad = torch.eye(4, dtype=points2D.dtype, device=points2D.device)
viewpad[:view.shape[0], :view.shape[1]] = points2D.new_tensor(view)
inv_viewpad = torch.inverse(viewpad).transpose(0, 1)
# Do operation in homogenous coordinates.
nbr_points = unnorm_points2D.shape[0]
homo_points2D = torch.cat(
[unnorm_points2D,
points2D.new_ones((nbr_points, 1))], dim=1)
points3D = torch.mm(homo_points2D, inv_viewpad)[:, :3]
return points3D
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points according to feature map sizes."""
y, x = super()._get_points_single(featmap_size, stride, dtype, device)
points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
dim=-1) + stride // 2
return points
def get_targets(self, points, gt_bboxes_list, gt_labels_list,
gt_bboxes_3d_list, gt_labels_3d_list, centers2d_list,
depths_list, attr_labels_list):
"""Compute regression, classification and centerss targets for points
in multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
gt_bboxes_3d_list (list[Tensor]): 3D Ground truth bboxes of each
image, each has shape (num_gt, bbox_code_size).
gt_labels_3d_list (list[Tensor]): 3D Ground truth labels of each
box, each has shape (num_gt,).
centers2d_list (list[Tensor]): Projected 3D centers onto 2D image,
each has shape (num_gt, 2).
depths_list (list[Tensor]): Depth of projected 3D centers onto 2D
image, each has shape (num_gt, 1).
attr_labels_list (list[Tensor]): Attribute labels of each box,
each has shape (num_gt,).
Returns:
tuple:
concat_lvl_labels (list[Tensor]): Labels of each level. \
concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
level.
"""
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# the number of points per img, per lvl
num_points = [center.size(0) for center in points]
if attr_labels_list is None:
attr_labels_list = [
gt_labels.new_full(gt_labels.shape, self.attr_background_label)
for gt_labels in gt_labels_list
]
# get labels and bbox_targets of each image
_, _, labels_3d_list, bbox_targets_3d_list, centerness_targets_list, \
attr_targets_list = multi_apply(
self._get_target_single,
gt_bboxes_list,
gt_labels_list,
gt_bboxes_3d_list,
gt_labels_3d_list,
centers2d_list,
depths_list,
attr_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges,
num_points_per_lvl=num_points)
# split to per img, per level
labels_3d_list = [
labels_3d.split(num_points, 0) for labels_3d in labels_3d_list
]
bbox_targets_3d_list = [
bbox_targets_3d.split(num_points, 0)
for bbox_targets_3d in bbox_targets_3d_list
]
centerness_targets_list = [
centerness_targets.split(num_points, 0)
for centerness_targets in centerness_targets_list
]
attr_targets_list = [
attr_targets.split(num_points, 0)
for attr_targets in attr_targets_list
]
# concat per level image
concat_lvl_labels_3d = []
concat_lvl_bbox_targets_3d = []
concat_lvl_centerness_targets = []
concat_lvl_attr_targets = []
for i in range(num_levels):
concat_lvl_labels_3d.append(
torch.cat([labels[i] for labels in labels_3d_list]))
concat_lvl_centerness_targets.append(
torch.cat([
centerness_targets[i]
for centerness_targets in centerness_targets_list
]))
bbox_targets_3d = torch.cat([
bbox_targets_3d[i] for bbox_targets_3d in bbox_targets_3d_list
])
concat_lvl_attr_targets.append(
torch.cat(
[attr_targets[i] for attr_targets in attr_targets_list]))
if self.norm_on_bbox:
                bbox_targets_3d[:, :2] = (
                    bbox_targets_3d[:, :2] / self.strides[i])
concat_lvl_bbox_targets_3d.append(bbox_targets_3d)
return concat_lvl_labels_3d, concat_lvl_bbox_targets_3d, \
concat_lvl_centerness_targets, concat_lvl_attr_targets
def _get_target_single(self, gt_bboxes, gt_labels, gt_bboxes_3d,
gt_labels_3d, centers2d, depths, attr_labels,
points, regress_ranges, num_points_per_lvl):
"""Compute regression and classification targets for a single image."""
num_points = points.size(0)
num_gts = gt_labels.size(0)
if not isinstance(gt_bboxes_3d, torch.Tensor):
gt_bboxes_3d = gt_bboxes_3d.tensor.to(gt_bboxes.device)
if num_gts == 0:
return gt_labels.new_full((num_points,), self.background_label), \
gt_bboxes.new_zeros((num_points, 4)), \
gt_labels_3d.new_full(
(num_points,), self.background_label), \
gt_bboxes_3d.new_zeros((num_points, self.bbox_code_size)), \
gt_bboxes_3d.new_zeros((num_points,)), \
attr_labels.new_full(
(num_points,), self.attr_background_label)
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1])
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
centers2d = centers2d[None].expand(num_points, num_gts, 2)
gt_bboxes_3d = gt_bboxes_3d[None].expand(num_points, num_gts,
self.bbox_code_size)
depths = depths[None, :, None].expand(num_points, num_gts, 1)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
delta_xs = (xs - centers2d[..., 0])[..., None]
delta_ys = (ys - centers2d[..., 1])[..., None]
bbox_targets_3d = torch.cat(
(delta_xs, delta_ys, depths, gt_bboxes_3d[..., 3:]), dim=-1)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
assert self.center_sampling is True, 'Setting center_sampling to '\
'False has not been implemented for FCOS3D.'
# condition1: inside a `center bbox`
radius = self.center_sample_radius
center_xs = centers2d[..., 0]
center_ys = centers2d[..., 1]
center_gts = torch.zeros_like(gt_bboxes)
stride = center_xs.new_zeros(center_xs.shape)
# project the points on current lvl back to the `original` sizes
lvl_begin = 0
for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
lvl_end = lvl_begin + num_points_lvl
stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
lvl_begin = lvl_end
center_gts[..., 0] = center_xs - stride
center_gts[..., 1] = center_ys - stride
center_gts[..., 2] = center_xs + stride
center_gts[..., 3] = center_ys + stride
cb_dist_left = xs - center_gts[..., 0]
cb_dist_right = center_gts[..., 2] - xs
cb_dist_top = ys - center_gts[..., 1]
cb_dist_bottom = center_gts[..., 3] - ys
center_bbox = torch.stack(
(cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
(max_regress_distance >= regress_ranges[..., 0])
& (max_regress_distance <= regress_ranges[..., 1]))
# center-based criterion to deal with ambiguity
dists = torch.sqrt(torch.sum(bbox_targets_3d[..., :2]**2, dim=-1))
dists[inside_gt_bbox_mask == 0] = INF
dists[inside_regress_range == 0] = INF
min_dist, min_dist_inds = dists.min(dim=1)
labels = gt_labels[min_dist_inds]
labels_3d = gt_labels_3d[min_dist_inds]
attr_labels = attr_labels[min_dist_inds]
labels[min_dist == INF] = self.background_label # set as BG
labels_3d[min_dist == INF] = self.background_label # set as BG
attr_labels[min_dist == INF] = self.attr_background_label
bbox_targets = bbox_targets[range(num_points), min_dist_inds]
bbox_targets_3d = bbox_targets_3d[range(num_points), min_dist_inds]
relative_dists = torch.sqrt(
torch.sum(bbox_targets_3d[..., :2]**2,
dim=-1)) / (1.414 * stride[:, 0])
# [N, 1] / [N, 1]
centerness_targets = torch.exp(-self.centerness_alpha * relative_dists)
return labels, bbox_targets, labels_3d, bbox_targets_3d, \
centerness_targets, attr_labels
| 44.624736
| 113
| 0.571195
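# A standalone sketch of the unprojection that pts2Dto3D above performs: for a
# pinhole intrinsic K (no skew/translation, matching the padded identity used
# there), a pixel (u, v) with depth d maps to the camera-frame point
# d * K^{-1} [u, v, 1]. The intrinsic and pixel values are made up.
import torch

K = torch.tensor([[1000.0, 0.0, 640.0],
                  [0.0, 1000.0, 360.0],
                  [0.0, 0.0, 1.0]])
u, v, d = 800.0, 400.0, 20.0
point_3d = d * (torch.inverse(K) @ torch.tensor([u, v, 1.0]))
print(point_3d)  # approximately [3.2, 0.8, 20.0]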
|
38bdd5891b4f18a611c0d1400eab329f8e3c270e
| 1,759
|
py
|
Python
|
manga_db/db/id_map.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 3
|
2021-01-14T16:22:41.000Z
|
2022-02-21T03:31:22.000Z
|
manga_db/db/id_map.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 13
|
2021-01-14T10:34:19.000Z
|
2021-05-20T08:47:54.000Z
|
manga_db/db/id_map.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 1
|
2022-02-24T03:10:04.000Z
|
2022-02-24T03:10:04.000Z
|
import weakref
class IndentityMap:
def __init__(self):
        # d[key] directly returns the actual object (not a weakref)
        # if the value gets gc'd, the key/entry is automatically removed from the WeakValueDictionary
        # if we retrieve and assign the obj to a var, it won't be collected by gc while that reference exists
self._dict = weakref.WeakValueDictionary()
def add(self, obj):
if not obj._in_db:
return False
else:
key = obj.key
if key in self._dict:
# TODO custom exception
raise Exception(f"Can't add {key} to IndentityMap, since there is already "
"an instance present for this key!")
else:
self._dict[key] = obj
return True
def add_unprecedented(self, obj):
if not obj._in_db:
return False
else:
key = obj.key
self._dict[key] = obj
return True
def remove(self, key):
del self._dict[key]
def discard(self, key):
try:
self.remove(key)
return True
except KeyError:
return False
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def get(self, key, default=None):
try:
obj = self._dict[key]
return obj
except KeyError:
return default
def items(self):
return self._dict.items()
def keys(self):
return self._dict.keys()
def values(self):
return self._dict.values()
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
| 25.128571
| 95
| 0.549744
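# A small sketch of the weak-reference behaviour the identity map above relies
# on: an entry disappears from a WeakValueDictionary once the last strong
# reference to the stored object goes away. The Record class is illustrative.
import gc
import weakref

class Record:
    pass

cache = weakref.WeakValueDictionary()
obj = Record()
cache["key"] = obj
print("key" in cache)  # True while `obj` is still strongly referenced
del obj
gc.collect()           # make collection deterministic for the example
print("key" in cache)  # False: the entry was removed automatically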
|
6e1a1ad6f798eaa5838ca53f1f5a62f22e5313bb
| 482
|
py
|
Python
|
packages/python/plotly/plotly/validators/splom/hoverlabel/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/hoverlabel/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/splom/hoverlabel/_bordercolor.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="splom.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| 32.133333
| 81
| 0.653527
|
262a0ab73cb1436d2b54b1a0aefa4f16d78ce539
| 800
|
py
|
Python
|
migrations/versions/6abc139be248_creating_table_subjects.py
|
gabrieldosprazeres/e-plus
|
0cd2cf77a2be5ac5b577e22c64f78180d3f5ff69
|
[
"MIT"
] | null | null | null |
migrations/versions/6abc139be248_creating_table_subjects.py
|
gabrieldosprazeres/e-plus
|
0cd2cf77a2be5ac5b577e22c64f78180d3f5ff69
|
[
"MIT"
] | null | null | null |
migrations/versions/6abc139be248_creating_table_subjects.py
|
gabrieldosprazeres/e-plus
|
0cd2cf77a2be5ac5b577e22c64f78180d3f5ff69
|
[
"MIT"
] | null | null | null |
"""creating table 'subjects'
Revision ID: 6abc139be248
Revises: 567869708e99
Create Date: 2022-01-06 21:48:30.749836
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6abc139be248'
down_revision = '567869708e99'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('subjects',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('subject', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('subject')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('subjects')
# ### end Alembic commands ###
| 23.529412
| 65
| 0.685
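# A hedged sketch of applying the migration above from Python instead of the
# `alembic` command line; the alembic.ini path is an assumption for this
# example and must point at the project's Alembic configuration.
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")
command.upgrade(cfg, "6abc139be248")     # or "head" to apply all migrations
# command.downgrade(cfg, "567869708e99") # revert to the previous revision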
|
d1fb329cc395e99e9cee3db38eb481f56c890e73
| 1,436
|
py
|
Python
|
src/its_preselector/test/test_cal_source.py
|
NTIA/Preselector
|
ee275a6bd48b452cc1d2cd8870e1e617d7e4373b
|
[
"RSA-MD"
] | null | null | null |
src/its_preselector/test/test_cal_source.py
|
NTIA/Preselector
|
ee275a6bd48b452cc1d2cd8870e1e617d7e4373b
|
[
"RSA-MD"
] | 1
|
2022-03-25T21:57:57.000Z
|
2022-03-25T21:57:57.000Z
|
src/its_preselector/test/test_cal_source.py
|
NTIA/Preselector
|
ee275a6bd48b452cc1d2cd8870e1e617d7e4373b
|
[
"RSA-MD"
] | null | null | null |
import unittest
from its_preselector.web_relay_preselector import WebRelayPreselector
import json
class TestCalSource(unittest.TestCase):
@classmethod
def setUpClass(cls):
file = open('test_metadata.sigmf-meta')
sensor_def = json.load(file)
file.close()
cls.preselector = WebRelayPreselector(sensor_def, {})
null_file = open('null_preselector.sigmf-meta')
null_def = json.load(null_file)
null_file.close()
cls.empty_preselector = WebRelayPreselector(null_def, {})
def test_valid_cal_source(self):
cal_sources = self.preselector.cal_sources
self.assertEqual(1, len(cal_sources))
cal_source = cal_sources[0]
self.assertEqual(14.6, cal_source.enr)
self.assertEqual("Calibrated noise source", cal_source.type)
def test_valid_cal_source_spec(self):
spec = self.preselector.cal_sources[0].cal_source_spec
self.assertEqual("SG53400067", spec.id)
self.assertEqual("Keysight 346B", spec.model)
self.assertEqual("https://www.keysight.com/en/pd-1000001299%3Aepsg%3Apro-pn-346B/noise-source-10-mhz-to-18-ghz-nominal-enr-15-db?cc=US&lc=eng",spec.supplemental_information)
def test_empty_cal_source(self):
self.assertIsNotNone(self.empty_preselector.cal_sources)
self.assertEqual(0, len(self.empty_preselector.cal_sources))
if __name__ == '__main__':
unittest.main()
| 37.789474
| 181
| 0.711003
|
b58d03ffa8a18b32ecf99a2f312c35311e37f471
| 3,246
|
py
|
Python
|
monk/pytorch/losses/retrieve_loss.py
|
Shreyashwaghe/monk_v1
|
4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b
|
[
"Apache-2.0"
] | 2
|
2020-09-16T06:05:50.000Z
|
2021-04-07T12:05:20.000Z
|
monk/pytorch/losses/retrieve_loss.py
|
Shreyashwaghe/monk_v1
|
4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b
|
[
"Apache-2.0"
] | null | null | null |
monk/pytorch/losses/retrieve_loss.py
|
Shreyashwaghe/monk_v1
|
4ee4d9483e8ffac9b73a41f3c378e5abf5fc799b
|
[
"Apache-2.0"
] | null | null | null |
from pytorch.losses.imports import *
from system.imports import *
@accepts(dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def retrieve_loss(system_dict):
'''
Retrieve loss post state changes
Args:
system_dict (dict): System dictionary storing experiment state and set variables
Returns:
dict: updated system dict
'''
system_dict["local"]["criterion"] = system_dict["hyper-parameters"]["loss"]["name"];
name = system_dict["local"]["criterion"];
if(name == "l1"):
system_dict["local"]["criterion"] = torch.nn.L1Loss(
reduction='mean');
elif(name == "l2"):
system_dict["local"]["criterion"] = torch.nn.MSELoss(
reduction='mean');
elif(name == "softmaxcrossentropy"):
system_dict["local"]["criterion"] = torch.nn.CrossEntropyLoss(
weight=system_dict["hyper-parameters"]["loss"]["params"]["weight"],
reduction='mean');
elif(name == "crossentropy"):
system_dict["local"]["criterion"] = torch.nn.NLLLoss(
weight=system_dict["hyper-parameters"]["loss"]["params"]["weight"],
reduction='mean');
elif(name == "sigmoidbinarycrossentropy"):
system_dict["local"]["criterion"] = torch.nn.BCEWithLogitsLoss(
weight=system_dict["hyper-parameters"]["loss"]["params"]["weight"],
reduction='mean');
elif(name == "binarycrossentropy"):
system_dict["local"]["criterion"] = torch.nn.BCELoss(
weight=system_dict["hyper-parameters"]["loss"]["params"]["weight"],
reduction='mean');
elif(name == "kldiv"):
system_dict["local"]["criterion"] = torch.nn.KLDivLoss(
reduction='mean');
elif(name == "poissonnll"):
system_dict["local"]["criterion"] = torch.nn.PoissonNLLLoss(
log_input=system_dict["hyper-parameters"]["loss"]["params"]["log_pre_applied"],
reduction='mean');
elif(name == "huber"):
system_dict["local"]["criterion"] = torch.nn.SmoothL1Loss(
reduction='mean');
elif(name == "hinge"):
system_dict["local"]["criterion"] = torch.nn.HingeEmbeddingLoss(
margin=system_dict["hyper-parameters"]["loss"]["params"]["margin"],
reduction='mean');
elif(name == "squaredhinge"):
system_dict["local"]["criterion"] = torch.nn.SoftMarginLoss(
reduction='mean');
elif(name == "multimargin"):
system_dict["local"]["criterion"] = torch.nn.MultiMarginLoss(
p=1,
margin=system_dict["hyper-parameters"]["loss"]["params"]["margin"],
reduction='mean');
elif(name == "squaredmultimargin"):
system_dict["local"]["criterion"] = torch.nn.MultiMarginLoss(
p=2,
margin=system_dict["hyper-parameters"]["loss"]["params"]["margin"],
reduction='mean');
elif(name == "multilabelmargin"):
system_dict["local"]["criterion"] = torch.nn.MultiLabelMarginLoss(
reduction='mean');
elif(name == "multilabelsoftmargin"):
system_dict["local"]["criterion"] = torch.nn.MultiLabelSoftMarginLoss(
reduction='mean');
return system_dict;
| 35.67033
| 91
| 0.601664
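# A minimal sketch of what one entry of the mapping above resolves to:
# "softmaxcrossentropy" becomes torch.nn.CrossEntropyLoss. The system_dict
# plumbing is omitted and the tensors below are made-up values.
import torch

criterion = torch.nn.CrossEntropyLoss(weight=None, reduction='mean')
logits = torch.tensor([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])
targets = torch.tensor([0, 1])
print(criterion(logits, targets))  # a scalar loss tensor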
|
be4e5c3c81fefd213e5a89f63bac36543c571e1c
| 259
|
py
|
Python
|
lib/model/nms/nms_gpu.py
|
user-never-lose/fpn.pytorch
|
21b15e1414db9bb8fb8c5574cfc0ce0d40e01286
|
[
"MIT"
] | 945
|
2018-01-14T05:11:11.000Z
|
2022-03-30T07:44:41.000Z
|
lib/model/nms/nms_gpu.py
|
smartprobe/SKCD
|
f8d52f060d07f043c2d208ec84e5ce55225aaef6
|
[
"MIT"
] | 55
|
2018-01-20T22:34:47.000Z
|
2021-12-03T05:48:51.000Z
|
lib/model/nms/nms_gpu.py
|
smartprobe/SKCD
|
f8d52f060d07f043c2d208ec84e5ce55225aaef6
|
[
"MIT"
] | 254
|
2018-01-14T05:12:16.000Z
|
2022-03-19T12:50:31.000Z
|
import torch
import numpy as np
from _ext import nms
import pdb
def nms_gpu(dets, thresh):
keep = dets.new(dets.size(0), 1).zero_().int()
num_out = dets.new(1).zero_().int()
nms.nms_cuda(keep, dets, num_out, thresh)
keep = keep[:num_out[0]]
return keep
| 21.583333
| 47
| 0.702703
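# The wrapper above requires the repo's compiled _ext CUDA extension. Purely as
# a point of comparison (not the same code path), torchvision ships an NMS with
# a similar idea: boxes plus scores in, indices of the kept boxes out.
import torch
from torchvision.ops import nms

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [50.0, 50.0, 60.0, 60.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(nms(boxes, scores, iou_threshold=0.5))  # tensor([0, 2])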
|
6b226ec047dbd0edfe152e91a361e5b89a26216b
| 3,502
|
py
|
Python
|
OS/10. Round Robin.py
|
pratikpc/OpenGL-Pracs
|
8827eb65576db788733a011dcabee4daa3d32d94
|
[
"MIT"
] | 1
|
2019-04-26T15:09:57.000Z
|
2019-04-26T15:09:57.000Z
|
OS/10. Round Robin.py
|
pratikpc/OpenGL-Pracs
|
8827eb65576db788733a011dcabee4daa3d32d94
|
[
"MIT"
] | null | null | null |
OS/10. Round Robin.py
|
pratikpc/OpenGL-Pracs
|
8827eb65576db788733a011dcabee4daa3d32d94
|
[
"MIT"
] | 1
|
2020-11-19T07:59:13.000Z
|
2020-11-19T07:59:13.000Z
|
# Algorithm
# Create A Class Process
# Store Usual params in it
# ProcessId, Burst, Arrival, Completion etc
#
# But also create Param
# <b>*RobinLeft*</b>
# Set *RobinLeft=Burst*
# Also IterationCount = 0
# ProcessAppendNotAddedAhead
# Iterate through all processes
# Add to Chart all Processes which have
# SUPER IMP
# (Arrival <= chart[top].arrival + Quantum) &&&&& Never Before been added
# Check this by iterationCount = 0
# Main Code
# Input all Processes
# Run Sort By Process Arrival Time
# Create a deque chart (use for creating queue)
# Add top Element of Process to Chart
# First Set Top Element IterationCount as 1
# Loop till Chart Not Empty
# EXTRACT TOP CHART ELEMENT
# Run ProcessAppendNotAddedAhead on Processes & Chart & Quantum
# Compare Process RobinLeft with Quantum
# If LESS or EQUAL
# WORK DONE
# SET COMPLETION = COMPLETION + ROBIN LEFT
# ROBIN LEFT = 0
# PROCESS COMPLETION = COMPLETION
# PROCESS TURN AROUND = PROCESS COMPLETION - PROCESS ARRIVAL
# PROCESS WAITING = PROCESS TURN AROUND - PROCESS BURST
# ADD TO *RESULT* LIST
# ELSE
# Work is YET TO BE FINISHED
# SET PROCESS ROBIN LEFT = PROCESS ROBIN LEFT - QUANTUM
# PROCESS ITERATION COUNT += 1
# PROCESS ADD TO CHART QUEUE
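# A small hand-worked trace of textbook round robin (quantum = 2) to make the
# steps above concrete; the admission rule in ProcessAppendNotAddedAhead below
# is this script's own variant, so its exact schedule can differ slightly.
#   P1(arrival=0, burst=5), P2(arrival=1, burst=3)
#   t=0-2: P1 runs (3 left)  queue afterwards: P2, P1
#   t=2-4: P2 runs (1 left)  queue afterwards: P1, P2
#   t=4-6: P1 runs (1 left)  queue afterwards: P2, P1
#   t=6-7: P2 finishes -> completion 7, turnaround 6, waiting 3
#   t=7-8: P1 finishes -> completion 8, turnaround 8, waiting 3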
class Process:
def __init__(self, processId, arrival, burst):
self.processId = processId
self.arrival = arrival
self.burst = burst
self.robinLeft = burst
self.completion = 0
self.turnAround = 0
self.waiting = 0
self.iterationCount = 0
def ProcessAppendNotAddedAhead(processes, chart, quantum):
for process in processes:
if process.arrival <= (chart[0].arrival + quantum) and process.iterationCount == 0:
process.iterationCount = process.iterationCount+1
chart.append(process)
# Example present in Sir Notebook
processes = [Process(1,5,5),Process(2,4,6),Process(3,3,7),Process(4,1,9),Process(5,2,2), Process(6,6,3)]
quantum = 3
processes = sorted(processes, reverse=False, key=lambda process: process.arrival)
from collections import deque
chart = deque([])
processTop = processes[0]
processTop.iterationCount = 1
chart.append(processTop)
totalTimeLimit = processes[0].arrival
for process in processes:
if (process.arrival > totalTimeLimit):
totalTimeLimit = process.arrival
else:
totalTimeLimit = process.burst + totalTimeLimit
result = []
completionTime = processes[0].arrival
while len(chart) > 0:
ProcessAppendNotAddedAhead(processes, chart, quantum)
process = chart.popleft()
print(process.robinLeft, " Process ", process.processId)
if (process.arrival > completionTime):
completionTime = process.arrival
if (process.robinLeft <= quantum):
completionTime += process.robinLeft
process.completion = completionTime
process.turnAround = process.completion - process.arrival
process.waiting = process.turnAround - process.burst
print ("Process ", process.processId, " done at ", process.completion)
result.append(process)
else:
completionTime += quantum
process.iterationCount = process.iterationCount + 1
process.robinLeft -= quantum
chart.append(process)
turnAroundTot = 0
waitingTot = 0
for process in result:
turnAroundTot = turnAroundTot + process.turnAround
waitingTot = waitingTot + process.waiting
print ("It took Waiting =", waitingTot , " and Turn Around= ", turnAroundTot)
turnAroundAvg = turnAroundTot/len(processes)
waitingAvg = waitingTot/len(processes)
print ("It took Avg Waiting =", waitingAvg , " and Avg Turn Around= ", turnAroundAvg)
| 31.836364
| 104
| 0.736722
|
f0cec8c3aac51ad838bb6de92bd719f169eca75d
| 7,962
|
py
|
Python
|
app.py
|
syedparsa/mealRestAPI
|
1fd2dcb41fc1a320f6724471bfa89070d872d5e5
|
[
"MIT"
] | null | null | null |
app.py
|
syedparsa/mealRestAPI
|
1fd2dcb41fc1a320f6724471bfa89070d872d5e5
|
[
"MIT"
] | null | null | null |
app.py
|
syedparsa/mealRestAPI
|
1fd2dcb41fc1a320f6724471bfa89070d872d5e5
|
[
"MIT"
] | null | null | null |
from flask_migrate import Migrate
from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
import uuid
import jwt
import datetime
import json
from functools import wraps
app = Flask(__name__)
app.config["DATABASE_URL"] = 'postgres://khbjjuusbhztzo:6e3ffe89e02363413236a1a658cada7dc5e50e1c5b120a1be8c39a33d621582a@ec2-34-241-19-183.eu-west-1.compute.amazonaws.com:5432/dmuftpqtlgggo'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['SECRET_KEY'] = 'Th1s1ss3cr3t'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class Users(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(50))
password = db.Column(db.String())
admin = db.Column(db.Boolean)
class Meals (db.Model):
mealId = db.Column(db.Integer, primary_key=True, autoincrement=True)
price = db.Column(db.Integer)
name = db.Column(db.String(100), unique=True, nullable=False)
ingredients = db.Column(db.String(500))
isSpicy = db.Column(db.Boolean, default=False)
isVegan = db.Column(db.Boolean, default=False)
isGlutenFree = db.Column(db.Boolean, default=False)
description = db.Column(db.String(500))
db.create_all()
db.session.commit()
def token_required(is_admin):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
token = None
if 'x-access-tokens' in request.headers:
token = request.headers['x-access-tokens']
if not token:
return error_handler(400, "Valid token is missing")
try:
data = jwt.decode(
token,
app.config["SECRET_KEY"],
algorithms=["HS256"])
current_user = Users.query.filter_by(id=data['id']).first()
if is_admin:
if not current_user.admin:
return error_handler(401, "Unauthorized Access!")
except BaseException:
return error_handler(404, "User not found!")
return f(*args, **kwargs)
return wrapper
return decorator
def responce_handler(
status_code,
message_header,
message,
additional_message=None):
if additional_message:
        return jsonify({'status': status_code,
                        message_header: message,
                        'note': additional_message})
    return jsonify({'status': status_code, message_header: message})
def success_handler(message):
return responce_handler(200, "message", message)
def error_handler(status_code, message):
return responce_handler(status_code, "error", message)
@app.route('/register', methods=['POST'])
def signup_user():
data = request.get_json()
password = data['password']
name = data['name']
    if not password.strip() or not name.strip():
return error_handler(400, "Invalid Input!")
if Users.query.filter_by(name=name).first() is not None:
return error_handler(403, "User already exists!")
    hashed_password = generate_password_hash(password, method='sha256')
new_user = Users(name=name, password=hashed_password, admin=False)
db.session.add(new_user)
db.session.commit()
return success_handler("registered successfully")
@app.route('/login', methods=['GET', 'POST'])
def login_user():
auth = request.authorization
if not auth or not auth.username or not auth.password:
return error_handler(401, 'Username or password is incorrect')
user = Users.query.filter_by(name=auth.username).first()
if check_password_hash(user.password, auth.password):
token = jwt.encode({'id': user.id, 'exp': datetime.datetime.utcnow(
) + datetime.timedelta(minutes=30)}, app.config["SECRET_KEY"], algorithm="HS256")
return responce_handler(200, "token", token)
return error_handler(404, "User Not Found")
@app.route('/users', methods=['GET'])
@token_required(is_admin=True)
def get_all_users():
users = Users.query.all()
result = []
for user in users:
user_data = {}
user_data['id'] = user.id
user_data['name'] = user.name
user_data['admin'] = user.admin
result.append(user_data)
return responce_handler(200, "users", result)
@app.route('/')
def index():
return success_handler("Welcome to Meals REST API")
@app.route('/createMeal', methods=['POST'])
@token_required(is_admin=True)
def create_Meal():
data = request.get_json()
name = data['name']
price = data['price']
ingredients = data['ingredients']
isSpicy = data['isSpicy']
isVegan = data['isVegan']
isGlutenFree = data['isGlutenFree']
description = data['description']
new_Meal = Meals(
name=name,
price=price,
ingredients=ingredients,
isSpicy=isSpicy,
isVegan=isVegan,
isGlutenFree=isGlutenFree,
description=description
)
db.session.add(new_Meal)
db.session.commit()
return success_handler("new meal added!")
@app.route('/meal', methods=['GET'])
@token_required(is_admin=False)
def get_Meals():
all_meals = Meals.query.all()
result = []
for meal in all_meals:
eat_data = get_meal_details(meal)
result.append(eat_data)
return responce_handler(200, "meals", result)
def get_meal_details(meal):
return {'id': meal.mealId,
'name': meal.name,
'price': meal.price,
'ingredients': meal.ingredients,
'isSpicy': meal.isSpicy,
'isVegan': meal.isVegan,
'isGlutenFree': meal.isGlutenFree,
'description': meal.description,
}
@app.route('/updateMeal/<id>', methods=['POST'])
@token_required(is_admin=True)
def update(id):
    meal = Meals.query.filter_by(mealId=id).first()
data = request.get_json()
updateMeal(meal, data)
if 'name' in data.keys():
return responce_handler(
200,
'message',
'Meal Updated',
additional_message="Meal's name cannot be updated!\nDelete and create new one if you want!!")
return success_handler("Meal updated!")
@app.route('/updateMeal/<name>', methods=['POST'])
@token_required(is_admin=False)
def update_by_name(name):
    meal = Meals.query.filter_by(name=name).first()
data = request.get_json()
updateMeal(meal, data)
if 'name' in data.keys():
return responce_handler(
200,
'message',
'Meal Updated',
additional_message="Meal's name cannot be updated!\nDelete and create new one if you want!!")
return success_handler("Meal updated!")
def updateMeal(meal, data):
if meal:
for d in data.keys():
if d == 'price':
meal.price = data[d]
elif d == 'ingredients':
meal.ingredients = data[d]
elif d == 'isSpicy':
meal.isSpicy = data[d]
elif d == 'isVegan':
meal.isVegan = data[d]
elif d == 'isGlutenFree':
meal.isGlutenFree = data[d]
elif d == 'description':
meal.description = data[d]
db.session.commit()
@app.route('/meal/<id>', methods=['GET'])
@token_required(is_admin=False)
def get_FMeals(id):
    meal = Meals.query.filter_by(mealId=id).first()
    if not meal:
        return error_handler(404, "meal not found!")
    return responce_handler(200, "meal", get_meal_details(meal))
@app.route('/meal/<id>', methods=['DELETE'])
@token_required(is_admin=True)
def delete_Meals(id):
meal = Meals.query.filter_by(mealId=id)
if meal:
meal.delete()
db.session.commit()
return success_handler("Deleted successfuly")
return error_handler(404, "Meal Not Found!!")
if __name__ == '__main__':
app.run(debug=True)
| 30.273764
| 190
| 0.632756
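# A hedged client-side sketch for the API above using the requests library; the
# base URL and credentials are placeholders for this example.
import requests

BASE = "http://127.0.0.1:5000"
requests.post(f"{BASE}/register", json={"name": "alice", "password": "secret"})
# /login reads HTTP basic auth and returns a JWT in the "token" field
token = requests.post(f"{BASE}/login", auth=("alice", "secret")).json()["token"]
# protected endpoints expect the token in the x-access-tokens header
meals = requests.get(f"{BASE}/meal", headers={"x-access-tokens": token})
print(meals.json())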
|