| column | type | values / range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–209 |
| max_stars_repo_name | string | length 5–121 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 | 1 – 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24–24, nullable (⌀) |
| max_issues_repo_path | string | length 4–209 |
| max_issues_repo_name | string | length 5–121 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 | 1 – 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24–24, nullable (⌀) |
| max_forks_repo_path | string | length 4–209 |
| max_forks_repo_name | string | length 5–121 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 | 1 – 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24–24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24–24, nullable (⌀) |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
Row 1
hexsha: fe17f72c7021d8205a27e05f2ecce89723852ecd | size: 1,289 | ext: py | lang: Python
repo_path: qcloudsdkscaling/DescribeScheduledTaskRequest.py | repo_name: f3n9/qcloudcli | head_hexsha: b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues, and max_forks column groups)
max_stars_count / max_issues_count / max_forks_count: null | all star/issue/fork event datetimes: null
content:
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class DescribeScheduledTaskRequest(Request):
def __init__(self):
super(DescribeScheduledTaskRequest, self).__init__(
'scaling', 'qcloudcliV1', 'DescribeScheduledTask', 'scaling.api.qcloud.com')
def get_limit(self):
return self.get_params().get('limit')
def set_limit(self, limit):
self.add_param('limit', limit)
def get_offset(self):
return self.get_params().get('offset')
def set_offset(self, offset):
self.add_param('offset', offset)
def get_scalingGroupId(self):
return self.get_params().get('scalingGroupId')
def set_scalingGroupId(self, scalingGroupId):
self.add_param('scalingGroupId', scalingGroupId)
def get_scalingScheduledTaskIds(self):
return self.get_params().get('scalingScheduledTaskIds')
def set_scalingScheduledTaskIds(self, scalingScheduledTaskIds):
self.add_param('scalingScheduledTaskIds', scalingScheduledTaskIds)
def get_scalingScheduledTaskName(self):
return self.get_params().get('scalingScheduledTaskName')
def set_scalingScheduledTaskName(self, scalingScheduledTaskName):
self.add_param('scalingScheduledTaskName', scalingScheduledTaskName)
avg_line_length: 32.225 | max_line_length: 88 | alphanum_fraction: 0.719162
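A minimal usage sketch for the `DescribeScheduledTaskRequest` class in row 1; the scaling group ID is a placeholder, and it assumes the base `Request` class from `qcloudsdkcore` exposes `get_params()` the same way the getters above use it:

```python
# Hypothetical values; actually dispatching the request needs the rest of qcloudcli.
req = DescribeScheduledTaskRequest()
req.set_scalingGroupId("asg-xxxxxxxx")  # placeholder scaling group ID
req.set_offset(0)
req.set_limit(20)
print(req.get_params())  # expected to contain scalingGroupId, offset and limit
```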
Row 2
hexsha: 20605e5821481f371f58f1260baf237fd327d9b5 | size: 5,619 | ext: py | lang: Python
repo_path: pyram/etc/rd_hal_pure.py | repo_name: Hoseung/pyRamAn | head_hexsha: f9386fa5a9f045f98590039988d3cd50bc488dc2 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks column groups)
max_stars_count: 1 | stars event min/max: 2021-11-25T16:11:56.000Z / 2021-11-25T16:11:56.000Z
max_issues_count: 6 | issues event min/max: 2020-02-17T13:44:43.000Z / 2020-06-25T15:35:05.000Z
max_forks_count: 1 | forks event min/max: 2021-11-25T16:11:56.000Z / 2021-11-25T16:11:56.000Z
content:
import numpy as np
import struct
def load_header(brick_data, double=False):
offset = 4
if double:
nbytes = 8
dtype_float="d"
else:
nbytes = 4
dtype_float="f"
nbodies = struct.unpack("i", brick_data[4:8])[0]
offset += 12
massp = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
aexp = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
omegat = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
age = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
halnum = struct.unpack("i", brick_data[offset:offset+4])[0]
subnum = struct.unpack("i", brick_data[offset+4:offset+8])[0]
return offset+16, halnum, subnum
def load_a_halo(brick_data, offset, dd, is_gal=True, double=False):
if double:
nbytes = 8
dtype_float="d"
else:
nbytes = 4
dtype_float="f"
npart = struct.unpack("i", brick_data[offset:offset+4])[0]
dd["np"]=npart
offset += 12 # 12 = 4 + 8
ids = struct.unpack_from("<{}i".format(npart), brick_data[offset:offset+4*npart])
offset += 4*npart + 8
dd["id"] = struct.unpack("i", brick_data[offset:offset+4])[0]
offset += 24
dd["level"],dd["host"],dd["sub"],dd["nsub"],dd["nextsub"]\
= struct.unpack_from("<5i", brick_data[offset:offset+20])
offset += 28
dd["m"] = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
dd["x"],dd["y"],dd["z"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["vx"],dd["vy"],dd["vz"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["ax"],dd["ay"],dd["az"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
radius= struct.unpack_from("<4"+dtype_float, brick_data[offset:offset+4*nbytes])
dd["r"],dd["abc"] = radius[0], radius[1:]
offset += 8 + 4*nbytes
dd["energy"] = struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8 + 3*nbytes
dd["sp"] = struct.unpack(dtype_float, brick_data[offset:offset+nbytes])[0]
offset += 8 + nbytes
if is_gal:
dd["sig"], dd["sigbulge"], dd["mbulge"]\
= struct.unpack_from("<3"+dtype_float, brick_data[offset:offset+3*nbytes])
offset += 8+ 3*nbytes
dd["mvir"],dd["rvir"],dd["tvir"],dd["cvel"]\
= struct.unpack_from("<4"+dtype_float, brick_data[offset:offset+4*nbytes])
offset += 8+4*nbytes
dd["p_rho"],dd["p_c"] = struct.unpack_from("<2"+dtype_float, brick_data[offset:offset+2*nbytes])
offset += 8+2*nbytes
if is_gal:
g_nbin = struct.unpack("i", brick_data[offset:offset+4])[0]
dd["g_nbin"]=g_nbin
offset += 12
dd["g_rr"] = struct.unpack_from("<{}".format(g_nbin)+dtype_float, brick_data[offset:offset+g_nbin*nbytes])
offset += 8 + g_nbin*nbytes
dd["g_rho"] = struct.unpack_from("<{}".format(g_nbin)+dtype_float, brick_data[offset:offset+g_nbin*nbytes])
offset += 8 + g_nbin*nbytes
return offset, ids
def load_hm(fn, double=True, is_gal=True, return_idlists=[]):
"""
    Return the catalogue as a numpy array and the member particles of selected halos as a list.
    >>> catalog, member_ids = load_hm("TREE_DM/tree_bricks500", is_gal=False, return_idlists=[1,3,5,7])
    Parameters
    ----------
    double : logical
        If True, assume reals are stored in double precision.
    is_gal : logical
        If True, read GalaxyMaker output. If False, read HaloMaker output.
    return_idlists : sequence (list, array, range, tuple)
        Halo/galaxy IDs whose member particle IDs should be returned.
    NOTE
    ----
    Reading tree_bricks in Fortran is ~10x faster,
    but a bit of slowness is acceptable; NH catalogues are small anyway.
"""
if double:
dtype_float = "<f8"
else:
dtype_float = "<f4"
dtype_halo = [('np', '<i4'), ('id', '<i4'), ('level', '<i4'),
('host', '<i4'), ('sub', '<i4'), ('nsub', '<i4'),
('nextsub', '<i4'),
('m', dtype_float), ('mvir', dtype_float),
('r', dtype_float), ('rvir', dtype_float),
('tvir', dtype_float), ('cvel', dtype_float),
('x', dtype_float), ('y', dtype_float), ('z', dtype_float),
('vx', dtype_float), ('vy', dtype_float), ('vz', dtype_float),
('ax', dtype_float), ('ay', dtype_float), ('az', dtype_float),
('sp', dtype_float), ('idx', '<i4'),
('p_rho', dtype_float),('p_c', dtype_float),
('energy', '<f8', (3,)), ('abc', '<f8', (3,))]
if is_gal:
dtype_halo += [('sig', dtype_float), ('sigbulge', dtype_float),
('mbulge', dtype_float), ('hosthalo', '<i4'),
('g_nbin', '<i4'), ('g_rr', dtype_float, (100,)),
('g_rho', dtype_float, (100,))]
idlists=[]
f = open(fn, "rb")
brick_data = f.read()
offset, halnum, subnum = load_header(brick_data, double=double)
gcat = np.zeros(halnum+subnum, dtype=dtype_halo)
for i in range(halnum+subnum):
offset,_ = load_a_halo(brick_data, offset, gcat[i], is_gal=is_gal, double=double)
if gcat[i]["id"] in return_idlists:
idlists.append(_)
f.close()
return gcat, idlists
avg_line_length: 40.42446 | max_line_length: 115 | alphanum_fraction: 0.587649
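A short usage sketch for `load_hm` from row 2; the tree_bricks path and the halo IDs are placeholders:

```python
# Hypothetical call; path and IDs are placeholders.
gcat, members = load_hm("halo/DM/tree_bricks187", double=True, is_gal=False,
                        return_idlists=[1, 2, 3])
print(gcat.dtype.names)               # catalogue fields: 'np', 'id', 'm', 'x', ...
print(gcat["m"][:5])                  # masses of the first five (sub)halos
print([len(ids) for ids in members])  # member-particle counts for IDs 1, 2, 3
```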
Row 3
hexsha: c34eb1b6c536dd7f28acf602ddafddfa29b163f0 | size: 2,758 | ext: py | lang: Python
repo_path: MS5803.py | repo_name: joachimlindborg/onion_omega | head_hexsha: cd6e634fcf39796c790f41d09ab03139871c49a5 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks column groups)
max_stars_count / max_issues_count / max_forks_count: null | all star/issue/fork event datetimes: null
content:
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# MS5803_30BA
# This code is designed to work with the MS5803_30BA_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Analog-Digital-Converters?sku=MS5803-30BA_I2CS#tabs-0-product_tabset-2
from OmegaExpansion import onionI2C
import time
# Get I2C bus
i2c = onionI2C.OnionI2C(0)
# MS5803_30BA address, 0x76(118)
# 0x1E(30) Reset command
data = [0x1E]
i2c.write(0x76, data)
time.sleep(0.5)
# Read 12 bytes of calibration data
# Read pressure sensitivity
data = i2c.readBytes(0x76, 0xA2, 2)
C1 = data[0] * 256 + data[1]
# Read pressure offset
data = i2c.readBytes(0x76, 0xA4, 2)
C2 = data[0] * 256 + data[1]
# Read temperature coefficient of pressure sensitivity
data = i2c.readBytes(0x76, 0xA6, 2)
C3 = data[0] * 256 + data[1]
# Read temperature coefficient of pressure offset
data = i2c.readBytes(0x76, 0xA8, 2)
C4 = data[0] * 256 + data[1]
# Read reference temperature
data = i2c.readBytes(0x76, 0xAA, 2)
C5 = data[0] * 256 + data[1]
# Read temperature coefficient of the temperature
data = i2c.readBytes(0x76, 0xAC, 2)
C6 = data[0] * 256 + data[1]
# MS5803_30BA address, 0x76(118)
# 0x40(64) Pressure conversion(OSR = 256) command
data = [0x40]
i2c.write(0x76, data)
time.sleep(0.5)
# Read digital pressure value
# Read data back from 0x00(0), 3 bytes
# D1 MSB2, D1 MSB1, D1 LSB
value = i2c.readBytes(0x76, 0x00, 3)
D1 = value[0] * 65536 + value[1] * 256 + value[2]
# MS5803_30BA address, 0x76(118)
# 0x50(80) Temperature conversion(OSR = 256) command
data = [0x50]
i2c.write(0x76, data)
time.sleep(0.5)
# Read digital temperature value
# Read data back from 0x00(0), 3 bytes
# D2 MSB2, D2 MSB1, D2 LSB
value = i2c.readBytes(0x76, 0x00, 3)
D2 = value[0] * 65536 + value[1] * 256 + value[2]
dT = D2 - C5 * 256
TEMP = 2000 + dT * C6 / 8388608
OFF = C2 * 65536 + (C4 * dT) / 128
SENS = C1 * 32768 + (C3 * dT ) / 256
T2 = 0
OFF2 = 0
SENS2 = 0
if TEMP >= 2000 :
T2 = 7 * (dT * dT) / 137438953472
OFF2 = ((TEMP - 2000) * (TEMP - 2000)) / 16
SENS2= 0
elif TEMP < 2000 :
T2 = 3 * (dT * dT) / 8589934592
OFF2= 3 * ((TEMP - 2000) * (TEMP - 2000)) / 2
SENS2= 5 * ((TEMP - 2000) * (TEMP - 2000)) / 8
if TEMP < -1500:
OFF2 = OFF2 + 7 * ((TEMP + 1500) * (TEMP + 1500))
SENS2 = SENS2 + 4 * ((TEMP + 1500) * (TEMP +1500))
TEMP = TEMP - T2
OFF = OFF - OFF2
SENS = SENS - SENS2
# Compensated pressure from the corrected SENS and OFF; datasheet result is in 0.1 mbar, so divide by 10 for mbar
pressure = ((((D1 * SENS) / 2097152) - OFF) / 8192.0) / 10.0
cTemp = TEMP / 100.0
fTemp = cTemp * 1.8 + 32
# Output data to screen
print "Pressure : %.2f mbar" %pressure
print "Temperature in Celsius : %.2f C" %cTemp
print "Temperature in Fahrenheit : %.2f F" %fTemp
avg_line_length: 26.776699 | max_line_length: 114 | alphanum_fraction: 0.668238
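The MS5803 script in row 3 follows the usual first- and second-order temperature compensation from the sensor datasheet. The sketch below restates that arithmetic as a single function, using the same constants as the script but plain floating-point division; it is illustrative, not part of any official driver:

```python
def ms5803_compensate(D1, D2, C1, C2, C3, C4, C5, C6):
    """Sketch of the MS5803-30BA style compensation used by the script above."""
    dT = D2 - C5 * 256
    TEMP = 2000 + dT * C6 / 8388608          # temperature in 0.01 degC
    OFF = C2 * 65536 + (C4 * dT) / 128
    SENS = C1 * 32768 + (C3 * dT) / 256
    # Second-order corrections
    if TEMP >= 2000:
        T2 = 7 * dT * dT / 137438953472
        OFF2 = (TEMP - 2000) ** 2 / 16
        SENS2 = 0
    else:
        T2 = 3 * dT * dT / 8589934592
        OFF2 = 3 * (TEMP - 2000) ** 2 / 2
        SENS2 = 5 * (TEMP - 2000) ** 2 / 8
        if TEMP < -1500:
            OFF2 += 7 * (TEMP + 1500) ** 2
            SENS2 += 4 * (TEMP + 1500) ** 2
    TEMP -= T2
    OFF -= OFF2
    SENS -= SENS2
    pressure_mbar = ((D1 * SENS / 2097152) - OFF) / 8192.0 / 10.0
    return pressure_mbar, TEMP / 100.0       # (mbar, degrees Celsius)
```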
Row 4
hexsha: 49ee47ce9bca2eabe80c879f6fbef5b8a68ca61d | size: 5,684 | ext: py | lang: Python
repo_path (all groups): src/gt4sd/algorithms/controlled_sampling/tests/test_class_controlled_sampling.py | licenses (all groups): ["MIT"]
max_stars: repo_name YoelShoshan/gt4sd-core | head_hexsha 9ee86fc28634b43d69542159fe06a7a5132e23ae | count 1 | events 2022-02-22T02:06:10.000Z / 2022-02-22T02:06:10.000Z
max_issues: repo_name kwehden/gt4sd-core | head_hexsha ac907c1f6cfc6b0ff38b71325dd749001071c863 | count 12 | events 2022-02-21T12:59:24.000Z / 2022-02-22T12:25:49.000Z
max_forks: repo_name C-nit/gt4sd-core | head_hexsha 01854438f2fdbf7f8123a322aeed5520beb1e696 | count null | events null
content:
"""CLaSS tests."""
import pickle
from typing import ClassVar, Type
import pytest
from gt4sd.algorithms.core import AlgorithmConfiguration
from gt4sd.algorithms.registry import ApplicationsRegistry
from gt4sd.extras import EXTRAS_ENABLED
if not EXTRAS_ENABLED:
pytest.skip("Extras from custom PyPI disabled", allow_module_level=True)
else:
from gt4sd.algorithms.controlled_sampling.class_controlled_sampling import (
PAG,
CLaSS,
CogMol,
)
from gt4sd.algorithms.controlled_sampling.class_controlled_sampling.implementation import (
UnsupportedTargetError,
)
def get_classvar_type(class_var):
"""Extract type from ClassVar type annotation: `ClassVar[T]] -> T`."""
return class_var.__args__[0]
MPRO = "SGFRKMAFPSGKVEGCMVQVTCGTTTLNGLWLDDVVYCPRHVICTSEDMLNPNYEDLLIRKSNHNFLVQAGNVQLRVIGHSMQNCVLKLKVDTANPKTPKYKFVRIQPGQTFSVLACYNGSPSGVYQCAMRPNFTIKGSFLNGSCGSVGFNIDYDCVSFCYMHHMELPTGVHAGTDLEGNFYGPFVDRQTAQAAGTDTTITVNVLAWLYAAVINGDRWFLNRFTTTLNDFNLVAMKYNYEPLTQDHVDILGPLSAQTGIAVLDMCASLKELLQNGMNGRTILGSALLEDEFTPFDVVRQCSGVTFQ"
@pytest.mark.parametrize(
"config_class, algorithm_type, domain, algorithm_name",
[
(
CogMol,
"controlled_sampling",
"materials",
CLaSS.__name__,
),
(
PAG,
"controlled_sampling",
"materials",
CLaSS.__name__,
),
],
)
def test_config_class(
config_class: Type[AlgorithmConfiguration],
algorithm_type: str,
domain: str,
algorithm_name: str,
):
assert config_class.algorithm_type == algorithm_type
assert config_class.domain == domain
assert config_class.algorithm_name == algorithm_name
for keyword, type_annotation in config_class.__annotations__.items():
if keyword in ("algorithm_type", "domain", "algorithm_name"):
assert type_annotation.__origin__ is ClassVar # type: ignore
assert str == get_classvar_type(type_annotation)
@pytest.mark.parametrize(
"config_class",
[
(CogMol),
(PAG),
],
)
def test_config_instance(config_class: Type[AlgorithmConfiguration]):
config = config_class() # type:ignore
assert config.algorithm_application == config_class.__name__
@pytest.mark.parametrize(
"config_class",
[
(CogMol),
(PAG),
],
)
def test_available_versions(config_class: Type[AlgorithmConfiguration]):
versions = config_class.list_versions()
assert "v0" in versions
@pytest.mark.parametrize(
"config, example_target, algorithm, kwargs",
[
(
CogMol,
MPRO,
CLaSS,
{
"samples_per_round": 173,
"max_length": 40,
"temperature": 0.8,
"num_proteins_selectivity": 20,
},
),
(
PAG,
None,
CLaSS,
{
"samples_per_round": 173,
"max_length": 40,
"temperature": 0.8,
},
),
],
)
def test_generation_via_import(config, example_target, algorithm, kwargs):
class_sampling = algorithm(
configuration=config(**kwargs),
target=example_target,
)
items = list(class_sampling.sample(5))
assert len(items) == 5
@pytest.mark.parametrize(
"algorithm_application, target",
[
(
CogMol.__name__,
MPRO,
),
(
PAG.__name__,
None,
),
],
)
def test_generation_via_registry(target, algorithm_application):
class_sampling = ApplicationsRegistry.get_application_instance(
target=target,
algorithm_type="controlled_sampling",
domain="materials",
algorithm_name=CLaSS.__name__,
algorithm_application=algorithm_application,
)
items = list(class_sampling.sample(5))
assert len(items) == 5
def test_unsupported_target(algorithm_application=CogMol.__name__, target=MPRO):
invalid_target = target[:30] # assuming this makes it invalid
# on construction
with pytest.raises(UnsupportedTargetError):
ApplicationsRegistry.get_application_instance(
target=invalid_target,
algorithm_type="controlled_sampling",
domain="materials",
algorithm_name=CLaSS.__name__,
algorithm_application=algorithm_application,
)
    # on sampling with changed target
config = CogMol()
implementation = config.get_class_instance( # type: ignore
resources_path=config.ensure_artifacts(), target=target
)
with pytest.raises(UnsupportedTargetError):
implementation.sample_accepted(invalid_target)
@pytest.mark.parametrize("config_class", [(CogMol), (PAG)])
def test_configuration_pickable(config_class: Type[AlgorithmConfiguration]):
# implementation
obj = config_class(algorithm_version="test")
# ---
import inspect
inspect.getmodule(config_class)
# ---
pickled_obj = pickle.dumps(obj)
restored_obj = pickle.loads(pickled_obj)
assert restored_obj.algorithm_version == "test"
assert restored_obj == obj
# registered
Config = ApplicationsRegistry.get_application(
algorithm_type="controlled_sampling",
domain="materials",
algorithm_name=CLaSS.__name__,
algorithm_application=config_class.__name__,
).configuration_class
obj = Config(algorithm_version="test")
pickled_obj = pickle.dumps(obj)
restored_obj = pickle.loads(pickled_obj)
assert restored_obj.algorithm_version == "test"
assert restored_obj == obj
avg_line_length: 28.278607 | max_line_length: 315 | alphanum_fraction: 0.663265
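For the gt4sd tests in row 4, a hedged standalone sketch of the registry call they exercise; it mirrors `test_generation_via_registry`, and running it requires the optional gt4sd extras plus downloaded model artifacts:

```python
# Illustrative only; mirrors the registry usage in the tests above.
from gt4sd.algorithms.registry import ApplicationsRegistry

sampler = ApplicationsRegistry.get_application_instance(
    target=MPRO,  # the Mpro protein sequence defined in the test module
    algorithm_type="controlled_sampling",
    domain="materials",
    algorithm_name="CLaSS",
    algorithm_application="CogMol",
)
molecules = list(sampler.sample(5))
print(molecules)
```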
Row 5
hexsha: ed02c156168253705662bbce24bf570f0bdd2f1f | size: 105,120 | ext: py | lang: Python
repo_path: venv/Lib/site-packages/numpy/core/tests/test_numeric.py | repo_name: unbun/snake.ai | head_hexsha: 0c017357608dc7c06af0ca3ca57d870641461207 | licenses: ["MIT"] (identical for the max_stars, max_issues, and max_forks column groups)
max_stars_count: 6,989 | stars event min/max: 2017-07-18T06:23:18.000Z / 2022-03-31T15:58:36.000Z
max_issues_count: 1,978 | issues event min/max: 2017-07-18T09:17:58.000Z / 2022-03-31T14:28:43.000Z
max_forks_count: 1,228 | forks event min/max: 2017-07-18T09:03:13.000Z / 2022-03-29T05:57:40.000Z
content:
from __future__ import division, absolute_import, print_function
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32, 1)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_(np.squeeze(A).shape == (3, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
assert_(np.can_cast('u2', 'U5'))
assert_(not np.can_cast('u2', 'U4'))
assert_(np.can_cast('u4', 'U10'))
assert_(not np.can_cast('u4', 'U9'))
assert_(np.can_cast('u8', 'U20'))
assert_(not np.can_cast('u8', 'U19'))
assert_(np.can_cast('i1', 'U4'))
assert_(not np.can_cast('i1', 'U3'))
assert_(np.can_cast('i2', 'U6'))
assert_(not np.can_cast('i2', 'U5'))
assert_(np.can_cast('i4', 'U11'))
assert_(not np.can_cast('i4', 'U10'))
assert_(np.can_cast('i8', 'U21'))
assert_(not np.can_cast('i8', 'U20'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
def test_can_cast_simple_to_structured(self):
# Non-structured can only be cast to structured in 'unsafe' mode.
assert_(not np.can_cast('i4', 'i4,i4'))
assert_(not np.can_cast('i4', 'i4,i2'))
assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
# Even if there is just a single field which is OK.
assert_(not np.can_cast('i2', [('f1', 'i4')]))
assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
# It should be the same for recursive structured or subarrays.
assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
def test_can_cast_structured_to_simple(self):
# Need unsafe casting for structured to simple.
assert_(not np.can_cast([('f1', 'i4')], 'i4'))
assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
# Since it is unclear what is being cast, multiple fields to
# single should not work even for unsafe casting.
assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
# But a single field inside a single field is OK.
assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
# And a subarray is fine too - it will just take the first element
# (arguably not very consistently; might also take the first field).
assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
# But a structured subarray with multiple fields should fail.
assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
casting='unsafe'))
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
ii = np.iinfo(dt)
assert_(np.can_cast(ii.min, dt))
assert_(np.can_cast(ii.max, dt))
assert_(not np.can_cast(ii.min - 1, dt))
assert_(not np.can_cast(ii.max + 1, dt))
for dt in np.sctypes['float']:
fi = np.finfo(dt)
assert_(np.can_cast(fi.min, dt))
assert_(np.can_cast(fi.max, dt))
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
class TestFromiter(object):
def makegen(self):
for x in range(24):
yield x**2
def test_types(self):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
assert_(ai32.dtype == np.dtype(np.int32))
assert_(ai64.dtype == np.dtype(np.int64))
assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(len(a) == len(expected))
assert_(len(a20) == 20)
assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
assert_(np.alltrue(a == expected, axis=0))
assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
# Raise an exception at the desired index in the iterator.
for e in range(n):
if e == eindex:
raise NIterError('error at index %s' % eindex)
yield e
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
class TestNonzero(object):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
assert_equal(np.nonzero(np.array([])), ([],))
assert_equal(np.count_nonzero(np.array(0)), 0)
assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
assert_equal(np.nonzero(np.array(0)), ([],))
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
assert_equal(np.nonzero(np.array(1)), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)],
dtype=[('a', 'i4'), ('b', 'i2')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.nonzero(x['a']), ([0, 2, 3],))
assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],))
def test_nonzero_twodim(self):
x = np.array([[0, 1, 0], [2, 0, 3]])
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))
x = np.array([[(0, 1), (0, 0), (1, 11)],
[(1, 1), (1, 0), (0, 0)],
[(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))
assert_(not x['a'].T.flags.aligned)
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
np.concatenate((np.arange(10 + i, 20 + i), [20 + i*2])))
def test_return_type(self):
class C(np.ndarray):
pass
for view in (C, np.ndarray):
for nd in range(1, 4):
shape = tuple(range(2, 2+nd))
x = np.arange(np.prod(shape)).reshape(shape).view(view)
for nzx in (np.nonzero(x), x.nonzero()):
for nzx_i in nzx:
assert_(type(nzx_i) is np.ndarray)
assert_(nzx_i.flags.writeable)
def test_count_nonzero_axis(self):
# Basic check of functionality
m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]])
expected = np.array([1, 1, 1, 1, 1])
assert_equal(np.count_nonzero(m, axis=0), expected)
expected = np.array([2, 3])
assert_equal(np.count_nonzero(m, axis=1), expected)
assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1))
assert_raises(TypeError, np.count_nonzero, m, axis='foo')
assert_raises(np.AxisError, np.count_nonzero, m, axis=3)
assert_raises(TypeError, np.count_nonzero,
m, axis=np.array([[1], [2]]))
def test_count_nonzero_axis_all_dtypes(self):
# More thorough test that the axis argument is respected
# for all dtypes and responds correctly when presented with
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
def assert_equal_w_dt(a, b, err_msg):
assert_equal(a.dtype, b.dtype, err_msg=err_msg)
assert_equal(a, b, err_msg=err_msg)
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
if dt != 'V':
if dt != 'M':
m = np.zeros((3, 3), dtype=dt)
n = np.ones(1, dtype=dt)
m[0, 0] = n[0]
m[1, 0] = n[0]
else: # np.zeros doesn't work for np.datetime64
m = np.array(['1970-01-01'] * 9)
m = m.reshape((3, 3))
m[0, 0] = '1970-01-12'
m[1, 0] = '1970-01-12'
m = m.astype(dt)
expected = np.array([2, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([1, 1, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
if dt == 'V':
# There are no 'nonzero' objects for np.void, so the testing
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
expected = np.array([0, 0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=0),
expected, err_msg=err_msg)
expected = np.array([0, 0], dtype=np.intp)
assert_equal_w_dt(np.count_nonzero(m, axis=1),
expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m, axis=None),
expected, err_msg=err_msg)
assert_equal(np.count_nonzero(m),
expected, err_msg=err_msg)
def test_count_nonzero_axis_consistent(self):
# Check that the axis behaviour for valid axes in
# non-special cases is consistent (and therefore
# correct) by checking it against an integer array
# that is then casted to the generic object dtype
from itertools import combinations, permutations
axis = (0, 1, 2, 3)
size = (5, 5, 5, 5)
msg = "Mismatch for axis: %s"
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
for perm in permutations(combo):
assert_equal(
np.count_nonzero(m, axis=perm),
np.count_nonzero(n, axis=perm),
err_msg=msg % (perm,))
def test_countnonzero_axis_empty(self):
a = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(np.count_nonzero(a, axis=()), a.astype(bool))
def test_array_method(self):
# Tests that the array method
# call to nonzero works
m = np.array([[1, 0, 0], [4, 0, 6]])
tgt = [[0, 1, 1], [0, 0, 2]]
assert_equal(m.nonzero(), tgt)
def test_nonzero_invalid_object(self):
# gh-9295
a = np.array([np.array([1, 2]), 3])
assert_raises(ValueError, np.nonzero, a)
class BoolErrors:
def __bool__(self):
raise ValueError("Not allowed")
def __nonzero__(self):
raise ValueError("Not allowed")
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
class TestIndex(object):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
g1 = randint(0, 5, size=15)
g2 = randint(0, 8, size=15)
V[g1, g2] = -V[g1, g2]
assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all())
def test_boolean_edgecase(self):
a = np.array([], dtype='int32')
b = np.array([], dtype='bool')
c = a[b]
assert_equal(c, [])
assert_equal(c.dtype, np.dtype('int32'))
class TestBinaryRepr(object):
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
def test_positive(self):
assert_equal(np.binary_repr(10), '1010')
assert_equal(np.binary_repr(12522),
'11000011101010')
assert_equal(np.binary_repr(10736848),
'101000111101010011010000')
def test_negative(self):
assert_equal(np.binary_repr(-1), '-1')
assert_equal(np.binary_repr(-10), '-1010')
assert_equal(np.binary_repr(-12522),
'-11000011101010')
assert_equal(np.binary_repr(-10736848),
'-101000111101010011010000')
def test_sufficient_width(self):
assert_equal(np.binary_repr(0, width=5), '00000')
assert_equal(np.binary_repr(10, width=7), '0001010')
assert_equal(np.binary_repr(-5, width=7), '1111011')
def test_neg_width_boundaries(self):
# see gh-8670
# Ensure that the example in the issue does not
# break before proceeding to a more thorough test.
assert_equal(np.binary_repr(-128, width=8), '10000000')
for width in range(1, 11):
num = -2**(width - 1)
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
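        # Note on the loop above: -2**(width - 1) is the most negative value that fits in
        # `width` two's-complement bits, so its representation is a leading 1 followed by
        # width - 1 zeros, which is exactly the `exp` string being built.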
class TestBaseRepr(object):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(np.base_repr(12, 10), '12')
assert_equal(np.base_repr(12, 10, 4), '000012')
assert_equal(np.base_repr(12, 4), '30')
assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(np.base_repr(-12, 10), '-12')
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
with assert_raises(ValueError):
np.base_repr(1, 1)
with assert_raises(ValueError):
np.base_repr(1, 37)
class TestArrayComparisons(object):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
assert_(res)
assert_(type(res) is bool)
res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
np.array([('a', 1)], dtype='S1,u4'))
assert_(res)
assert_(type(res) is bool)
def test_none_compares_elementwise(self):
a = np.array([None, 1, None], dtype=object)
assert_equal(a == None, [True, False, True])
assert_equal(a != None, [False, True, False])
a = np.ones(3)
assert_equal(a == None, [False, False, False])
assert_equal(a != None, [True, True, True])
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([1]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
assert_(res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([2]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
assert_(not res)
assert_(type(res) is bool)
res = np.array_equiv(np.array([1, 2]), np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
assert_(not res)
assert_(type(res) is bool)
def assert_array_strict_equal(x, y):
assert_array_equal(x, y)
# Check flags, 32 bit arches typically don't provide 16 byte alignment
if ((x.dtype.alignment <= 8 or
np.intp().dtype.itemsize != 4) and
sys.platform != 'win32'):
assert_(x.flags == y.flags)
else:
assert_(x.flags.owndata == y.flags.owndata)
assert_(x.flags.writeable == y.flags.writeable)
assert_(x.flags.c_contiguous == y.flags.c_contiguous)
assert_(x.flags.f_contiguous == y.flags.f_contiguous)
assert_(x.flags.writebackifcopy == y.flags.writebackifcopy)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(object):
def setup(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m, M)
else:
return a.clip(m, M, out)
def clip(self, a, m, M, out=None):
# use slow-clip
selector = np.less(a, m) + 2*np.greater(a, M)
return selector.choose((a, m, M), out=out)
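    # How the reference clip() above works: the selector is 0 where m <= a <= M,
    # 1 where a < m, and 2 where a > M, so selector.choose((a, m, M)) picks the
    # original value, the lower bound, or the upper bound per element.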
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j * rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(np.float32)
def _neg_byteorder(self, a):
a = np.asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
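    # _neg_byteorder above returns a copy whose dtype uses the byte order opposite to
    # the host, which is how the "non native" inputs for the byte-swap tests are built.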
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(np.int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
# Test native int input with scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
# Test native double input with array min/max.
a = self._generate_data(self.nr, self.nc)
m = np.zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
# Test non native double input with scalar min/max.
# Test native double input with non native double scalar min/max.
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
# Test native double input with non native double scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
# Test native complex input with native double scalar min/max.
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
# Test native input with complex double scalar min/max.
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_complex(self):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
aM = self.fastclip(a, None, M)
assert_array_strict_equal(am, a)
assert_array_strict_equal(aM, a)
def test_clip_non_contig(self):
# Test clip for non contiguous native input and native scalar min/max.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
# Test native int32 input with int32 scalar min/max and int64 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
# Test native int32 input with double array min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
# Test native double input with scalar min/max and int out.
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
# Test native double input with array min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
# Test native double input with scalar min/max in-place.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
# Test non contiguous double input with double scalar min/max in-place.
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
# Test native int32 input with int32 scalar min/max.
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(np.int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
# Test native int32 input with float64 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, np.float64(m), np.float64(M))
act = self.clip(a, np.float64(m), np.float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
# Test native int32 input with float32 scalar min/max.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float32(-2)
M = np.float32(4)
act = self.fastclip(a, m, M)
ac = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
# Test native int32 with double arrays min/max.
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * np.zeros(a.shape), M)
act = self.clip(a, m * np.zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
# Test native with NON native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
# Test NON native with native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
# Test NON native with native scalar min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
# Test native with NON native array min/max.
a = self._generate_data(self.nr, self.nc)
m = -0.5 * np.ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
# Test native int32 with float min/max and float out for output argument.
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.float32(-0.5)
M = np.float32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
# Test non native with native scalar, min/max, out non native
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out=b)
self.clip(a, m, M, out=bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
# Test native int32 input and min/max and float out
a = self._generate_int_data(self.nr, self.nc)
b = np.zeros(a.shape, dtype=np.float32)
m = np.int32(0)
M = np.int32(1)
act = self.clip(a, m, M, out=b)
ac = self.fastclip(a, m, M, out=b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = np.zeros(a.shape)
act = np.zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
# Test native int32 input with double min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
# Test native int32 input with int32 scalar min/max and int64 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.int32(-1)
M = np.int32(1)
ac = np.zeros(a.shape, dtype=np.int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
# Test native int32 input with double array min/max and int32 out
a = self._generate_int32_data(self.nr, self.nc)
m = np.zeros(a.shape, np.float64)
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
# Test native double input with scalar min/max and int out
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_transposed(self):
        # Test that the out argument works when transposed
a = np.arange(16).reshape(4, 4)
out = np.empty_like(a).T
a.clip(4, 10, out=out)
expected = self.clip(a, 4, 10)
assert_array_equal(out, expected)
def test_clip_with_out_memory_overlap(self):
# Test that the out argument works when it has memory overlap
a = np.arange(16).reshape(4, 4)
ac = a.copy()
a[:-1].clip(4, 10, out=a[1:])
expected = self.clip(ac[:-1], 4, 10)
assert_array_equal(a[1:], expected)
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = np.zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
# Test native double input with scalar min/max
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
        # Ensure that the clip() function takes an out= argument.
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
assert_equal(d.clip(min=np.nan), d)
assert_equal(d.clip(max=np.nan), d)
assert_equal(d.clip(min=np.nan, max=np.nan), d)
assert_equal(d.clip(min=-2, max=np.nan), d)
assert_equal(d.clip(min=np.nan, max=10), d)
class TestAllclose(object):
rtol = 1e-5
atol = 1e-8
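    # np.allclose/np.isclose treat x and y as close when abs(x - y) <= atol + rtol * abs(y),
    # so these two tolerances drive every test case below.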
def setup(self):
self.olderr = np.seterr(invalid='ignore')
def teardown(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
assert_(np.allclose(x, y), "%s and %s not close" % (x, y))
def tst_not_allclose(self, x, y):
assert_(not np.allclose(x, y), "%s and %s shouldn't be close" % (x, y))
def test_ip_allclose(self):
# Parametric test factory.
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([1, 0], [1, 0]),
([atol], [0]),
([1], [1+rtol+atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol*2),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf])]
for (x, y) in data:
self.tst_allclose(x, y)
def test_ip_not_allclose(self):
# Parametric test factory.
aran = np.arange(125).reshape((5, 5, 5))
atol = self.atol
rtol = self.rtol
data = [([np.inf, 0], [1, np.inf]),
([np.inf, 0], [1, 0]),
([np.inf, np.inf], [1, np.inf]),
([np.inf, np.inf], [1, 0]),
([-np.inf, 0], [np.inf, 0]),
([np.nan, 0], [np.nan, 0]),
([atol*2], [0]),
([1], [1+rtol+atol*2]),
(aran, aran + aran*atol + atol*2),
(np.array([np.inf, 1]), np.array([0, np.inf]))]
for (x, y) in data:
self.tst_not_allclose(x, y)
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.allclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_min_int(self):
# Could make problems because of abs(min_int) == min_int
min_int = np.iinfo(np.int_).min
a = np.array([min_int], dtype=np.int_)
assert_(np.allclose(a, a))
def test_equalnan(self):
x = np.array([1.0, np.nan])
assert_(np.allclose(x, x, equal_nan=True))
def test_return_class_is_ndarray(self):
# Issue gh-6475
# Check that allclose does not preserve subtypes
class Foo(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
a = Foo([1])
assert_(type(np.allclose(a, a)) is bool)
class TestIsclose(object):
rtol = 1e-5
atol = 1e-8
def setup(self):
atol = self.atol
rtol = self.rtol
arr = np.array([100, 1000])
aran = np.arange(125).reshape((5, 5, 5))
self.all_close_tests = [
([1, 0], [1, 0]),
([atol], [0]),
([1], [1 + rtol + atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol),
(aran, aran + aran*rtol),
(np.inf, np.inf),
(np.inf, [np.inf]),
([np.inf, -np.inf], [np.inf, -np.inf]),
]
self.none_close_tests = [
([np.inf, 0], [1, np.inf]),
([np.inf, -np.inf], [1, 0]),
([np.inf, np.inf], [1, -np.inf]),
([np.inf, np.inf], [1, 0]),
([np.nan, 0], [np.nan, -np.inf]),
([atol*2], [0]),
([1], [1 + rtol + atol*2]),
(aran, aran + rtol*1.1*aran + atol*1.1),
(np.array([np.inf, 1]), np.array([0, np.inf])),
]
self.some_close_tests = [
([np.inf, 0], [np.inf, atol*2]),
([atol, 1, 1e6*(1 + 2*rtol) + atol], [0, np.nan, 1e6]),
(np.arange(3), [0, 1, 2.1]),
(np.nan, [np.nan, np.nan, np.nan]),
([0], [atol, np.inf, -np.inf, np.nan]),
(0, [atol, np.inf, -np.inf, np.nan]),
]
self.some_close_results = [
[True, False],
[True, False, False],
[True, True, False],
[False, False, False],
[True, False, False, False],
[True, False, False, False],
]
def test_ip_isclose(self):
self.setup()
tests = self.some_close_tests
results = self.some_close_results
for (x, y), result in zip(tests, results):
assert_array_equal(np.isclose(x, y), result)
def tst_all_isclose(self, x, y):
assert_(np.all(np.isclose(x, y)), "%s and %s not close" % (x, y))
def tst_none_isclose(self, x, y):
msg = "%s and %s shouldn't be close"
assert_(not np.any(np.isclose(x, y)), msg % (x, y))
def tst_isclose_allclose(self, x, y):
msg = "isclose.all() and allclose aren't same for %s and %s"
msg2 = "isclose and allclose aren't same for %s and %s"
if np.isscalar(x) and np.isscalar(y):
assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y))
else:
assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y))
def test_ip_all_isclose(self):
self.setup()
for (x, y) in self.all_close_tests:
self.tst_all_isclose(x, y)
def test_ip_none_isclose(self):
self.setup()
for (x, y) in self.none_close_tests:
self.tst_none_isclose(x, y)
def test_ip_isclose_allclose(self):
self.setup()
tests = (self.all_close_tests + self.none_close_tests +
self.some_close_tests)
for (x, y) in tests:
self.tst_isclose_allclose(x, y)
def test_equal_nan(self):
assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True])
arr = np.array([1.0, np.nan])
assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True])
def test_masked_arrays(self):
# Make sure to test the output type when arguments are interchanged.
x = np.ma.masked_where([True, True, False], np.arange(3))
assert_(type(x) is type(np.isclose(2, x)))
assert_(type(x) is type(np.isclose(x, 2)))
x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan])
assert_(type(x) is type(np.isclose(np.inf, x)))
assert_(type(x) is type(np.isclose(x, np.inf)))
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(np.nan, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
y = np.isclose(x, np.nan, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan])
y = np.isclose(x, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
def test_scalar_return(self):
assert_(np.isscalar(np.isclose(1, 1)))
def test_no_parameter_modification(self):
x = np.array([np.inf, 1])
y = np.array([0, np.inf])
np.isclose(x, y)
assert_array_equal(x, np.array([np.inf, 1]))
assert_array_equal(y, np.array([0, np.inf]))
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
assert_(np.isclose(np.inf, -np.inf) is np.False_)
assert_(np.isclose(0, np.inf) is np.False_)
assert_(type(np.isclose(0, np.inf)) is np.bool_)
class TestStdVar(object):
def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
def test_basic(self):
assert_almost_equal(np.var(self.A), self.real_var)
assert_almost_equal(np.std(self.A)**2, self.real_var)
def test_scalars(self):
assert_equal(np.var(1), 0)
assert_equal(np.std(1), 0)
def test_ddof1(self):
assert_almost_equal(np.var(self.A, ddof=1),
self.real_var*len(self.A)/float(len(self.A)-1))
assert_almost_equal(np.std(self.A, ddof=1)**2,
self.real_var*len(self.A)/float(len(self.A)-1))
def test_ddof2(self):
assert_almost_equal(np.var(self.A, ddof=2),
self.real_var*len(self.A)/float(len(self.A)-2))
assert_almost_equal(np.std(self.A, ddof=2)**2,
self.real_var*len(self.A)/float(len(self.A)-2))
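    # With ddof=k the variance divisor becomes N - k instead of N, hence the expected
    # value real_var * N / (N - k) asserted in the two ddof tests above.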
def test_out_scalar(self):
d = np.arange(10)
out = np.array(0.)
r = np.std(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.var(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
r = np.mean(d, out=out)
assert_(r is out)
assert_array_equal(r, out)
class TestStdVarComplex(object):
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
assert_almost_equal(np.var(A), real_var)
assert_almost_equal(np.std(A)**2, real_var)
def test_scalars(self):
assert_equal(np.var(1j), 0)
assert_equal(np.std(1j), 0)
class TestCreationFuncs(object):
# Test ones, zeros, empty and full.
def setup(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
self.dtypes = sorted(dtypes - variable_sized |
{np.dtype(tp.str.replace("0", str(i)))
for tp in variable_sized for i in range(1, 10)},
key=lambda dtype: dtype.str)
self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'}
self.ndims = 10
def check_function(self, func, fill_value=None):
par = ((0, 1, 2),
range(self.ndims),
self.orders,
self.dtypes)
fill_kwarg = {}
if fill_value is not None:
fill_kwarg = {'fill_value': fill_value}
for size, ndims, order, dtype in itertools.product(*par):
shape = ndims * [size]
# do not fill void type
if fill_kwarg and dtype.str.startswith('|V'):
continue
arr = func(shape, order=order, dtype=dtype,
**fill_kwarg)
assert_equal(arr.dtype, dtype)
assert_(getattr(arr.flags, self.orders[order]))
if fill_value is not None:
if dtype.str.startswith('|S'):
val = str(fill_value)
else:
val = fill_value
assert_equal(arr, dtype.type(val))
def test_zeros(self):
self.check_function(np.zeros)
def test_ones(self):
        self.check_function(np.ones)
def test_empty(self):
self.check_function(np.empty)
def test_full(self):
self.check_function(np.full, 0)
self.check_function(np.full, 1)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_for_reference_leak(self):
# Make sure we have an object for reference
dim = 1
beg = sys.getrefcount(dim)
np.zeros([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.ones([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.empty([dim]*10)
assert_(sys.getrefcount(dim) == beg)
np.full([dim]*10, 0)
assert_(sys.getrefcount(dim) == beg)
class TestLikeFuncs(object):
'''Test ones_like, zeros_like, empty_like and full_like'''
def setup(self):
self.data = [
# Array scalars
(np.array(3.), None),
(np.array(3), 'f8'),
# 1D arrays
(np.arange(6, dtype='f4'), None),
(np.arange(6), 'c16'),
# 2D C-layout arrays
(np.arange(6).reshape(2, 3), None),
(np.arange(6).reshape(3, 2), 'i1'),
# 2D F-layout arrays
(np.arange(6).reshape((2, 3), order='F'), None),
(np.arange(6).reshape((3, 2), order='F'), 'i1'),
# 3D C-layout arrays
(np.arange(24).reshape(2, 3, 4), None),
(np.arange(24).reshape(4, 3, 2), 'f4'),
# 3D F-layout arrays
(np.arange(24).reshape((2, 3, 4), order='F'), None),
(np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
def compare_array_value(self, dz, value, fill_value):
if value is not None:
if fill_value:
try:
z = dz.dtype.type(value)
except OverflowError:
pass
else:
assert_(np.all(dz == z))
else:
assert_(np.all(dz == value))
def check_like_function(self, like_function, value, fill_value=False):
if fill_value:
fill_kwarg = {'fill_value': value}
else:
fill_kwarg = {}
for d, dtype in self.data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_equal(np.array(dz.strides)*d.dtype.itemsize,
np.array(d.strides)*dz.dtype.itemsize)
assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous)
assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# A order
dz = like_function(d, order='A', dtype=dtype, **fill_kwarg)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
# Test the 'subok' parameter
class MyNDArray(np.ndarray):
pass
a = np.array([[1, 2], [3, 4]]).view(MyNDArray)
b = like_function(a, **fill_kwarg)
assert_(type(b) is MyNDArray)
b = like_function(a, subok=False, **fill_kwarg)
assert_(type(b) is not MyNDArray)
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
def test_filled_like(self):
self.check_like_function(np.full_like, 0, True)
self.check_like_function(np.full_like, 1, True)
self.check_like_function(np.full_like, 1000, True)
self.check_like_function(np.full_like, 123.456, True)
self.check_like_function(np.full_like, np.inf, True)
class TestCorrelate(object):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
self.zs = np.array([-3., -14., -30., -48., -66., -84.,
-102., -54., -19.], dtype=dt)
def test_float(self):
self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
assert_array_almost_equal(z, self.z1_4)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
z = np.correlate(self.x[::-1], self.y, 'full')
assert_array_almost_equal(z, self.z1r)
z = np.correlate(self.y, self.x[::-1], 'full')
assert_array_almost_equal(z, self.z2r)
z = np.correlate(self.xs, self.y, 'full')
assert_array_almost_equal(z, self.zs)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.correlate(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=complex)
y = np.array([-1, -2j, 3+1j], dtype=complex)
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
class TestConvolve(object):
def test_object(self):
d = [1.] * 100
k = [1.] * 3
assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.convolve(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
class TestArgwhere(object):
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
[[0, 2],
[1, 0],
[1, 1],
[1, 2]])
def test_list(self):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction(object):
def test_set_string_function(self):
a = np.array([1])
np.set_string_function(lambda x: "FOO", repr=True)
assert_equal(repr(a), "FOO")
np.set_string_function(None, repr=True)
assert_equal(repr(a), "array([1])")
np.set_string_function(lambda x: "FOO", repr=False)
assert_equal(str(a), "FOO")
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
class TestRoll(object):
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
def test_roll2d(self):
x2 = np.reshape(np.arange(10), (2, 5))
x2r = np.roll(x2, 1)
assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
x2r = np.roll(x2, 1, axis=0)
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, 1, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
# Roll multiple axes at once.
x2r = np.roll(x2, 1, axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (-1, 0), axis=(0, 1))
assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
x2r = np.roll(x2, (0, 1), axis=(0, 1))
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, (0, -1), axis=(0, 1))
assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
x2r = np.roll(x2, (1, 1), axis=(0, 1))
assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
x2r = np.roll(x2, (-1, -1), axis=(0, 1))
assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
# Roll the same axis multiple times.
x2r = np.roll(x2, 1, axis=(0, 0))
assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
x2r = np.roll(x2, 1, axis=(1, 1))
assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
# Roll more than one turn in either direction.
x2r = np.roll(x2, 6, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
x2r = np.roll(x2, -4, axis=1)
assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
def test_roll_empty(self):
x = np.array([])
assert_equal(np.roll(x, 1), np.array([]))
class TestRollaxis(object):
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
(0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
(0, 4): (2, 3, 4, 1),
(1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
(1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
(1, 4): (1, 3, 4, 2),
(2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
(2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
(2, 4): (1, 2, 4, 3),
(3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
(3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
(3, 4): (1, 2, 3, 4)}
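    # The dict above covers every (axis, start) pair for a (1, 2, 3, 4) array; for
    # example np.rollaxis(a, axis=2, start=0) yields shape (3, 1, 2, 4), matching
    # tgtshape[(2, 0)].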
def test_exceptions(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4)
assert_raises(np.AxisError, np.rollaxis, a, -5, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, -5)
assert_raises(np.AxisError, np.rollaxis, a, 4, 0)
assert_raises(np.AxisError, np.rollaxis, a, 0, 5)
def test_results(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
aind = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
for (i, j) in self.tgtshape:
# positive axis, positive start
res = np.rollaxis(a, axis=i, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, j)], str((i,j)))
assert_(not res.flags['OWNDATA'])
# negative axis, positive start
ip = i + 1
res = np.rollaxis(a, axis=-ip, start=j)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, j)])
assert_(not res.flags['OWNDATA'])
# positive axis, negative start
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=i, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(i, 4 - jp)])
assert_(not res.flags['OWNDATA'])
# negative axis, negative start
ip = i + 1
jp = j + 1 if j < 4 else j
res = np.rollaxis(a, axis=-ip, start=-jp)
i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
assert_(np.all(res[i0, i1, i2, i3] == a))
assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
assert_(not res.flags['OWNDATA'])
class TestMoveaxis(object):
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
(1, (5, 7, 6)),
(2, (5, 6, 7)),
(-1, (5, 6, 7))]:
actual = np.moveaxis(x, source, -1).shape
            assert_equal(actual, expected)
def test_move_new_position(self):
x = np.random.randn(1, 2, 3, 4)
for source, destination, expected in [
(0, 1, (2, 1, 3, 4)),
(1, 2, (1, 3, 2, 4)),
(1, -1, (1, 3, 4, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_preserve_order(self):
x = np.zeros((1, 2, 3, 4))
for source, destination in [
(0, 0),
(3, -1),
(-1, 3),
([0, -1], [0, -1]),
([2, 0], [2, 0]),
(range(4), range(4)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, (1, 2, 3, 4))
def test_move_multiples(self):
x = np.zeros((0, 1, 2, 3))
for source, destination, expected in [
([0, 1], [2, 3], (2, 3, 0, 1)),
([2, 3], [0, 1], (2, 3, 0, 1)),
([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
([3, 0], [1, 0], (0, 3, 1, 2)),
([0, 3], [0, 1], (0, 3, 1, 2)),
]:
actual = np.moveaxis(x, source, destination).shape
            assert_equal(actual, expected)
def test_errors(self):
x = np.random.randn(1, 2, 3)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, 3, 0)
assert_raises_regex(np.AxisError, 'source.*out of bounds',
np.moveaxis, x, -4, 0)
assert_raises_regex(np.AxisError, 'destination.*out of bounds',
np.moveaxis, x, 0, 5)
assert_raises_regex(ValueError, 'repeated axis in `source`',
np.moveaxis, x, [0, 0], [0, 1])
assert_raises_regex(ValueError, 'repeated axis in `destination`',
np.moveaxis, x, [0, 1], [1, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, 0, [0, 1])
assert_raises_regex(ValueError, 'must have the same number',
np.moveaxis, x, [0, 1], [0])
def test_array_likes(self):
x = np.ma.zeros((1, 2, 3))
result = np.moveaxis(x, 0, 0)
        assert_equal(x.shape, result.shape)
assert_(isinstance(result, np.ma.MaskedArray))
x = [1, 2, 3]
result = np.moveaxis(x, 0, 0)
        assert_equal(x, list(result))
assert_(isinstance(result, np.ndarray))
class TestCross(object):
def test_2x2(self):
u = [1, 2]
v = [3, 4]
z = -2
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_2x3(self):
u = [1, 2]
v = [3, 4, 5]
z = np.array([10, -5, -2])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_3x3(self):
u = [1, 2, 3]
v = [4, 5, 6]
z = np.array([-3, 6, -3])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_broadcasting(self):
# Ticket #2624 (Trac #2032)
u = np.tile([1, 2], (11, 1))
v = np.tile([3, 4], (11, 1))
z = -2
assert_equal(np.cross(u, v), z)
assert_equal(np.cross(v, u), -z)
assert_equal(np.cross(u, u), 0)
u = np.tile([1, 2], (11, 1)).T
v = np.tile([3, 4, 5], (11, 1))
z = np.tile([10, -5, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0), z)
assert_equal(np.cross(v, u.T), -z)
assert_equal(np.cross(v, v), 0)
u = np.tile([1, 2, 3], (11, 1)).T
v = np.tile([3, 4], (11, 1)).T
z = np.tile([-12, 9, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
assert_equal(np.cross(v.T, u.T), -z)
assert_equal(np.cross(u.T, u.T), 0)
u = np.tile([1, 2, 3], (5, 1))
v = np.tile([4, 5, 6], (5, 1)).T
z = np.tile([-3, 6, -3], (5, 1))
assert_equal(np.cross(u, v, axisb=0), z)
assert_equal(np.cross(v.T, u), -z)
assert_equal(np.cross(u, u), 0)
def test_broadcasting_shapes(self):
u = np.ones((2, 1, 3))
v = np.ones((5, 3))
assert_equal(np.cross(u, v).shape, (2, 5, 3))
u = np.ones((10, 3, 5))
v = np.ones((2, 5))
assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=3, axisb=0)
u = np.ones((10, 3, 5, 7))
v = np.ones((5, 7, 2))
assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
assert_raises(np.AxisError, np.cross, u, v, axisa=-5, axisb=2)
assert_raises(np.AxisError, np.cross, u, v, axisa=1, axisb=-4)
# gh-5885
u = np.ones((3, 4, 2))
for axisc in range(-2, 2):
assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_outer_out_param():
arr1 = np.ones((5,))
arr2 = np.ones((2,))
arr3 = np.linspace(-2, 2, 5)
out1 = np.ndarray(shape=(5,5))
out2 = np.ndarray(shape=(2, 5))
res1 = np.outer(arr1, arr3, out1)
assert_equal(res1, out1)
assert_equal(np.outer(arr2, arr3, out2), out2)
class TestRequire(object):
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
'A', 'ALIGNED',
'W', 'WRITEABLE',
'O', 'OWNDATA']
def generate_all_false(self, dtype):
arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)])
arr.setflags(write=False)
a = arr['a']
assert_(not a.flags['C'])
assert_(not a.flags['F'])
assert_(not a.flags['O'])
assert_(not a.flags['W'])
assert_(not a.flags['A'])
return a
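    # The 'a' field view returned above is offset by the 1-byte 'junk' field and strided,
    # so it is neither C- nor F-contiguous, does not own its data, is misaligned for the
    # multi-byte dtypes used here, and inherits write=False -- hence every flag is False.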
def set_and_check_flag(self, flag, dtype, arr):
if dtype is None:
dtype = arr.dtype
b = np.require(arr, dtype, [flag])
assert_(b.flags[flag])
assert_(b.dtype == dtype)
# a further call to np.require ought to return the same array
# unless OWNDATA is specified.
c = np.require(b, None, [flag])
if flag[0] != 'O':
assert_(c is b)
else:
assert_(c.flags[flag])
def test_require_each(self):
id = ['f8', 'i4']
fd = [None, 'f8', 'c16']
for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names):
a = self.generate_all_false(idtype)
self.set_and_check_flag(flag, fdtype, a)
def test_unknown_requirement(self):
a = self.generate_all_false('f8')
assert_raises(KeyError, np.require, a, None, 'Q')
def test_non_array_input(self):
a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O'])
assert_(a.flags['O'])
assert_(a.flags['C'])
assert_(a.flags['A'])
assert_(a.dtype == 'i4')
assert_equal(a, [1, 2, 3, 4])
def test_C_and_F_simul(self):
a = self.generate_all_false('f8')
assert_raises(ValueError, np.require, a, None, ['C', 'F'])
def test_ensure_array(self):
class ArraySubclass(np.ndarray):
pass
a = ArraySubclass((2, 2))
b = np.require(a, None, ['E'])
assert_(type(b) is np.ndarray)
def test_preserve_subtype(self):
class ArraySubclass(np.ndarray):
pass
for flag in self.flag_names:
a = ArraySubclass((2, 2))
self.set_and_check_flag(flag, None, a)
class TestBroadcast(object):
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
np.empty((5, 1, 7))]
mits = [np.broadcast(*arrs),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
assert_(a is ia.base)
def test_broadcast_single_arg(self):
# gh-6899
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
def test_number_of_arguments(self):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
if j < 1 or j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
def test_broadcast_error_kwargs(self):
#gh-13455
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
mit2 = np.broadcast(*arrs, **{})
assert_equal(mit.shape, mit2.shape)
assert_equal(mit.ndim, mit2.ndim)
assert_equal(mit.nd, mit2.nd)
assert_equal(mit.numiter, mit2.numiter)
assert_(mit.iters[0].base is mit2.iters[0].base)
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
class TestKeepdims(object):
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
def test_raise(self):
sub_class = self.sub_array
x = np.arange(30).view(sub_class)
assert_raises(TypeError, np.sum, x, keepdims=True)
class TestTensordot(object):
def test_zero_dimension(self):
# Test resolution to issue #5663
a = np.ndarray((3,0))
b = np.ndarray((0,4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
| 37.569693 | 91 | 0.533685 |
d1a0982887a670d4106c82412f101ca5cc3acaaa | 165,496 | py | Python | python/ccxt/async_support/okex.py | fyesgo/ccxt | ba75b256fcdc0ba781ecdb9ae69c293bc0fa22ef | ["MIT"] | 1 | 2022-01-20T10:47:10.000Z | 2022-01-20T10:47:10.000Z | python/ccxt/async_support/okex.py | fyesgo/ccxt | ba75b256fcdc0ba781ecdb9ae69c293bc0fa22ef | ["MIT"] | null | null | null | python/ccxt/async_support/okex.py | fyesgo/ccxt | ba75b256fcdc0ba781ecdb9ae69c293bc0fa22ef | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2
except NameError:
    basestring = str  # Python 3
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class okex(Exchange):
def describe(self):
return self.deep_extend(super(okex, self).describe(), {
'id': 'okex',
'name': 'OKEX',
'countries': ['CN', 'US'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'pro': True,
'has': {
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': False, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchOrderTrades': True,
'fetchTime': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'futures': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okex.com',
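            # The {hostname} placeholder in the REST url below is filled in from this value
            # when request urls are built, so the hostname can be overridden via exchange config.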
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/32552768-0d6dd3c6-c4a6-11e7-90f8-c043b64756a7.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okex.com',
'doc': 'https://www.okex.com/docs/en/',
'fees': 'https://www.okex.com/pages/products/fees.html',
'referral': 'https://www.okex.com/join/1888677',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': [
'time',
],
},
'account': {
'get': [
'wallet',
'sub-account',
'asset-valuation',
'wallet/{currency}',
'withdrawal/history',
'withdrawal/history/{currency}',
'ledger',
'deposit/address',
'deposit/history',
'deposit/history/{currency}',
'currencies',
'withdrawal/fee',
],
'post': [
'transfer',
'withdrawal',
],
},
'spot': {
'get': [
'accounts',
'accounts/{currency}',
'accounts/{currency}/ledger',
'orders',
'amend_order/{instrument_id}',
'orders_pending',
'orders/{order_id}',
'orders/{client_oid}',
'trade_fee',
'fills',
'algo',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
],
'post': [
'order_algo',
'orders',
'batch_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_algos',
'cancel_batch_orders',
],
},
'margin': {
'get': [
'accounts',
'accounts/{instrument_id}',
'accounts/{instrument_id}/ledger',
'accounts/availability',
'accounts/{instrument_id}/availability',
'accounts/borrowed',
'accounts/{instrument_id}/borrowed',
'orders',
'accounts/{instrument_id}/leverage',
'orders/{order_id}',
'orders/{client_oid}',
'orders_pending',
'fills',
# public
'instruments/{instrument_id}/mark_price',
],
'post': [
'accounts/borrow',
'accounts/repayment',
'orders',
'batch_orders',
'cancel_orders',
'cancel_orders/{order_id}',
'cancel_orders/{client_oid}',
'cancel_batch_orders',
'accounts/{instrument_id}/leverage',
],
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
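            # ccxt expands each path listed above into a callable endpoint method at runtime,
            # e.g. the 'account' GET 'wallet' entry becomes self.accountGetWallet(params).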
'fees': {
'trading': {
'taker': 0.0015,
'maker': 0.0010,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
'futures': {
'taker': 0.0005,
'maker': 0.0002,
},
'swap': {
'taker': 0.00075,
'maker': 0.00020,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
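            # apiKey, secret and password (the API passphrase) are used to build the
            # OK_ACCESS_KEY, OK_ACCESS_SIGN and OK_ACCESS_PASSPHRASE headers referenced
            # in the error codes below.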
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 429 Client Error: Too Many Requests for url
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again.': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
                    '30016': ExchangeError,  # {"code": 30016, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in self website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
                    '32022': ExchangeError,  # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
                    '32023': ExchangeError,  # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
                    '32044': ExchangeError,  # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
                    '32050': InvalidOrder,  # Order strategy range error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
                    '32064': ExchangeError,  # Time interval of orders should be set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
                    '32067': ExchangeError,  # Account equity < required margin in this setting. Please adjust your leverage level again.
                    '32068': ExchangeError,  # The margin for this position will fall short of the required margin in this setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
                    '32071': ExchangeError,  # Your current liquidation mode does not support this action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
                    '32074': ExchangeError,  # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of this asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
                    '32077': ExchangeError,  # Your margin for this asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during this period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
                    '33001': PermissionDenied,  # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
                    '33002': AccountSuspended,  # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
                    '33016': ExchangeError,  # {"code": 33016, "message": "margin trading is not open for this token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
                    '33018': ExchangeError,  # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
                    '33062': ExchangeError,  # The leverage ratio is too high. The borrowed position has exceeded the maximum position of this leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
'33085': InvalidOrder, # The value of the position and buying order has reached the position limit, and no further buying is allowed.
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
                    '34003': ExchangeError,  # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': RateLimitExceeded, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': InsufficientFunds, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
'35102': RateLimitExceeded, # {"error_message":"The operation that close all at market price is too frequent","result":"true","error_code":"35102","order_id":"-1"}
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
                    '36212': InvalidOrder,  # Exceeding max batch size for order cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot', 'futures', 'swap', 'option'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
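    # Configuration sketch (illustrative only, not part of the class): the 'options' above can
    # be overridden per instance. Assuming the class is instantiated the usual ccxt way via
    # ccxt.async_support (import path, credentials and values below are placeholders):
    #
    #     import ccxt.async_support as ccxt
    #     exchange = ccxt.okex({
    #         'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET', 'password': 'YOUR_PASSPHRASE',
    #         'options': {
    #             'defaultType': 'swap',  # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
    #             'fetchOHLCV': {'type': 'HistoryCandles'},  # Candles or HistoryCandles
    #         },
    #     })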
async def fetch_time(self, params={}):
response = await self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
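    # Usage sketch (illustrative only): estimating local clock drift against the server,
    # assuming an instantiated async exchange object named `exchange` as in the sketch above:
    #
    #     server_ms = await exchange.fetch_time()
    #     drift_ms = exchange.milliseconds() - server_ms
    #     print('clock drift (ms):', drift_ms)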
async def fetch_markets(self, params={}):
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = await self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
#
# futures markets
#
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
        #         alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
#
# swap markets
#
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
#
# options markets
#
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# }
#
id = self.safe_string(market, 'instrument_id')
marketType = 'spot'
spot = True
future = False
swap = False
option = False
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
contractVal = self.safe_number(market, 'contract_val')
if contractVal is not None:
if 'option_type' in market:
marketType = 'option'
spot = False
option = True
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
else:
marketType = 'swap'
spot = False
swap = True
futuresAlias = self.safe_string(market, 'alias')
if futuresAlias is not None:
swap = False
future = True
marketType = 'futures'
baseId = self.safe_string(market, 'underlying_index')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = (base + '/' + quote) if spot else id
lotSize = self.safe_number_2(market, 'lot_size', 'trade_increment')
precision = {
'amount': self.safe_number(market, 'size_increment', lotSize),
'price': self.safe_number(market, 'tick_size'),
}
minAmount = self.safe_number_2(market, 'min_size', 'base_min_size')
active = True
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'futures': future,
'swap': swap,
'option': option,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precision['price'],
'max': None,
},
'cost': {
'min': precision['price'],
'max': None,
},
},
})
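    # Note on the unified structure built above: spot markets get a 'BASE/QUOTE' symbol
    # (e.g. 'EOS/OKB'), while futures, swap and option markets keep the raw instrument_id as
    # the symbol (e.g. 'XRP-USD-200320', 'BSV-USD-SWAP', 'BTC-USD-200327-4000-C'), with the
    # 'type', 'spot', 'futures', 'swap' and 'option' flags set accordingly.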
async def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = await self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = await self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = await getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
        #             alias: "this_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType does not support market type ' + type)
async def fetch_currencies(self, params={}):
# has['fetchCurrencies'] is currently set to False
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
response = await self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
precision = 0.00000001 # default precision, todo: fix "magic constants"
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
active = True if (canDeposit and canWithdraw) else False
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
# swap
#
# {
# "asks":[
# ["916.21","94","0","1"]
# ],
# "bids":[
# ["916.1","15","0","1"]
# ],
# "time":"2021-04-16T02:04:48.282Z"
# }
#
timestamp = self.parse8601(self.safe_string_2(response, 'timestamp', 'time'))
return self.parse_order_book(response, symbol, timestamp)
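    # Usage sketch (illustrative only): fetching a shallow book and computing the spread,
    # assuming an instantiated async `exchange` and a symbol listed by this exchange:
    #
    #     orderbook = await exchange.fetch_order_book('BTC/USDT', limit=5)
    #     best_bid = orderbook['bids'][0][0] if orderbook['bids'] else None
    #     best_ask = orderbook['asks'][0][0] if orderbook['asks'] else None
    #     if (best_bid is not None) and (best_ask is not None):
    #         print('spread:', best_ask - best_bid)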
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
symbol = None
marketId = self.safe_string(ticker, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
last = self.safe_number(ticker, 'last')
open = self.safe_number(ticker, 'open_24h')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_24h'),
'low': self.safe_number(ticker, 'low_24h'),
'bid': self.safe_number(ticker, 'best_bid'),
'bidVolume': self.safe_number(ticker, 'best_bid_size'),
'ask': self.safe_number(ticker, 'best_ask'),
'askVolume': self.safe_number(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_number(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_number(ticker, 'quote_volume_24h'),
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = await getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
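    # Usage sketch (illustrative only): parse_ticker above leaves 'percentage' as None, so a
    # 24h change can be derived from 'last' and 'open', assuming an instantiated async
    # `exchange`:
    #
    #     ticker = await exchange.fetch_ticker('ETH/BTC')
    #     if (ticker['last'] is not None) and ticker['open']:
    #         print('24h change %:', (ticker['last'] - ticker['open']) / ticker['open'] * 100)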
async def fetch_tickers_by_type(self, type, symbols=None, params={}):
await self.load_markets()
method = type + 'GetInstrumentsTicker'
response = await getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_tickers(self, symbols=None, params={}):
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return await self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'size', 'qty')
amountString = self.safe_string(trade, 'order_qty', amountString)
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
feeCost = self.safe_number(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': -feeCost,
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
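    # Usage sketch (illustrative only): the page size is capped at 100 above, so one call
    # returns at most 100 recent public trades; older pages would go through the 'from'/'to'
    # ids in params(assuming an instantiated async `exchange`):
    #
    #     trades = await exchange.fetch_trades('BTC/USDT', limit=50)
    #     for trade in trades[0:3]:
    #         print(trade['datetime'], trade['side'], trade['price'], trade['amount'])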
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, basestring):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_number(ohlcv, 1), # Open
self.safe_number(ohlcv, 2), # High
self.safe_number(ohlcv, 3), # Low
self.safe_number(ohlcv, 4), # Close
# self.safe_number(ohlcv, 5), # Quote Volume
# self.safe_number(ohlcv, 6), # Base Volume
                self.safe_number(ohlcv, volumeIndex),  # Volume, okex will return base volume in the 7th element for futures markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_number(ohlcv, 'open'), # Open
self.safe_number(ohlcv, 'high'), # High
self.safe_number(ohlcv, 'low'), # Low
self.safe_number(ohlcv, 'close'), # Close
self.safe_number(ohlcv, 'volume'), # Base Volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300 # default
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0"
# },
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
# ]
#
# futures
#
# [
# [
# 1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868
# ],
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331
# ]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
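    # Usage sketch (illustrative only): one day of hourly candles; the start/end window is
    # derived from `since` and `limit` as implemented above (assuming an instantiated async
    # `exchange`):
    #
    #     since = exchange.milliseconds() - 24 * 60 * 60 * 1000
    #     candles = await exchange.fetch_ohlcv('BTC/USDT', '1h', since, 24)
    #     # each row is [timestamp, open, high, low, close, volume]
    #     # the HistoryCandles endpoint can be selected per call via params, where supported:
    #     # candles = await exchange.fetch_ohlcv('BTC/USDT', '1h', since, 24, {'type': 'HistoryCandles'})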
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_number(balance, 'balance')
account['used'] = self.safe_number(balance, 'hold')
account['free'] = self.safe_number(balance, 'available')
result[code] = account
return self.parse_balance(result)
def parse_margin_balance(self, response):
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_number(marketBalance, 'balance')
account['used'] = self.safe_number(marketBalance, 'hold')
account['free'] = self.safe_number(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.parse_balance(accounts)
return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_number(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
                for j in range(0, len(contracts)):
                    contract = contracts[j]
fixedBalance = self.safe_number(contract, 'fixed_balance')
realizedPnl = self.safe_number(contract, 'realized_pnl')
marginFrozen = self.safe_number(contract, 'margin_frozen')
marginForUnfilled = self.safe_number(contract, 'margin_for_unfilled')
margin = self.sum(fixedBalance, realizedPnl) - marginFrozen - marginForUnfilled
free = self.sum(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_number(balance, 'realized_pnl')
unrealizedPnl = self.safe_number(balance, 'unrealized_pnl')
marginFrozen = self.safe_number(balance, 'margin_frozen')
marginForUnfilled = self.safe_number(balance, 'margin_for_unfilled')
account['free'] = self.sum(totalAvailBalance, realizedPnl, unrealizedPnl) - marginFrozen - marginForUnfilled
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_number(balance, 'equity')
result[code] = account
return self.parse_balance(result)
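    # Worked example of the fixed-margin 'free' computation above, using the numbers from the
    # sample response: for the EOS-USD-190628 contract,
    # margin = 14.54895721 + (-3.90853564) - 10.64042157 - 0 = 0, so 'free' stays at the
    # total_avail_balance of 40.37069445 after adding that contract's margin.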
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_number(balance, 'equity')
account['free'] = self.safe_number(balance, 'total_avail_balance')
result[symbol] = account
return self.parse_balance(result)
async def fetch_balance(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
await self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
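    # Usage sketch (illustrative only): the account type must come from options['defaultType']
    # or from params, assuming an authenticated async `exchange`:
    #
    #     spot_balance = await exchange.fetch_balance({'type': 'spot'})
    #     print(spot_balance['total'])  # unified per-currency totals
    #     swap_balance = await exchange.fetch_balance({'type': 'swap'})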
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
            # 'order_type': '0',  # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediate Or Cancel, 4 = Market for futures only
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
orderType = self.safe_string(params, 'order_type')
# order_type == '4' means a market order
isMarketOrder = (type == 'market') or (orderType == '4')
if isMarketOrder:
request['order_type'] = '4'
else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1') # 1 = spot, 2 = margin
request = self.extend(request, {
'side': side,
'type': type, # limit/market
'margin_trading': marginTrading, # 1 = spot, 2 = margin
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_number(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
else:
notional = amount if (notional is None) else notional
precision = market['precision']['price']
request['notional'] = self.decimal_to_precision(notional, TRUNCATE, precision, self.precisionMode)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
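    # Usage sketch (illustrative only, placeholder symbol/amount/price), assuming an
    # authenticated async `exchange`:
    #
    #     # spot limit buy
    #     order = await exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 40000)
    #     # spot market buy: with the default createMarketBuyOrderRequiresPrice = True a price
    #     # is still required so the quote cost (notional = amount * price) can be computed
    #     order = await exchange.create_order('BTC/USDT', 'market', 'buy', 0.001, 40000)
    #     # futures/swap orders use type '1'..'4' (open/close long/short) with a contract size,
    #     # e.g. (illustrative): await exchange.create_order('BTC-USD-SWAP', '1', None, 1, 40000)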
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = await getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot, margin
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
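    # Usage sketch (illustrative only): cancelling by exchange order id or by client order id,
    # assuming an authenticated async `exchange` (ids below are placeholders):
    #
    #     await exchange.cancel_order('2510832677225473', 'BTC/USDT')
    #     await exchange.cancel_order(None, 'BTC/USDT', {'client_oid': 'a123'})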
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
        #         "funds":"", # this is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
        #         "pnl":"1.09510794", # missing in swap, spot and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
symbol = None
marketId = self.safe_string(order, 'instrument_id')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
if market is not None:
if symbol is None:
symbol = market['symbol']
amount = self.safe_number(order, 'size')
filled = self.safe_number_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
cost = self.safe_number_2(order, 'filled_notional', 'funds')
price = self.safe_number(order, 'price')
average = self.safe_number(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = average * filled
else:
if (average is None) and (filled is not None) and (filled > 0):
average = cost / filled
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_number(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None # fix empty clientOrderId string
stopPrice = self.safe_number(order, 'trigger_price')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}
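    # Worked example of the derived fields above, using hypothetical numbers: with
    # amount = 10, filled = 4 and average = 3.5, the parser yields
    # remaining = max(0, 10 - 4) = 6 and, when cost is missing, cost = 3.5 * 4 = 14.0.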
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
        #     # in fact, this documented API response does not correspond
        #     # to their actual API response for spot markets
        #     # OKEX v3 API returns a plain array of orders (see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
        # in fact, this documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('6', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
parts = code.split('-')
currency = self.currency(parts[0])
request = {
'currency': currency['id'],
}
response = await self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addressesByCode = self.parse_deposit_addresses(response)
address = self.safe_value(addressesByCode, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
            'destination': '4', # 2 = OKCoin International, 3 = OKEx, 4 = others
'amount': self.number_to_string(amount),
'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = await self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
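    # Illustrative call for the method above (all values are assumptions): an explicit
    # network fee string is mandatory, with '0' reserved for fee-free internal transfers
    # to OKCoin/OKEx accounts:
    #
    #     await exchange.withdraw('BTC', 0.01, '1A1zP1...', None, {'fee': '0.0005', 'password': '...'})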
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "tag": "1234567",
        #         "deposit_id": 11571659, <-- we can use this
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
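    # Worked example of the fee parsing above, based on the fetchWithdrawals sample shown:
    # fee "0.01000000eth" combined with currency "ETH" has the lowercased currency id
    # stripped from the string, leaving a numeric fee cost of 0.01.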
def parse_my_trade(self, pair, market=None):
# check that trading symbols match in both entries
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amount = None
cost = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amount = self.safe_number(otherTrade, 'size')
cost = self.safe_number(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amount = self.safe_number(userTrade, 'size')
cost = self.safe_number(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
price = self.safe_number(userTrade, 'price')
feeCostFirst = self.safe_number(otherTrade, 'fee')
feeCostSecond = self.safe_number(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
if (feeCostFirst is not None) and (feeCostFirst != 0):
if (feeCostSecond is not None) and (feeCostSecond != 0):
fees = [
{
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
},
{
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': -feeCostFirst,
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecond is not None) and (feeCostSecond != 0):
fee = {
'cost': -feeCostSecond,
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': 0,
'currency': self.safe_currency_code(feeCurrencyId),
}
#
# simplified structures to show the underlying semantics
#
# # market/limit sell
#
# {
# "currency":"USDT",
# "fee":"-0.04647925", # ←--- fee in received quote currency
# "price":"129.13", # ←------ price
# "size":"30.98616393", # ←-- cost
# },
# {
# "currency":"ETH",
# "fee":"0",
# "price":"129.13",
# "size":"0.23996099", # ←--- amount
# },
#
# # market/limit buy
#
# {
# "currency":"ETH",
# "fee":"-0.00036049", # ←--- fee in received base currency
# "price":"129.16", # ←------ price
# "size":"0.240322", # ←----- amount
# },
# {
# "currency":"USDT",
# "fee":"0",
# "price":"129.16",
# "size":"31.03998952", # ←-- cost
# }
#
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
result = {
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
if fees is not None:
result['fees'] = fees
return result
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
symbol = None
if market is not None:
symbol = market['symbol']
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
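    # Sketch of the pairing performed above: two ledger-style rows sharing the same
    # trade_id (e.g. "18551601" in the sample below), one denominated in the quote
    # currency (the cost side) and one in the base currency (the amount side), are
    # grouped and collapsed into a single unified trade by parse_my_trade.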
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
        # this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
        # until either OKEX fixes the API or we work around this on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = await getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return await self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
async def fetch_position(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = None
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
            # 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
type = market['type']
if (type == 'futures') or (type == 'swap'):
method = type + 'GetInstrumentIdPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = await getattr(self, method)(self.extend(request, params))
#
# futures
#
# crossed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "2",
# "long_avail_qty": "2",
# "long_avg_cost": "8260",
# "long_settlement_price": "8260",
# "realised_pnl": "0.00020928",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_avg_cost": "8259.99",
# "short_settlement_price": "8259.99",
# "liquidation_price": "113.81",
# "instrument_id": "BTC-USD-191227",
# "leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T14:02:51.029Z",
# "margin_mode": "crossed",
# "short_margin": "0.00242197",
# "short_pnl": "6.63E-6",
# "short_pnl_ratio": "0.002477997",
# "short_unrealised_pnl": "6.63E-6",
# "long_margin": "0.00242197",
# "long_pnl": "-6.65E-6",
# "long_pnl_ratio": "-0.002478",
# "long_unrealised_pnl": "-6.65E-6",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8257.57"
# }
# ],
# "margin_mode": "crossed"
# }
#
# fixed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "4",
# "long_avail_qty": "4",
# "long_margin": "0.00323844",
# "long_liqui_price": "7762.09",
# "long_pnl_ratio": "0.06052306",
# "long_avg_cost": "8234.43",
# "long_settlement_price": "8234.43",
# "realised_pnl": "-0.00000296",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_margin": "0.00241105",
# "short_liqui_price": "9166.74",
# "short_pnl_ratio": "0.03318052",
# "short_avg_cost": "8295.13",
# "short_settlement_price": "8295.13",
# "instrument_id": "BTC-USD-191227",
# "long_leverage": "15",
# "short_leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T13:12:09.438Z",
# "margin_mode": "fixed",
# "short_margin_ratio": "0.10292507",
# "short_maint_margin_ratio": "0.005",
# "short_pnl": "7.853E-5",
# "short_unrealised_pnl": "7.853E-5",
# "long_margin_ratio": "0.07103743",
# "long_maint_margin_ratio": "0.005",
# "long_pnl": "1.9841E-4",
# "long_unrealised_pnl": "1.9841E-4",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8266.99"
# }
# ],
# "margin_mode": "fixed"
# }
#
# swap
#
# crossed margin mode
#
# {
# "margin_mode": "crossed",
# "timestamp": "2019-09-27T03:49:02.018Z",
# "holding": [
# {
# "avail_position": "3",
# "avg_cost": "59.49",
# "instrument_id": "LTC-USD-SWAP",
# "last": "55.98",
# "leverage": "10.00",
# "liquidation_price": "4.37",
# "maint_margin_ratio": "0.0100",
# "margin": "0.0536",
# "position": "3",
# "realized_pnl": "0.0000",
# "unrealized_pnl": "0",
# "settled_pnl": "-0.0330",
# "settlement_price": "55.84",
# "side": "long",
# "timestamp": "2019-09-27T03:49:02.018Z"
# },
# ]
# }
#
# fixed margin mode
#
# {
# "margin_mode": "fixed",
# "timestamp": "2019-09-27T03:47:37.230Z",
# "holding": [
# {
# "avail_position": "20",
# "avg_cost": "8025.0",
# "instrument_id": "BTC-USD-SWAP",
# "last": "8113.1",
# "leverage": "15.00",
# "liquidation_price": "7002.6",
# "maint_margin_ratio": "0.0050",
# "margin": "0.0454",
# "position": "20",
# "realized_pnl": "-0.0001",
# "unrealized_pnl": "0",
# "settled_pnl": "0.0076",
# "settlement_price": "8279.2",
# "side": "long",
# "timestamp": "2019-09-27T03:47:37.230Z"
# }
# ]
# }
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
async def fetch_positions(self, symbols=None, params={}):
await self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = await getattr(self, method)(params)
#
# futures
#
# ...
#
#
# swap
#
# ...
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if type == 'spot':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif type == 'futures':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires an underlying symbol for '" + type + "' markets")
argument = 'Underlying'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
marketInfo = self.safe_value(market, 'info', {})
settlementCurrencyId = self.safe_string(marketInfo, 'settlement_currency')
            settlementCurrencyCode = self.safe_currency_code(settlementCurrencyId)
            currency = self.currency(settlementCurrencyCode)
underlyingId = self.safe_string(marketInfo, 'underlying')
request['underlying'] = underlyingId
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
        #     request['type'] = 'number' # All types will be returned if this field is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
response = await getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
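    # Illustrative calls for the type-dependent arguments handled above (all values are
    # assumptions): spot ledgers are requested per currency code, while margin and swap
    # ledgers are requested per market symbol:
    #
    #     await exchange.fetch_ledger('BTC', params={'type': 'spot'})
    #     await exchange.fetch_ledger('ETH/USDT', params={'type': 'margin'})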
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# margin
#
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_number(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_number(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_number(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_params(self.urls['api']['rest'], {'hostname': self.hostname}) + request
type = self.get_path_authentication_type(path)
if type == 'public':
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
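    # Standalone sketch of the signing scheme implemented above (the timestamp, request
    # path and secret are assumptions shown purely for illustration):
    #
    #     import base64, hashlib, hmac
    #     prehash = '2019-03-18T07:08:25.000Z' + 'GET' + '/api/spot/v3/orders?instrument_id=BTC-USDT'
    #     signature = base64.b64encode(hmac.new(b'SECRET', prehash.encode(), hashlib.sha256).digest())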
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
        # a special case to handle the optionGetUnderlying interfering with
        # other endpoints containing this keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
feedback = self.id + ' ' + body
if code == 503:
# {"message":"name resolution failed"}
raise ExchangeNotAvailable(feedback)
#
# {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
#
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
| 47.352217 | 521 | 0.471667 |
5dfebdaaaf7e259a82f58fad3b83d632806686a0 | 4,475 | py | Python | src/mixins/fixed_cone_mixin.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | ["BSD-2-Clause-FreeBSD"] | 26 | 2015-02-06T02:59:17.000Z | 2021-11-15T18:13:27.000Z | src/mixins/fixed_cone_mixin.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | ["BSD-2-Clause-FreeBSD"] | 6 | 2015-06-14T04:43:43.000Z | 2019-10-27T11:03:30.000Z | src/mixins/fixed_cone_mixin.py | cvxgrp/qcml | ff5e378cfeeebcf3f85a6e30c3449585f9af869f | ["BSD-2-Clause-FreeBSD"] | 6 | 2015-03-14T07:40:56.000Z | 2019-12-30T23:11:36.000Z |
# TODO: (ECHU) presumably, this will work, but in actuality, I'm not sure...
# Need to test this.
#
# This code doesn't work yet. Requires expression slices.
#
from .. ast.constraints import SOC, SOCProd
from .. codes import SliceCoeff
from variable_creation_mixin import VariableCreatorMixin
class FixedConeMixin(VariableCreatorMixin):
""" This implements the fixed cone size behavior.
"""
def __init__(self, cone_size = None, *args, **kwargs):
super(FixedConeMixin, self).__init__(*args, **kwargs)
if cone_size is not None: self.cone_size = max(3,cone_size)
else: self.cone_size = 3
def visit_SOC(self, node):
if self.cone_size is not None:
# look at the size of the SOC
cone_length = 1
for e in node.left:
dim = e.shape.size(abstractdim_rewriter=self.abstractdim_rewriter)
cone_length += dim
while cone_length > self.cone_size:
# maximum number of elements on the lhs
max_lhs = self.cone_size - 1
# collect the new arguments
new_args = []
old_args = []
cum = 0
create_new = True
for e in node.left:
if create_new:
dim = e.shape.size(abstractdim_rewriter=self.abstractdim_rewriter)
# if the dimension of the current expression doesn't
# exceed the max allowable, just push onto argument stack
if cum + dim <= max_lhs:
new_args.append(e)
else:
# if it exceeds, only push the slice up to max_lhs
new_args.append(SliceCoeff(e, 0, max_lhs - cum))
# save the rest of the expression for another cone
old_args.append(SliceCoeff(e, max_lhs - cum, dim))
if cum + dim >= max_lhs:
create_new = False
else:
# just push into the old args
old_args.append(e)
cum += dim
# create a new variable
new_var = self.create_variable(1)
# process the new cone, which has the right size
super(FixedConeMixin,self).visit_SOC(SOC(new_var, new_args))
# process the old cone
old_args.append(new_var)
node.left = old_args
cone_length -= (max_lhs - 1) # the extra "1" is the rhs
if cone_length < self.cone_size:
# create a new variable and append to the node
new_length = self.cone_size - cone_length
new_var = self.create_variable(new_length)
node.left.append(new_var)
super(FixedConeMixin,self).visit_SOC(node)
def visit_SOCProd(self, node):
if self.cone_size is not None:
# look at the size of the SOC
n = node.shape.size(abstractdim_rewriter=self.abstractdim_rewriter)
cone_length = 1 + node.nargs
#print cone_length
while cone_length > self.cone_size:
# maximum number of elements on the lhs
max_lhs = self.cone_size - 1
# collect the new arguments
new_args = []
old_args = []
count = 0
for e in node.arglist:
if count < max_lhs: new_args.append(e)
else: old_args.append(e)
count += 1
new_var = self.create_variable(n)
# process the new cone, which has the right size
super(FixedConeMixin,self).visit_SOCProd(SOCProd(new_var, new_args))
# process the old cone
old_args.append(new_var)
node.arglist = old_args
cone_length -= (max_lhs - 1) # the extra "1" is the rhs
if cone_length < self.cone_size:
# create a new variable and append to the node
new_length = self.cone_size - cone_length
for i in range(new_length):
new_var = self.create_variable(n)
node.arglist.append(new_var)
super(FixedConeMixin,self).visit_SOCProd(node)
| 38.577586 | 90 | 0.529832 |
d4bb40f31065712f45c03f3ba22440f9f1b89b19 | 3,508 | py | Python | mne/viz/tests/test_evoked.py | jaeilepp/eggie | a7e812f27e33f9c43ac2e36c6b45a26a01530a06 | ["BSD-2-Clause"] | null | null | null | mne/viz/tests/test_evoked.py | jaeilepp/eggie | a7e812f27e33f9c43ac2e36c6b45a26a01530a06 | ["BSD-2-Clause"] | null | null | null | mne/viz/tests/test_evoked.py | jaeilepp/eggie | a7e812f27e33f9c43ac2e36c6b45a26a01530a06 | ["BSD-2-Clause"] | null | null | null |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import os.path as op
import warnings
import numpy as np
from numpy.testing import assert_raises
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
import matplotlib.pyplot as plt
from mne import io, read_events, Epochs
from mne import pick_types
from mne.layouts import read_layout
from mne.datasets import sample
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = sample.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.1
n_chan = 6
layout = read_layout('Vectorview-all')
def _get_raw():
return io.Raw(raw_fname, preload=False)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
# Use a subset of channels for plotting speed
picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int)
epochs = Epochs(raw, events[:5], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
def _get_epochs_delayed_ssp():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
epochs_delayed_ssp = Epochs(raw, events[:10], event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_evoked():
"""Test plotting of evoked
"""
evoked = _get_epochs().average()
with warnings.catch_warnings(record=True):
evoked.plot(proj=True, hline=[1])
# plot with bad channels excluded
evoked.plot(exclude='bads')
evoked.plot(exclude=evoked.info['bads']) # does the same thing
# test selective updating of dict keys is working.
evoked.plot(hline=[1], units=dict(mag='femto foo'))
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
evoked_delayed_ssp.plot(proj='interactive')
evoked_delayed_ssp.apply_proj()
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
evoked_delayed_ssp.info['projs'] = []
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive')
assert_raises(RuntimeError, evoked_delayed_ssp.plot,
proj='interactive', axes='foo')
evoked.plot_image(proj=True)
# plot with bad channels excluded
evoked.plot_image(exclude='bads')
evoked.plot_image(exclude=evoked.info['bads']) # does the same thing
plt.close('all')
| 32.785047 | 77 | 0.661345 |
04c0dfee375fa10451792eb5dc0b3eb7e94a2830 | 54 | py | Python | xuperchain/__init__.py | xuperchain/contract-sdk-py | 190c1f80d055ea29c3cb16e4eeea46845e06cd88 | ["Apache-2.0"] | null | null | null | xuperchain/__init__.py | xuperchain/contract-sdk-py | 190c1f80d055ea29c3cb16e4eeea46845e06cd88 | ["Apache-2.0"] | null | null | null | xuperchain/__init__.py | xuperchain/contract-sdk-py | 190c1f80d055ea29c3cb16e4eeea46845e06cd88 | ["Apache-2.0"] | 1 | 2021-04-02T03:50:57.000Z | 2021-04-02T03:50:57.000Z |
from xuperchain.contract_method import contract_method
| 54 | 54 | 0.925926 |
3a883431edaa642a8b96cac6844270b437843b04 | 5,548 | py | Python | pruning/main.py | acnagle/optimal-lottery-tickets | 9412547700c359339c819d8144e67c2f33a9e786 | ["Apache-2.0"] | 2 | 2020-11-26T00:37:23.000Z | 2021-10-03T18:26:11.000Z | pruning/main.py | acnagle/optimal-lottery-tickets | 9412547700c359339c819d8144e67c2f33a9e786 | ["Apache-2.0"] | null | null | null | pruning/main.py | acnagle/optimal-lottery-tickets | 9412547700c359339c819d8144e67c2f33a9e786 | ["Apache-2.0"] | 1 | 2021-06-24T11:36:04.000Z | 2021-06-24T11:36:04.000Z |
from __future__ import print_function
import os
import math
import random
import numpy as np
import sys
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import CosineAnnealingLR
import torch.autograd as autograd
from utils import data
from utils.train_test import train, test
import models
from args import args
def main():
print(args, '\n')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
device = get_device(args)
data = get_dataset(args)
model = get_model(args, data, device)
print('\n'+str(model)+'\n')
# Only pass the parameters where p.requires_grad == True to the optimizer
optimizer = optim.SGD(
[p for p in model.parameters() if p.requires_grad],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd,
)
criterion = nn.CrossEntropyLoss().to(device)
scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)
test_acc_arr = np.array([np.nan] * args.epochs)
start = time.time()
for epoch in range(args.epochs):
train(model, device, data.train_loader, optimizer, criterion, epoch+1, args.log_interval)
test_acc_arr[epoch] = test(model, device, data.test_loader, criterion, args.batch_size)
scheduler.step()
end = time.time()
total_time = (end - start) / 60
    # This calculation does not include the number of weights in convolutional layers, including baseline models,
    # since we are interested in observing the number of parameters in fully connected layers only. Convolutional
    # layers are randomly initialized and never trained/pruned in any of the models. Note that num_weights is equal
    # to the number of parameters that are being updated in the network.
    num_weights = sum(x.numel() for x in model.parameters() if x.requires_grad)
print('\nTotal time spent pruning/training: {:.2f} minutes'.format(total_time))
print('Total number of parameters in model:', num_weights)
if args.arch not in ['TwoLayerFC', 'FourLayerFC', 'LeNet5']:
num_params_pruned = int(num_weights * args.sparsity)
num_params_remaining = num_weights - num_params_pruned
print('Number of parameters in pruned model:', num_params_remaining)
else:
num_params_remaining = None
if args.save_results or args.save_model:
save(model, test_acc_arr, total_time, num_weights, num_params_remaining, args)
def get_device(args):
use_cuda = not args.no_cuda and torch.cuda.is_available()
if args.gpu is None:
device = torch.device('cuda:0' if use_cuda else 'cpu')
else:
device = 'cuda:' + str(args.gpu)
if use_cuda:
torch.cuda.device(device)
print('Using device {} for training and testing'.format(device))
return device
def get_dataset(args):
print('Benchmarking with the {} dataset'.format(args.dataset))
dataset = getattr(data, args.dataset.upper())(args)
return dataset
def get_model(args, data, device):
if args.redundancy <= 0:
raise ValueError('Redundancy factor must be greater than or equal to 1')
print('Creating model {}'.format(args.arch))
model = models.__dict__[args.arch](data.INPUT_SIZE, data.NUM_CLASSES, args)
if not args.no_cuda:
model.cuda(device)
if args.freeze_weights:
freeze_model_weights(model)
return model
def freeze_model_weights(model):
print('\nFreezing model weights:')
for weight_attr in ['weight', 'weight1', 'weight2']:
for n, m in model.named_modules():
if hasattr(m, weight_attr) and getattr(m, weight_attr) is not None:
print(f' No gradient to {n}.{weight_attr}')
getattr(m, weight_attr).requires_grad = False
if getattr(m, weight_attr).grad is not None:
print(f' Setting gradient of {n}.{weight_attr} to None')
getattr(m, weight_attr).grad = None
if hasattr(m, "bias") and m.bias is not None:
print(f' No gradient to {n}.bias')
m.bias.requires_grad = False
if m.bias.grad is not None:
print(f' Setting gradient of {n}.bias to None')
m.bias.grad = None
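# Minimal sketch of the effect of freeze_model_weights on a toy module (hypothetical
# example, mirroring the filtered parameter list built in main()):
#
#     layer = nn.Linear(4, 2)
#     freeze_model_weights(nn.Sequential(layer))
#     trainable = [p for p in layer.parameters() if p.requires_grad]   # -> [] after freezing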
def save(model, test_acc_arr, total_time, num_weights, num_params_remaining, args):
if args.arch not in ['TwoLayerFC', 'FourLayerFC', 'LeNet5']:
filename = 'r'+str(args.redundancy)+'_s'+str(args.sparsity)+'_'
else:
filename = ''
filename += 'e'+str(args.epochs)+'_h'+str(args.hidden_size)
if args.use_relu:
filename += '_relu'
if args.save_results:
save_dir = './results/'+args.arch
if not os.path.exists(save_dir):
os.makedirs(save_dir)
np.savez(save_dir+'/'+filename+'.npz',
args=vars(args),
test_acc=test_acc_arr,
total_time=total_time,
sparsity=args.sparsity,
num_weights=num_weights,
num_params_remaining=num_params_remaining
)
if args.save_model:
save_dir = './weights/'+args.arch
if not os.path.exists(save_dir):
os.makedirs(save_dir)
torch.save(model.state_dict(), save_dir+'/'+filename+'.pt')
if __name__ == '__main__':
main()
| 32.828402 | 476 | 0.650324 |
822b1b8e47ebc1e15cce2fdbacb755fccd62f9b0 | 837 | py | Python | tests/model_tasklog_tests.py | richard-ma/dress | 86e892673635319c0a1860edb33cdba7ed22a7fb | ["MIT"] | 2 | 2019-10-23T09:06:47.000Z | 2019-11-07T12:52:42.000Z | tests/model_tasklog_tests.py | richard-ma/dress | 86e892673635319c0a1860edb33cdba7ed22a7fb | ["MIT"] | 4 | 2017-12-28T01:44:42.000Z | 2017-12-31T13:08:18.000Z | tests/model_tasklog_tests.py | richard-ma/dress | 86e892673635319c0a1860edb33cdba7ed22a7fb | ["MIT"] | 2 | 2019-10-15T07:42:33.000Z | 2019-10-24T06:49:22.000Z |
#!/usr/bin/env python
import unittest
from flask_testing import TestCase
import dress
from dress.models import *
from manager import seed
class ModelTaskLogTestCase(TestCase):
def create_app(self):
app = dress.create_app()
        app.config['TESTING'] = True
return app
def setUp(self):
seed()
def tearDown(self):
pass
def test_create_setting_with_value(self):
task_name = 'unknown'
custom_data = {
'hello': 'world'
}
tl = TaskLog(task_name, custom_data)
tl.create()
query_tl = TaskLog.query.one()
self.assertEqual(query_tl.task_name, task_name)
self.assertEqual(query_tl.custom_data, custom_data)
self.assertEqual(custom_data['hello'], 'world')
if __name__ == '__main__':
unittest.main()
| 21.461538 | 59 | 0.634409 |
3ff234037e4aabe49b84f6ae1ccc32bffeb82207 | 14,618 | py | Python | ctapipe/reco/hillas_intersection.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null | ctapipe/reco/hillas_intersection.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null | ctapipe/reco/hillas_intersection.py | mpecimotika/ctapipe | ffd7930921f7139b761fbf1208da16dd302e97a6 | ["BSD-3-Clause"] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
TODO:
- Speed tests, need to be certain the looping on all telescopes is not killing
performance
- Introduce new weighting schemes
- Make intersect_lines code more readable
"""
import numpy as np
import itertools
import astropy.units as u
from ctapipe.reco.reco_algorithms import Reconstructor
from ctapipe.io.containers import ReconstructedShowerContainer
from ctapipe.coordinates import NominalFrame, HorizonFrame
from ctapipe.coordinates import TiltedGroundFrame, project_to_ground
from ctapipe.instrument import get_atmosphere_profile_functions
__all__ = [
'HillasIntersection'
]
class HillasIntersection(Reconstructor):
"""
This class is a simple re-implementation of Hillas parameter based event
    reconstruction, e.g. https://arxiv.org/abs/astro-ph/0607333
    In this case the Hillas parameters are all constructed in the shared
    angular (Nominal) system. Direction reconstruction is performed by
    extrapolation of the major axes of the Hillas parameters in the nominal
    system, and the weighted average of the crossing points is taken. Core
    reconstruction is performed by applying the same procedure in the
    tilted ground system.
    The height of maximum is reconstructed by the projection of the image
    centroid onto the shower axis, taking the weighted average of all images.
    Uncertainties on the positions are provided by taking the spread of the
    crossing points; however, this means that no uncertainty can be provided
"""
def __init__(self, atmosphere_profile_name="paranal"):
# We need a conversion function from height above ground to depth of maximum
# To do this we need the conversion table from CORSIKA
_ = get_atmosphere_profile_functions(atmosphere_profile_name)
self.thickness_profile, self.altitude_profile = _
def predict(self, hillas_parameters, tel_x, tel_y, array_direction):
"""
Parameters
----------
hillas_parameters: dict
Dictionary containing Hillas parameters for all telescopes
in reconstruction
tel_x: dict
            Dictionary containing telescope X positions on ground for all
telescopes in reconstruction
tel_y: dict
            Dictionary containing telescope Y positions on ground for all
telescopes in reconstruction
array_direction: HorizonFrame
Pointing direction of the array
Returns
-------
ReconstructedShowerContainer:
"""
src_x, src_y, err_x, err_y = self.reconstruct_nominal(hillas_parameters)
core_x, core_y, core_err_x, core_err_y = self.reconstruct_tilted(
hillas_parameters, tel_x, tel_y)
err_x *= u.rad
err_y *= u.rad
nom = NominalFrame(x=src_x * u.rad, y=src_y * u.rad,
array_direction=array_direction)
horiz = nom.transform_to(HorizonFrame())
result = ReconstructedShowerContainer()
result.alt, result.az = horiz.alt, horiz.az
tilt = TiltedGroundFrame(x=core_x * u.m, y=core_y * u.m,
pointing_direction=array_direction)
grd = project_to_ground(tilt)
result.core_x = grd.x
result.core_y = grd.y
x_max = self.reconstruct_xmax(nom.x, nom.y,
tilt.x, tilt.y,
hillas_parameters,
tel_x, tel_y,
90 * u.deg - array_direction.alt)
result.core_uncert = np.sqrt(core_err_x * core_err_x
+ core_err_y * core_err_y) * u.m
result.tel_ids = [h for h in hillas_parameters.keys()]
result.average_size = np.mean([h.intensity for h in hillas_parameters.values()])
result.is_valid = True
src_error = np.sqrt(err_x * err_x + err_y * err_y)
result.alt_uncert = src_error.to(u.deg)
result.az_uncert = src_error.to(u.deg)
result.h_max = x_max
result.h_max_uncert = np.nan
result.goodness_of_fit = np.nan
return result
def reconstruct_nominal(self, hillas_parameters, weighting="Konrad"):
"""
Perform event reconstruction by simple Hillas parameter intersection
in the nominal system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
weighting: string
Specify image weighting scheme used (HESS or Konrad style)
Returns
-------
Reconstructed event position in the nominal system
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
# Find all pairs of Hillas parameters
combos = itertools.combinations(list(hillas_parameters.values()), 2)
hillas_pairs = list(combos)
# Copy parameters we need to a numpy array to speed things up
h1 = list(
map(
lambda h: [h[0].psi.to(u.rad).value,
h[0].x.value,
h[0].y.value,
h[0].intensity], hillas_pairs
)
)
h1 = np.array(h1)
h1 = np.transpose(h1)
h2 = list(
map(lambda h: [h[1].psi.to(u.rad).value,
h[1].x.value,
h[1].y.value,
h[1].intensity], hillas_pairs)
)
h2 = np.array(h2)
h2 = np.transpose(h2)
# Perform intersection
sx, sy = self.intersect_lines(h1[1], h1[2], h1[0],
h2[1], h2[2], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
            weight_fn = self.weight_hess
# Weight by chosen method
weight = weight_fn(h1[3], h2[3])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(sx, weights=weight)
y_pos = np.average(sy, weights=weight)
var_x = np.average((sx - x_pos) ** 2, weights=weight)
var_y = np.average((sy - y_pos) ** 2, weights=weight)
# Copy into nominal coordinate
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_tilted(self, hillas_parameters, tel_x, tel_y,
weighting="Konrad"):
"""
Core position reconstruction by image axis intersection in the tilted
system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
tel_x: dict
Telescope X positions, tilted system
tel_y: dict
Telescope Y positions, tilted system
weighting: str
Weighting scheme for averaging of crossing points
Returns
-------
(float, float, float, float):
core position X, core position Y, core uncertainty X,
            core uncertainty Y
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
h = list()
tx = list()
ty = list()
# Need to loop here as dict is unordered
for tel in hillas_parameters.keys():
h.append(hillas_parameters[tel])
tx.append(tel_x[tel])
ty.append(tel_y[tel])
# Find all pairs of Hillas parameters
hillas_pairs = list(itertools.combinations(h, 2))
tel_x = list(itertools.combinations(tx, 2))
tel_y = list(itertools.combinations(ty, 2))
tx = np.zeros((len(tel_x), 2))
ty = np.zeros((len(tel_y), 2))
for i, _ in enumerate(tel_x):
tx[i][0], tx[i][1] = tel_x[i][0].value, tel_x[i][1].value
ty[i][0], ty[i][1] = tel_y[i][0].value, tel_y[i][1].value
tel_x = np.array(tx)
tel_y = np.array(ty)
# Copy parameters we need to a numpy array to speed things up
h1 = map(lambda h: [h[0].psi.to(u.rad).value, h[0].intensity], hillas_pairs)
h1 = np.array(list(h1))
h1 = np.transpose(h1)
h2 = map(lambda h: [h[1].psi.to(u.rad).value, h[1].intensity], hillas_pairs)
h2 = np.array(list(h2))
h2 = np.transpose(h2)
# Perform intersection
cx, cy = self.intersect_lines(tel_x[:, 0], tel_y[:, 0], h1[0],
tel_x[:, 1], tel_y[:, 1], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
            weight_fn = self.weight_hess
# Weight by chosen method
weight = weight_fn(h1[1], h2[1])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(cx, weights=weight)
y_pos = np.average(cy, weights=weight)
var_x = np.average((cx - x_pos) ** 2, weights=weight)
var_y = np.average((cy - y_pos) ** 2, weights=weight)
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_xmax(self, source_x, source_y, core_x, core_y,
hillas_parameters, tel_x, tel_y, zen):
"""
Geometrical depth of shower maximum reconstruction, assuming the shower
maximum lies at the image centroid
Parameters
----------
source_x: float
Source X position in nominal system
source_y: float
Source Y position in nominal system
core_x: float
            Core X position in tilted system
core_y: float
            Core Y position in tilted system
hillas_parameters: dict
Dictionary of hillas parameters objects
tel_x: dict
Dictionary of telescope X positions
tel_y: dict
            Dictionary of telescope Y positions
zen: float
Zenith angle of shower
Returns
-------
float:
Estimated depth of shower maximum
"""
cog_x = list()
cog_y = list()
amp = list()
tx = list()
ty = list()
# Loops over telescopes in event
for tel in hillas_parameters.keys():
cog_x.append(hillas_parameters[tel].x.to(u.rad).value)
cog_y.append(hillas_parameters[tel].y.to(u.rad).value)
amp.append(hillas_parameters[tel].intensity)
tx.append(tel_x[tel].to(u.m).value)
ty.append(tel_y[tel].to(u.m).value)
height = get_shower_height(source_x.to(u.rad).value,
source_y.to(u.rad).value,
np.array(cog_x),
np.array(cog_y),
core_x.to(u.m).value,
core_y.to(u.m).value,
np.array(tx),
np.array(ty))
weight = np.array(amp)
mean_height = np.sum(height * weight) / np.sum(weight)
# This value is height above telescope in the tilted system,
# we should convert to height above ground
mean_height *= np.cos(zen)
# Add on the height of the detector above sea level
mean_height += 2100 # TODO: replace with instrument info
if mean_height > 100000 or np.isnan(mean_height):
mean_height = 100000
mean_height *= u.m
        # Look up this height in the depth tables, then convert Hmax to Xmax
x_max = self.thickness_profile(mean_height.to(u.km))
# Convert to slant depth
x_max /= np.cos(zen)
return x_max
@staticmethod
def intersect_lines(xp1, yp1, phi1, xp2, yp2, phi2):
"""
Perform intersection of two lines. This code is borrowed from read_hess.
Parameters
----------
xp1: ndarray
X position of first image
yp1: ndarray
Y position of first image
phi1: ndarray
Rotation angle of first image
xp2: ndarray
X position of second image
yp2: ndarray
Y position of second image
phi2: ndarray
Rotation angle of second image
Returns
-------
ndarray of x and y crossing points for all pairs
"""
sin_1 = np.sin(phi1)
cos_1 = np.cos(phi1)
a1 = sin_1
b1 = -1 * cos_1
c1 = yp1 * cos_1 - xp1 * sin_1
sin_2 = np.sin(phi2)
cos_2 = np.cos(phi2)
a2 = sin_2
b2 = -1 * cos_2
c2 = yp2 * cos_2 - xp2 * sin_2
det_ab = (a1 * b2 - a2 * b1)
det_bc = (b1 * c2 - b2 * c1)
det_ca = (c1 * a2 - c2 * a1)
# if math.fabs(det_ab) < 1e-14 : # /* parallel */
# return 0,0
xs = det_bc / det_ab
ys = det_ca / det_ab
return xs, ys
@staticmethod
def weight_konrad(p1, p2):
return (p1 * p2) / (p1 + p2)
@staticmethod
def weight_hess(p1, p2):
return 1 / ((1 / p1) + (1 / p2))
@staticmethod
def weight_sin(phi1, phi2):
return np.abs(np.sin(np.fabs(phi1 - phi2)))
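# --- Illustrative sketch (not part of the original module) -------------------
# A hedged, made-up example of how the pairwise weights above behave: the
# "Konrad" weight p1 * p2 / (p1 + p2) is algebraically identical to the "HESS"
# form 1 / (1/p1 + 1/p2), while the sin term suppresses pairs whose image axes
# are nearly parallel. The numbers below are invented purely for illustration.
if __name__ == "__main__":
    amp1, amp2 = np.array([100.0, 500.0]), np.array([100.0, 50.0])
    psi1, psi2 = np.array([0.10, 0.10]), np.array([1.20, 0.15])
    pair_weight = (HillasIntersection.weight_konrad(amp1, amp2)
                   * HillasIntersection.weight_sin(psi1, psi2))
    print(pair_weight)  # the pair with the larger crossing angle dominates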
def get_shower_height(source_x, source_y, cog_x, cog_y,
core_x, core_y, tel_pos_x, tel_pos_y):
"""
Function to calculate the depth of shower maximum geometrically under the assumption
that the shower maximum lies at the brightest point of the camera image.
Parameters
----------
source_x: float
Event source position in nominal frame
source_y: float
Event source position in nominal frame
    cog_x: ndarray
        Image centroid X positions in the nominal frame
    cog_y: ndarray
        Image centroid Y positions in the nominal frame
    core_x: float
        Event core position in telescope tilted frame
    core_y: float
        Event core position in telescope tilted frame
    tel_pos_x: ndarray
        Telescope X positions in the tilted frame
    tel_pos_y: ndarray
        Telescope Y positions in the tilted frame
    Returns
    -------
    float: Estimated height of the shower maximum above the telescope
"""
# Calculate displacement of image centroid from source position (in rad)
disp = np.sqrt(np.power(cog_x - source_x, 2) +
np.power(cog_y - source_y, 2))
# Calculate impact parameter of the shower
impact = np.sqrt(np.power(tel_pos_x - core_x, 2) +
np.power(tel_pos_y - core_y, 2))
    # Distance above telescope is the ratio of these two (small angle)
height = impact / disp
return height
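# --- Illustrative sketch (not part of the original module) -------------------
# Hedged numeric check of the small-angle estimate above: a centroid displaced
# 0.01 rad from the source with a 150 m impact distance implies a shower
# maximum roughly 15 km above the telescope. All values below are invented
# purely for illustration.
if __name__ == "__main__":
    demo_height = get_shower_height(
        source_x=0.0, source_y=0.0,
        cog_x=np.array([0.01]), cog_y=np.array([0.0]),
        core_x=0.0, core_y=0.0,
        tel_pos_x=np.array([150.0]), tel_pos_y=np.array([0.0]))
    print(demo_height)  # ~[15000.] metres above the telescope, before zenith correction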
| 33.995349
| 88
| 0.577918
|
cfbb0558359bc109f34b756df9635c9844639ec7
| 2,350
|
py
|
Python
|
color-depth-reduction/CIFAR-10/CW_attack.py
|
jfc43/pixel-discretization
|
1543649e5172cb4f8226962a5ab5087091910418
|
[
"Apache-2.0"
] | 6
|
2019-03-08T23:09:20.000Z
|
2021-07-29T19:23:58.000Z
|
color-depth-reduction/CIFAR-10/CW_attack.py
|
jfc43/pixel-discretization
|
1543649e5172cb4f8226962a5ab5087091910418
|
[
"Apache-2.0"
] | null | null | null |
color-depth-reduction/CIFAR-10/CW_attack.py
|
jfc43/pixel-discretization
|
1543649e5172cb4f8226962a5ab5087091910418
|
[
"Apache-2.0"
] | 1
|
2020-02-05T20:07:19.000Z
|
2020-02-05T20:07:19.000Z
|
import tensorflow as tf
import numpy as np
from util import preprocess
class CWAttack:
def __init__(self, model, num_steps, step_size, epsilon, codes, batch_size, alpha):
self.model = model
self.num_steps = num_steps
self.step_size = step_size
self.codes = codes
self.xs = tf.Variable(np.zeros((batch_size, 32, 32, 3), dtype=np.float32),
name='modifier')
self.orig_xs = tf.placeholder(tf.float32, [batch_size, 32, 32, 3])
self.ys = tf.placeholder(tf.int32, [batch_size])
self.epsilon = epsilon
delta = tf.clip_by_value(self.xs, 0, 255) - self.orig_xs
delta = tf.clip_by_value(delta, -self.epsilon, self.epsilon)
self.do_clip_xs = tf.assign(self.xs, self.orig_xs+delta)
w = []
cw = []
for i in range(codes.shape[0]):
wt = tf.exp(-alpha*tf.abs(self.xs-codes[i]))
w.append(wt)
cw.append(codes[i]*wt)
self.z = sum(cw)/(sum(w))
logits = self.model.forward(self.z)
label_mask = tf.one_hot(self.ys, 10)
correct_logit = tf.reduce_sum(label_mask * logits, axis=1)
wrong_logit = tf.reduce_max((1-label_mask) * logits - 1e4*label_mask, axis=1)
self.loss = (correct_logit - wrong_logit)
start_vars = set(x.name for x in tf.global_variables())
optimizer = tf.train.AdamOptimizer(step_size*1)
grad,var = optimizer.compute_gradients(self.loss, [self.xs])[0]
self.train = optimizer.apply_gradients([(tf.sign(grad),var)])
end_vars = tf.global_variables()
self.new_vars = [x for x in end_vars if x.name not in start_vars]
self.new_vars_initializer = tf.variables_initializer(self.new_vars)
def perturb(self, x, y, sess):
sess.run(self.new_vars_initializer)
sess.run(self.xs.initializer)
sess.run(self.do_clip_xs,
{self.orig_xs: x})
for i in range(self.num_steps):
imgs = sess.run(self.xs)
points = imgs.reshape((-1,3))
t = preprocess(imgs, self.codes)
sess.run(self.train, feed_dict={self.ys: y,
self.z: t})
sess.run(self.do_clip_xs,
{self.orig_xs: x})
return sess.run(self.xs)
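# --- Illustrative sketch (not part of the original module) -------------------
# The attack above keeps the pixel "discretization" differentiable by replacing
# a hard nearest-code lookup with an exponentially weighted average of the
# codes (the w/cw sums in __init__). A minimal NumPy rendering of that formula
# is sketched below; the codes, pixel values and alpha are invented.
if __name__ == "__main__":
    demo_codes = np.array([0.0, 128.0, 255.0])
    demo_pixels = np.array([30.0, 100.0, 240.0])
    demo_alpha = 0.1
    w = np.exp(-demo_alpha * np.abs(demo_pixels[:, None] - demo_codes[None, :]))
    soft = (w * demo_codes[None, :]).sum(axis=1) / w.sum(axis=1)
    print(soft)  # approaches the nearest code for each pixel as alpha grows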
| 37.301587
| 87
| 0.58766
|
0d0d2ff8103a0b8723377d21503c48a8cc4f7b90
| 30,165
|
py
|
Python
|
built-in/TensorFlow/Official/cv/detection/MaskRcnn_ID0011_for_TensorFlow/dataloader_.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/TensorFlow/Official/cv/detection/MaskRcnn_ID0011_for_TensorFlow/dataloader_.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/TensorFlow/Official/cv/detection/MaskRcnn_ID0011_for_TensorFlow/dataloader_.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""Data loader and processing.
Defines input_fn of Mask-RCNN for TF Estimator. The input_fn includes training
data for category classification, bounding box regression, and number of
positive examples to normalize the loss during training.
"""
import tensorflow.compat.v1 as tf
import anchors
import coco_utils
import preprocess_ops
import spatial_transform_ops
from object_detection import tf_example_decoder
from utils import box_utils
from utils import dataloader_utils
from utils import input_utils
from dataloader import extract_objects_parser
MAX_NUM_INSTANCES = 100
MAX_NUM_VERTICES_PER_INSTANCE = 1500
MAX_NUM_POLYGON_LIST_LEN = 2 * MAX_NUM_VERTICES_PER_INSTANCE * MAX_NUM_INSTANCES
POLYGON_PAD_VALUE = coco_utils.POLYGON_PAD_VALUE
def _prepare_labels_for_eval(data,
target_num_instances=MAX_NUM_INSTANCES,
target_polygon_list_len=MAX_NUM_POLYGON_LIST_LEN,
use_instance_mask=False):
"""Create labels dict for infeed from data of tf.Example."""
image = data['image']
height = tf.shape(image)[0]
width = tf.shape(image)[1]
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.cast(classes, dtype=tf.float32)
num_labels = tf.shape(classes)[0]
boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [target_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(classes, -1,
[target_num_instances, 1])
is_crowd = data['groundtruth_is_crowd']
is_crowd = tf.cast(is_crowd, dtype=tf.float32)
is_crowd = preprocess_ops.pad_to_fixed_size(is_crowd, 0,
[target_num_instances, 1])
labels = {}
labels['width'] = width
labels['height'] = height
labels['groundtruth_boxes'] = boxes
labels['groundtruth_classes'] = classes
labels['num_groundtruth_labels'] = num_labels
labels['groundtruth_is_crowd'] = is_crowd
if use_instance_mask:
polygons = data['groundtruth_polygons']
polygons = preprocess_ops.pad_to_fixed_size(polygons, POLYGON_PAD_VALUE,
[target_polygon_list_len, 1])
labels['groundtruth_polygons'] = polygons
if 'groundtruth_area' in data:
groundtruth_area = data['groundtruth_area']
groundtruth_area = preprocess_ops.pad_to_fixed_size(
groundtruth_area, 0, [target_num_instances, 1])
labels['groundtruth_area'] = groundtruth_area
return labels
class InputReader(object):
"""Input reader for dataset."""
def __init__(self,
file_pattern,
mode=tf.estimator.ModeKeys.TRAIN,
num_examples=0,
use_fake_data=False,
use_instance_mask=False,
max_num_instances=MAX_NUM_INSTANCES,
max_num_polygon_list_len=MAX_NUM_POLYGON_LIST_LEN):
self._file_pattern = file_pattern
self._max_num_instances = max_num_instances
self._max_num_polygon_list_len = max_num_polygon_list_len
self._mode = mode
self._num_examples = num_examples
self._use_fake_data = use_fake_data
self._use_instance_mask = use_instance_mask
self._include_mask = True
self._skip_crowd_during_training = True
self._aug_rand_hflip = True
self._min_level = 2
self._max_level = 6
self._aug_scale_min = 0.5
self._aug_scale_max = 2
self._output_size = [1024, 1024]
self._mask_crop_size = 112
self._copy_paste_occluded_obj_threshold = 300
self._copy_paste_box_update_threshold = 10
def _transform_mask(self, image_shape, scale, offset, mask):
"""Transform input mask according to the image info (scale, offset)"""
image_scaled_shape = tf.round(
tf.cast(image_shape, tf.float32) * scale
)
image_scaled_shape = tf.cast(image_scaled_shape, tf.int32)
offset = tf.cast(offset, tf.int32)
mask_shape = tf.shape(mask)
mask = tf.image.pad_to_bounding_box(
mask, offset[0], offset[1],
tf.maximum(image_scaled_shape[0], mask_shape[0]) + offset[0],
tf.maximum(image_scaled_shape[1], mask_shape[1]) + offset[1],
)
mask = mask[0:image_scaled_shape[0], 0:image_scaled_shape[1], :]
mask = tf.image.resize(mask, image_shape)
return mask
def _get_occluded_bbox(self, updated_bbox, bbox):
# finds bbox coordinates which are occluded by the new pasted objects.
# if the difference between the bounding box coordinates of updated masks
# and the original bounding box are larger than a threshold then those
# coordinates are considered as occluded
return tf.greater(tf.abs(updated_bbox - tf.cast(bbox, bbox.dtype)),
self._copy_paste_box_update_threshold)
def _get_visible_masks_indices(self, masks, boxes_, cropped_boxes):
"""return indices of not fully occluded objects"""
occluded_objects = tf.reduce_any(
self._get_occluded_bbox(boxes_, cropped_boxes)
)
areas = tf.reduce_sum(masks, axis=[1, 2])
    # among the occluded objects, find the objects whose mask area is
    # less than copy_paste_occluded_obj_threshold. These objects are considered
    # fully occluded and will be removed from the ground truth
indices = tf.where(
tf.math.logical_or(
tf.greater(areas, self._copy_paste_occluded_obj_threshold),
tf.math.logical_not(occluded_objects)
)
)
indices = tf.reshape(indices, [-1])
return indices
def _compute_boxes_using_masks(self, masks, image_shape, image_info, image_scale, offset):
"""computes bounding boxes using masks"""
masks = tf.cast(masks, tf.int8)
x = tf.reduce_max(masks, axis=1)
xmin = tf.cast(tf.argmax(x, 1), tf.int16)
xmax = tf.cast(image_shape[1], tf.int16) - tf.cast(tf.argmax(tf.reverse(x, [1]), 1), tf.int16)
y = tf.reduce_max(masks, axis=2)
ymin = tf.cast(tf.argmax(y, 1), tf.int16)
ymax = tf.cast(image_shape[0], tf.int16) - tf.cast(tf.argmax(tf.reverse(y, [1]), 1), tf.int16)
bbox = tf.stack([ymin, xmin, ymax, xmax], -1)
# clips boxes
bbox = tf.cast(bbox, tf.float32)
bbox = input_utils.resize_and_crop_boxes(
bbox, image_scale, image_info[1, :], offset
)
bbox += tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
bbox /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
return bbox
def _parse_train_data_extractObjs(self, data):
"""
parses data for training.
Args:
data: the decoded tensor dictionary from tfexampledecoder
returns:
image: image tensor that is preprocessed to have normalized value and dimension [output_size[0], output_size[1], 4]
labels: a dictionary of tensors used for training.
"""
classes = data['groundtruth_classes2']
boxes = data['groundtruth_boxes2']
if self._include_mask:
masks = data['groundtruth_instance_masks2']
is_crowds = data['groundtruth_is_crowd2']
# skips annotation with 'is_crowd' = True
if self._skip_crowd_during_training:
num_groundtrtuhs = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtrtuhs, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(data['groundtruth_is_crowd2']), 0),
lambda: data['groundtruth_is_crowd2'],
lambda: tf.zeros_like(data['groundtruth_classes2'], dtype=tf.bool)
)
indices = tf.where(tf.logical_not(indices))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
if self._include_mask:
masks = tf.gather_nd(masks, indices)
# gets original image and its size
image = data['image2']
image_shape = tf.shape(image)[0:2]
# normalizes image with mean and std pixel values
image = input_utils.normalize_image(image)
# flips image randomly during training
if self._aug_rand_hflip:
if self._include_mask:
image, boxes, masks = input_utils.random_horizontal_flip(
image, boxes, masks
)
else:
image, boxes = input_utils.random_horizontal_flip(
image, boxes
)
    # converts boxes from normalized coordinates to pixel coordinates.
# now the coordinates of boxes are w.r.t. the original image.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# resizes and crops image
image, image_info, _ = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level
),
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max
)
# resizes and crops boxes
# now the coordinates of boxes are w.r.t. the scaled image.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset
)
# filters out groundtruth boxes that are all zeros
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
if self._include_mask:
masks = tf.gather(masks, indices)
uncropped_masks = tf.cast(masks, tf.int8)
uncropped_masks = tf.expand_dims(uncropped_masks, axis=3)
uncropped_masks = input_utils.resize_and_crop_masks(
uncropped_masks, image_scale, self._output_size, offset
)
# transfer boxes to the original image space and do normalization
cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
cropped_boxes = box_utils.normalize_boxes(cropped_boxes, image_shape)
num_masks = tf.shape(masks)[0]
masks = tf.image.crop_and_resize(
tf.expand_dims(masks, axis=-1),
cropped_boxes,
box_indices = tf.range(num_masks, dtype=tf.int32),
crop_size = [self._mask_crop_size, self._mask_crop_size],
method='bilinear'
)
masks = tf.squeeze(masks, axis=-1)
indices = tf.range(start=0, limit=tf.shape(classes)[0], dtype=tf.int32)
    # samples the number of masks for pasting
m = tf.random.uniform(shape=[], maxval=tf.shape(classes)[0]+1, dtype=tf.int32)
m = tf.math.minimum(m, tf.shape(classes)[0])
# shuffle the indices of objects and keep the first m objects for pasting
shuffled_indices = tf.random.shuffle(indices)
shuffled_indices = tf.slice(shuffled_indices, [0], [m])
boxes = tf.gather(boxes, shuffled_indices)
masks = tf.gather(masks, shuffled_indices)
classes = tf.gather(classes, shuffled_indices)
uncropped_masks = tf.gather(uncropped_masks, shuffled_indices)
pasted_objects_mask = tf.reduce_max(uncropped_masks, 0)
pasted_objects_mask = tf.cast(pasted_objects_mask, tf.bool)
labels = {
'image':image,
'image_info':image_info,
'num_groundtrtuhs':tf.shape(classes)[0],
'boxes':boxes,
'masks':masks,
'classes':classes,
'pasted_objects_mask':pasted_objects_mask,
}
return labels
def _create_dataset_fn(self):
# Prefetch data from files.
def _prefetch_dataset(filename):
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
return _prefetch_dataset
def _create_example_decoder(self):
return tf_example_decoder.TfExampleDecoder(
use_instance_mask=self._use_instance_mask)
def _create_dataset_parser_fn(self, params):
"""Create parser for parsing input data (dictionary)."""
example_decoder = self._create_example_decoder()
def _dataset_parser(value, value2=None):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: A dictionary contains an image and groundtruth annotations.
Returns:
features: a dictionary that contains the image and auxiliary
information. The following describes {key: value} pairs in the
dictionary.
          image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_size, image_size, 3]
image_info: image information that includes the original height and
            width, the scale of the processed image to the original image, and
the scaled height and width.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
labels: a dictionary that contains auxiliary information plus (optional)
labels. The following describes {key: value} pairs in the dictionary.
`labels` is only for training.
score_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of objectiveness score at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
gt_boxes: Groundtruth bounding box annotations. The box is represented
            in [y1, x1, y2, x2] format. The tensor is padded with -1 to the
fixed dimension [self._max_num_instances, 4].
          gt_classes: Groundtruth classes annotations. The tensor is padded
with -1 to the fixed dimension [self._max_num_instances].
          cropped_gt_masks: groundtruth masks cropped by the bounding box and
resized to a fixed size determined by params['gt_mask_size']
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
# extract data2 objs here
if value2 is None:
data2 = self._parse_train_data_extractObjs(data)
else:
data2 = value2
data['groundtruth_is_crowd'] = tf.cond(
tf.greater(tf.size(data['groundtruth_is_crowd']), 0),
lambda: data['groundtruth_is_crowd'],
lambda: tf.zeros_like(data['groundtruth_classes'], dtype=tf.bool))
image = data['image']
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
orig_image = image
source_id = data['source_id']
source_id = tf.where(tf.equal(source_id, tf.constant('')), '-1',
source_id)
source_id = tf.string_to_number(source_id)
if (self._mode == tf.estimator.ModeKeys.PREDICT or
self._mode == tf.estimator.ModeKeys.EVAL):
image = preprocess_ops.normalize_image(image)
if params['resize_method'] == 'retinanet':
image, image_info, _, _, _ = preprocess_ops.resize_crop_pad(
image, params['image_size'], 2 ** params['max_level'])
else:
image, image_info, _, _, _ = preprocess_ops.resize_crop_pad_v2(
image, params['short_side'], params['long_side'],
2 ** params['max_level'])
if params['precision'] == 'bfloat16':
image = tf.cast(image, dtype=tf.bfloat16)
features = {
'images': image,
'image_info': image_info,
'source_ids': source_id,
}
if params['visualize_images_summary']:
resized_image = tf.image.resize_images(orig_image,
params['image_size'])
features['orig_images'] = resized_image
if (params['include_groundtruth_in_features'] or
self._mode == tf.estimator.ModeKeys.EVAL):
labels = _prepare_labels_for_eval(
data,
target_num_instances=self._max_num_instances,
target_polygon_list_len=self._max_num_polygon_list_len,
use_instance_mask=params['include_mask'])
return {'features': features, 'labels': labels}
else:
return {'features': features}
elif self._mode == tf.estimator.ModeKeys.TRAIN:
instance_masks = None
if self._use_instance_mask:
instance_masks = data['groundtruth_instance_masks']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
if not params['use_category']:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
if (params['skip_crowd_during_training'] and
self._mode == tf.estimator.ModeKeys.TRAIN):
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
if self._use_instance_mask:
instance_masks = tf.gather_nd(instance_masks, indices)
image = preprocess_ops.normalize_image(image)
if params['input_rand_hflip']:
flipped_results = (
preprocess_ops.random_horizontal_flip(
image, boxes=boxes, masks=instance_masks))
if self._use_instance_mask:
image, boxes, instance_masks = flipped_results
else:
image, boxes = flipped_results
# Scaling, jittering and padding.
if params['resize_method'] == 'retinanet':
image_shape = tf.shape(image)[0:2]
boxes = box_utils.denormalize_boxes(boxes, image_shape)
image, image_info_copyPaste, image_info = input_utils.resize_and_crop_image(
image,
params['image_size'],
padded_size=input_utils.compute_padded_size(
params['image_size'], 2 ** self._max_level
),
aug_scale_min=params['aug_scale_min'],
aug_scale_max=params['aug_scale_max']
)
# resizes and crops boxes
# now the coordinates of boxes are w.r.t. the scaled image
image_scale = image_info_copyPaste[2, :]
offset = image_info_copyPaste[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info_copyPaste[1, :], offset
)
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
else:
image, image_info, boxes, classes, cropped_gt_masks = (
preprocess_ops.resize_crop_pad_v2(
image,
params['short_side'],
params['long_side'],
2 ** params['max_level'],
aug_scale_min=params['aug_scale_min'],
aug_scale_max=params['aug_scale_max'],
boxes=boxes,
classes=classes,
masks=instance_masks,
crop_mask_size=params['gt_mask_size']))
_copy_paste_aug = True
if _copy_paste_aug:
# paste objects and creates a new composed image
compose_mask = tf.cast(data2['pasted_objects_mask'],image.dtype) * tf.ones_like(image)
image = image * (1 - compose_mask) + data2['image'] * compose_mask
if self._include_mask:
masks = tf.gather(instance_masks, indices)
if _copy_paste_aug:
pasted_objects_mask = self._transform_mask(
image_shape, image_scale, offset,
tf.cast(data2['pasted_objects_mask'], tf.int8)
)
pasted_objects_mask = tf.cast(pasted_objects_mask, tf.int8)
pasted_objects_mask = tf.expand_dims(
tf.squeeze(pasted_objects_mask, -1), 0) * tf.ones(tf.shape(masks), dtype=pasted_objects_mask.dtype)
masks = tf.where(
tf.equal(pasted_objects_mask, 1), tf.zeros_like(masks), masks
)
cropped_boxes = boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
cropped_boxes /= tf.tile(tf.expand_dims(image_scale, axis=0), [1, 2])
if _copy_paste_aug:
# computes bounding boxes of objects using updated masks
boxes_ = self._compute_boxes_using_masks(
masks, image_shape, image_info_copyPaste, image_scale, offset
)
# filters out objects that are fully occluded in the new image
indices = self._get_visible_masks_indices(
masks, boxes_, cropped_boxes
)
boxes_ = tf.gather(boxes_, indices)
boxes = tf.gather(boxes, indices)
cropped_boxes = tf.gather(cropped_boxes, indices)
masks = tf.gather(masks, indices)
classes = tf.gather(classes, indices)
              # update bounding boxes which are occluded by new pasted objects
def update_bboxes(boxes_, cropped_boxes):
occluded_bbox = self._get_occluded_bbox(boxes_, cropped_boxes)
cropped_boxes = tf.where(
occluded_bbox,
tf.cast(boxes_, cropped_boxes.dtype),
cropped_boxes
)
boxes = input_utils.resize_and_crop_boxes(
cropped_boxes, image_scale, image_info_copyPaste[1, :], offset
)
return boxes, cropped_boxes
boxes, cropped_boxes = update_bboxes(boxes_, cropped_boxes)
cropped_boxes = box_utils.normalize_boxes(cropped_boxes, image_shape)
num_masks = tf.shape(masks)[0]
masks = tf.image.crop_and_resize(
tf.expand_dims(masks, axis=-1),
cropped_boxes,
box_indices=tf.range(num_masks, dtype=tf.int32),
crop_size=[self._mask_crop_size, self._mask_crop_size],
method='bilinear'
)
masks = tf.squeeze(masks, axis=-1)
cropped_gt_masks = masks
else:
cropped_gt_masks = None
if _copy_paste_aug:
if self._include_mask:
masks = tf.concat([masks, data2['masks']], axis=0)
data2['classes'] = tf.reshape(tf.cast(data2['classes'], dtype=tf.float32), [-1, 1])
boxes = tf.concat([boxes, data2['boxes']], axis=0)
classes = tf.concat([classes, data2['classes']], axis=0)
if cropped_gt_masks is not None:
cropped_gt_masks = tf.pad(
cropped_gt_masks,
paddings=tf.constant([[0, 0,], [2, 2,], [2, 2]]),
mode='CONSTANT',
constant_values=0.)
padded_height, padded_width, _ = image.get_shape().as_list()
padded_image_size = (padded_height, padded_width)
input_anchors = anchors.Anchors(
params['min_level'],
params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
padded_image_size)
anchor_labeler = anchors.AnchorLabeler(
input_anchors,
params['num_classes'],
params['rpn_positive_overlap'],
params['rpn_negative_overlap'],
params['rpn_batch_size_per_im'],
params['rpn_fg_fraction'])
# Assign anchors.
score_targets, box_targets = anchor_labeler.label_anchors(
boxes, classes)
# Pad groundtruth data.
boxes = preprocess_ops.pad_to_fixed_size(
boxes, -1, [self._max_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(
classes, -1, [self._max_num_instances, 1])
# Pads cropped_gt_masks.
if self._use_instance_mask:
cropped_gt_masks = tf.reshape(
cropped_gt_masks, tf.stack([tf.shape(cropped_gt_masks)[0], -1]))
cropped_gt_masks = preprocess_ops.pad_to_fixed_size(
cropped_gt_masks, -1,
[self._max_num_instances, (params['gt_mask_size'] + 4) ** 2])
cropped_gt_masks = tf.reshape(
cropped_gt_masks,
[self._max_num_instances, params['gt_mask_size'] + 4,
params['gt_mask_size'] + 4])
if params['precision'] == 'bfloat16':
image = tf.cast(image, dtype=tf.bfloat16)
features = {
'images': image,
'image_info': image_info,
'source_ids': source_id,
}
labels = {}
for level in range(params['min_level'], params['max_level'] + 1):
labels['score_targets_%d' % level] = score_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
labels['gt_boxes'] = boxes
labels['gt_classes'] = classes
if self._use_instance_mask:
labels['cropped_gt_masks'] = cropped_gt_masks
return features, labels
return _dataset_parser
def get_data(self, _file_pattern, dataset_fn, input_context=None):
dataset = tf.data.Dataset.list_files(
_file_pattern, shuffle=(self._mode == tf.estimator.ModeKeys.TRAIN), seed=0)
if input_context is not None:
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
if self._mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.repeat()
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
dataset_fn,
cycle_length=32,
sloppy=(self._mode == tf.estimator.ModeKeys.TRAIN)))
if self._mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.shuffle(64, seed=0)
return dataset
def _create_dataset_parser_fn_pre(self, params=None):
parse_pre = extract_objects_parser.Parser(
[1024, 1024],
params['min_level'],
params['max_level'],
aug_rand_hflip=True,
aug_scale_min=0.1,
aug_scale_max=2.0,
skip_crowd_during_training=True,
include_mask=True,
mask_crop_size=112
)
return parse_pre
def __call__(self, params, input_context=None):
dataset_parser_fn = self._create_dataset_parser_fn(params)
dataset_fn = self._create_dataset_fn()
batch_size = params['batch_size'] if 'batch_size' in params else 1
dataset = self.get_data(self._file_pattern, dataset_fn, input_context)
dataset_p = self.get_data(self._file_pattern, dataset_fn, input_context)
pre_parser_fn = self._create_dataset_parser_fn_pre(params)
dataset_p = dataset_p.map(
pre_parser_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
dataset_p = dataset_p.prefetch(tf.data.experimental.AUTOTUNE)
dataset_p = dataset_p.filter(
lambda data:tf.greater(data['num_groundtrtuhs'], 0)
)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
dataset = tf.data.Dataset.zip((dataset, dataset_p))
# Parse the fetched records to input tensors for model function.
dataset = dataset.map(
dataset_parser_fn,
num_parallel_calls=256)
dataset = dataset.batch(batch_size, drop_remainder=True)
# Enable TPU performance optimization: transpose input, space-to-depth
# image transform, or both.
if (self._mode == tf.estimator.ModeKeys.TRAIN and
(params['transpose_input'] or
(params['backbone'].startswith('resnet') and
params['conv0_space_to_depth_block_size'] > 0))):
def _transform_images(features, labels):
"""Transforms images."""
images = features['images']
if (params['backbone'].startswith('resnet') and
params['conv0_space_to_depth_block_size'] > 0):
# Transforms images for TPU performance.
features['images'] = (
spatial_transform_ops.fused_transpose_and_space_to_depth(
images,
params['conv0_space_to_depth_block_size'],
params['transpose_input']))
else:
features['images'] = tf.transpose(features['images'], [1, 2, 3, 0])
return features, labels
dataset = dataset.map(_transform_images, num_parallel_calls=256)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
if self._num_examples > 0:
dataset = dataset.take(self._num_examples)
if self._use_fake_data:
      # Turn this dataset into a semi-fake dataset which always loops over the
# first batch. This reduces variance in performance and is useful in
# testing.
dataset = dataset.take(1).cache().repeat()
return dataset
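# --- Illustrative sketch (not part of the original module) -------------------
# The copy-paste augmentation in _dataset_parser composes the destination image
# with the pasted objects through a binary mask: image * (1 - mask) + paste * mask.
# A tiny NumPy rendering of that blend, using made-up 2x2 "images", is shown
# below purely for illustration.
if __name__ == "__main__":
    import numpy as np
    demo_dst = np.full((2, 2, 3), 0.2, dtype=np.float32)
    demo_src = np.full((2, 2, 3), 0.9, dtype=np.float32)
    demo_mask = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float32)[..., None]
    composed = demo_dst * (1.0 - demo_mask) + demo_src * demo_mask
    print(composed[..., 0])  # pasted pixels replace the background where mask == 1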
| 41.954103
| 121
| 0.641273
|
ededdc8cbb2813f08bc96cfb328b7084691608eb
| 3,223
|
py
|
Python
|
dnnf/model.py
|
dlshriver/DNNF
|
898e4df69be35312ace622ce2c47f7bf8d6a0ffe
|
[
"MIT"
] | 5
|
2021-01-13T19:52:03.000Z
|
2021-12-10T02:54:35.000Z
|
dnnf/model.py
|
dlshriver/DNNF
|
898e4df69be35312ace622ce2c47f7bf8d6a0ffe
|
[
"MIT"
] | 1
|
2021-09-15T20:45:17.000Z
|
2021-09-23T15:47:10.000Z
|
dnnf/model.py
|
dlshriver/DNNF
|
898e4df69be35312ace622ce2c47f7bf8d6a0ffe
|
[
"MIT"
] | 1
|
2021-11-03T02:56:30.000Z
|
2021-11-03T02:56:30.000Z
|
import numpy as np
import torch
import torch.nn.functional as F
from .reduction import HPolyProperty
class FalsificationModel:
def __init__(self, prop: HPolyProperty):
self.prop = prop
self.op_graph = prop.suffixed_op_graph()
self.input_details = self.op_graph.input_details
self.input_shape = tuple(
int(d) if d > 0 else 1 for d in self.input_details[0].shape
)
self.input_dtype = self.input_details[0].dtype
self.input_torch_dtype = torch.from_numpy(
np.ones((1,), dtype=self.input_dtype)
).dtype
self.model = self.as_pytorch()
def __call__(self, *args, **kwargs):
return self.model(*args, **kwargs)
def __reduce__(self):
return FalsificationModel, (self.prop,)
@property
def input_lower_bound(self):
lower_bounds = self.prop.input_lower_bounds
assert len(lower_bounds) == 1
lower_bound = lower_bounds[0]
return torch.from_numpy(lower_bound.astype(self.input_dtype)).to(
self.model.device
)
@property
def input_upper_bound(self):
upper_bounds = self.prop.input_upper_bounds
assert len(upper_bounds) == 1
upper_bound = upper_bounds[0]
return torch.from_numpy(upper_bound.astype(self.input_dtype)).to(
self.model.device
)
def as_pytorch(self):
from .pytorch import convert
return convert(self.op_graph.output_operations).eval()
def as_tf(self):
return self.op_graph.as_tf()
def loss(self, y):
return F.cross_entropy(
y.reshape((1, -1)), torch.Tensor([0]).long().to(y.device)
) - F.cross_entropy(y.reshape((1, -1)), torch.Tensor([1]).long().to(y.device))
def project_input(self, x):
y = x.detach()
lb = self.input_lower_bound
ub = self.input_upper_bound
lb_violations = y < lb
ub_violations = y > ub
y[lb_violations] = lb[lb_violations]
y[ub_violations] = ub[ub_violations]
return y.detach()
def sample(self):
x = (
torch.rand(
self.input_shape,
device=self.model.device,
dtype=self.input_torch_dtype,
)
* (self.input_upper_bound - self.input_lower_bound)
+ self.input_lower_bound
)
return x.detach()
def step(self, x, y, alpha=0.05):
loss = self.loss(y)
loss.backward()
gradients = x.grad
neg_grads = gradients < 0
pos_grads = gradients > 0
lb = self.input_lower_bound
ub = self.input_upper_bound
gradients[(x == lb) & neg_grads] = 0
gradients[(x == ub) & pos_grads] = 0
if gradients.abs().max().item() < 1e-12:
return
lb = self.input_lower_bound
ub = self.input_upper_bound
epsilon = (ub - lb) / 2
if len(gradients.shape) == 1:
x = x + F.normalize(gradients.reshape(1, -1)).flatten() * epsilon
else:
x = x + F.normalize(gradients) * epsilon
return x.detach()
def validate(self, x):
return self.prop.validate_counter_example(x)
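# --- Illustrative sketch (not part of the original module) -------------------
# A hedged outline of how the pieces above are typically combined into a
# falsification loop: sample a point inside the input box, run the model, take
# a gradient step, project back into the box and check the candidate. The
# helper below is an assumption for illustration only; it is not part of the
# dnnf API, and validate() is assumed to accept the projected point as an array.
def _example_falsify(model: FalsificationModel, max_iters: int = 100):
    x = model.sample().requires_grad_()  # random start inside the input bounds
    for _ in range(max_iters):
        y = model(x)
        stepped = model.step(x, y)       # returns None when gradients vanish
        if stepped is None:
            x = model.sample().requires_grad_()
            continue
        x = model.project_input(stepped).requires_grad_()
        if model.validate(x.detach().cpu().numpy()):
            return x                     # counter-example candidate found
    return None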
| 30.990385
| 86
| 0.591374
|
0b4b37e1b8ff30adeafa90e83bcf4e3c97bf4770
| 12,240
|
py
|
Python
|
lib/python/treadmill/context.py
|
drienyov/treadmill
|
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/context.py
|
drienyov/treadmill
|
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
|
[
"Apache-2.0"
] | null | null | null |
lib/python/treadmill/context.py
|
drienyov/treadmill
|
ce21537cd9a2fdb0567ac2aa3de1afcb2f6861de
|
[
"Apache-2.0"
] | null | null | null |
"""Treadmill context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
import socket
from treadmill import plugin_manager
_LOGGER = logging.getLogger(__name__)
class ContextError(Exception):
"""Raised when unable to connect to LDAP or Zookeeper.
"""
pass
def required(msg):
"""Raises error if return value of function is None.
"""
def _decorator(func):
"""Actual decorator.
"""
@functools.wraps(func)
def decorated_function(*args, **kwargs):
"""Decorated function, checks result is not None.
"""
result = func(*args, **kwargs)
if result is None:
raise ContextError(msg)
return result
return decorated_function
return _decorator
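# --- Illustrative sketch (not part of the original module) -------------------
# Hedged example of the decorator above: wrapping a getter so that a missing
# value surfaces as a ContextError instead of a silent None. The function
# below is hypothetical and only illustrates the intended usage.
if __name__ == "__main__":
    @required('Cannot resolve example attribute.')
    def _example_getter(value=None):
        return value
    print(_example_getter('ok'))      # non-None results pass through
    try:
        _example_getter(None)
    except ContextError as err:       # raised because the result was None
        print(err)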
class DnsContext:
"""DNS context.
"""
__slots__ = (
'_context',
'_dns',
)
def __init__(self, ctx):
self._context = ctx
self._dns = None
@property
def _resolver(self):
if self._dns is not None:
return self._dns
dns = plugin_manager.load('treadmill.context', 'dns')
dns.init(self._context)
self._dns = dns
return self._dns
def admin_api_srv(self):
"""Get Admin API SRV record data.
"""
(srv_entry, _proto) = self._resolver.lookup(
self._context,
'admin_api'
)
return srv_entry
def state_api_srv(self, cell):
"""Get State API SRV record data.
"""
(srv_entry, _proto) = self._resolver.lookup(
self._context,
'state_api',
scope=self._resolver.cell_scope(cell)
)
return srv_entry
def cell_api_srv(self, cell):
"""Get Cell API SRV record data.
"""
(srv_entry, _proto) = self._resolver.lookup(
self._context,
'cell_api',
scope=self._resolver.cell_scope(cell)
)
return srv_entry
def ws_api_srv(self, cell):
"""Get Websocket API SRV record data.
"""
(srv_entry, _proto) = self._resolver.lookup(
self._context,
'ws_api',
scope=self._resolver.cell_scope(cell)
)
return srv_entry
class AdminContext:
"""Ldap context.
"""
__slots__ = (
'_context',
'_conn',
)
def __init__(self, ctx):
self._context = ctx
self._conn = None
@property
@required('Cannot resolve LDAP suffix.')
def ldap_suffix(self):
"""LDAP suffix getter.
"""
return self._context.get('ldap_suffix', resolve=False)
@property
def user(self):
"""User, getter.
"""
return self._context.get('ldap_user', resolve=False)
@user.setter
def user(self, value):
"""User, setter.
"""
if value != self._context.get('ldap_user', resolve=False):
self._conn = None
self._context.set('ldap_user', value)
@property
def password(self):
"""Password, getter.
"""
return self._context.get('ldap_pwd', resolve=False)
@password.setter
def password(self, value):
"""Password, setter.
"""
self._context.set('ldap_pwd', value)
if self.user is None:
self.user = 'cn=Manager,%s' % self.ldap_suffix
@property
@required('Cannot resolve LDAP url.')
def url(self):
"""URL, getter.
"""
return self._context.get('ldap_url', resolve=True)
@url.setter
def url(self, value):
"""Set URL, then nullify the connection.
"""
self._context.set('ldap_url', value)
self._conn = None
@property
def write_url(self):
"""Get the LDAP server URL for write access.
"""
url = self._context.get('ldap_write_url', resolve=True)
return url
@write_url.setter
def write_url(self, value):
"""Set the LDAP server URL for write access.
"""
self._context.set('ldap_write_url', value)
self._conn = None
@property
def conn(self):
"""Lazily establishes connection to admin LDAP.
"""
if self._conn:
return self._conn
plugin = plugin_manager.load('treadmill.context', 'admin')
self._conn = plugin.connect(self.url, self.write_url, self.ldap_suffix,
self.user, self.password)
return self._conn
class ZkContext:
"""Zookeeper context.
"""
__slots__ = (
'_context',
'_conn',
'_listeners',
)
def __init__(self, ctx):
self._context = ctx
self._conn = None
self._listeners = []
def add_listener(self, listener):
"""Add a listener.
"""
self._listeners.append(listener)
@property
@required('Cannot resolve Zookeeper connection string.')
def url(self):
"""Resolves and return context zk url.
"""
return self._context.get('zk_url', resolve=True)
@url.setter
def url(self, value):
"""Sets context zk url.
"""
self._context.set('zk_url', value)
@property
def conn(self):
"""Lazily creates Zookeeper client.
"""
if self._conn:
return self._conn
_LOGGER.debug('Connecting to Zookeeper %s', self.url)
plugin = plugin_manager.load('treadmill.context', 'zookeeper')
self._conn = plugin.connect(self.url)
if self._listeners:
for listener in self._listeners:
self._conn.add_listener(listener)
return self._conn
@conn.setter
def conn(self, zkclient):
"""Explicitely set connection."""
self._conn = zkclient
class Context:
"""Global connection context.
"""
__slots__ = (
'ldap',
'zk',
'dns',
'_resolvers',
'_plugins',
'_profile',
'_profile_name',
'_defaults',
'_stack',
)
def __init__(self):
self._profile_name = None
self._profile = {}
self._defaults = None
self._plugins = []
# Protect against recursive gets
self._stack = set()
# Lazy connections to Zookeeper, LDAP and DNS
self.zk = ZkContext(self)
self.ldap = AdminContext(self)
self.dns = DnsContext(self)
def _load_profile(self):
"""Loads the profile.
"""
# Load once.
if self._defaults is not None:
return
self._defaults = {
'dns_domain': '.'.join(socket.getfqdn().split('.')[1:])
}
if self._profile_name:
try:
profile_mod = plugin_manager.load('treadmill.profiles',
self._profile_name)
self._defaults.update(profile_mod.PROFILE)
except KeyError:
_LOGGER.warning('Profile not found: %s', self._profile_name)
def _init_plugins(self):
"""Initialize plugins.
"""
if self._plugins:
return
_LOGGER.debug('Loading plugins.')
        # TODO: This is a hack, need a better way to determine if a plugin
        # should be loaded.
if self.get('dns_domain', resolve=False):
_LOGGER.debug('Loading dns plugin.')
dns = plugin_manager.load('treadmill.context', 'dns')
dns.init(self)
self._plugins.append(dns)
if self.get('ldap_url', resolve=False):
_LOGGER.debug('Loading admin plugin.')
ldap = plugin_manager.load('treadmill.context', 'admin')
ldap.init(self)
self._plugins.append(ldap)
def get(self, attr, default=None, resolve=True, volatile=False):
"""Get attribute from profile or defaults.
"""
if attr in self._profile:
return self._profile[attr]
self._load_profile()
if resolve and attr not in self._stack:
self._stack.add(attr)
try:
self._init_plugins()
for plugin in self._plugins:
try:
self._profile[attr] = plugin.resolve(self, attr)
except ContextError as err:
_LOGGER.warning(
'Error resolving attribute %s in %s: %s',
attr, plugin, err
)
except KeyError:
                        # Plugin is not responsible for the attribute.
pass
finally:
self._stack.discard(attr)
if attr not in self._profile:
# Attr was not found, look for it in _defaults
if (self._defaults is not None and
self._defaults.get(attr) is not None):
self._profile[attr] = self._defaults[attr]
if attr not in self._profile and default is not None:
self._profile[attr] = default
        # By the end of the function the attribute is recorded in the profile
        # and never evaluated again.
        #
        # Volatile attributes are re-evaluated on every call instead.
if volatile:
return self._profile.pop(attr, default)
else:
return self._profile.get(attr, default)
def set(self, attr, value):
"""Set profile attribute.
"""
self._profile[attr] = value
def set_profile_name(self, profile_name):
"""Sets current profile.
"""
self._profile_name = profile_name
def get_profile_name(self):
"""Returns profile name.
"""
return self._profile_name
@property
def profile(self):
"""Returns the profile name.
"""
self._load_profile()
return self._profile
@property
@required('Cannot resolve cell.')
def cell(self):
"""Returns cell name.
"""
return self.get('cell', resolve=False)
@cell.setter
def cell(self, value):
"""Sets cell name.
"""
self.set('cell', value)
@property
@required('Cannot resolve DNS domain.')
def dns_domain(self):
"""Returns DNS domain.
"""
return self.get('dns_domain', resolve=False)
@dns_domain.setter
def dns_domain(self, value):
"""Sets DNS domain.
"""
self.set('dns_domain', value)
@property
def dns_server(self):
"""Returns DNS server.
"""
return self.get('dns_server')
@dns_server.setter
def dns_server(self, value):
"""Sets DNS server.
"""
return self.set('dns_server', value)
@property
@required('Cannot resolve LDAP suffix.')
def ldap_suffix(self):
"""Returns LDAP suffix.
"""
return self.get('ldap_suffix')
@ldap_suffix.setter
def ldap_suffix(self, value):
"""Sets DNS server.
"""
return self.set('ldap_suffix', value)
def scopes(self):
"""Returns supported scopes.
"""
return self.get('scopes', ['cell'])
@required('Cannot resolve admin api.')
def admin_api(self, api=None):
"""Returns admin API.
"""
if api:
return [api]
return self.get('admin_api', volatile=True)
@required('Cannot resolve cell api.')
def cell_api(self, api=None):
"""Returns cell API.
"""
if api:
return [api]
return self.get('cell_api', volatile=True)
@required('Cannot resolve websocket api.')
def ws_api(self, api=None):
"""Returns cell API.
"""
if api:
return [api]
return self.get('ws_api', volatile=True)
@required('Cannot resolve state api.')
def state_api(self, api=None):
"""Returns cell API.
"""
if api:
return [api]
return self.get('state_api', volatile=True)
GLOBAL = Context()
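# --- Illustrative sketch (not part of the original module) -------------------
# Hedged example of using the module-level GLOBAL context: values set
# explicitly land in the profile dictionary, while unset attributes are
# resolved lazily (or raise ContextError via the @required properties). The
# cell and domain names below are invented for illustration only.
if __name__ == "__main__":
    GLOBAL.cell = 'example-cell'
    GLOBAL.dns_domain = 'example.com'
    print(GLOBAL.cell, GLOBAL.dns_domain)  # read back from the profile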
| 25.237113
| 79
| 0.547059
|
2a9eff59b0e3fd240bb114d6f792d334bc327d09
| 401
|
py
|
Python
|
innetProject/innetProject/innetProject/wsgi.py
|
stefanSuYiGuo/Django_case2
|
e0e06d95f747a4ada5416dae7d8064037bc5adf0
|
[
"MIT"
] | null | null | null |
innetProject/innetProject/innetProject/wsgi.py
|
stefanSuYiGuo/Django_case2
|
e0e06d95f747a4ada5416dae7d8064037bc5adf0
|
[
"MIT"
] | null | null | null |
innetProject/innetProject/innetProject/wsgi.py
|
stefanSuYiGuo/Django_case2
|
e0e06d95f747a4ada5416dae7d8064037bc5adf0
|
[
"MIT"
] | null | null | null |
"""
WSGI config for innetProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'innetProject.settings')
application = get_wsgi_application()
| 23.588235
| 78
| 0.790524
|
6833c56e8f510a8ed2a2e0e955cb67003aa487cc
| 496
|
py
|
Python
|
examples/misc/djangotasks/todo/views.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 739
|
2015-01-01T02:05:11.000Z
|
2022-03-30T15:26:16.000Z
|
examples/misc/djangotasks/todo/views.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 33
|
2015-03-25T23:17:04.000Z
|
2021-08-19T08:25:22.000Z
|
examples/misc/djangotasks/todo/views.py
|
takipsizad/pyjs
|
54db0ba6747aca744f9f3c3e985a17e913dfb951
|
[
"ECL-2.0",
"Apache-2.0"
] | 167
|
2015-01-01T22:27:47.000Z
|
2022-03-17T13:29:19.000Z
|
# Create your views here.
from django.pimentech.network import *
from todo.models import Todo
service = JSONRPCService()
@jsonremote(service)
def getTasks (request):
return [(str(task),task.id) for task in Todo.objects.all()]
@jsonremote(service)
def addTask (request, taskFromJson):
t = Todo()
t.task = taskFromJson
t.save()
return getTasks(request)
@jsonremote(service)
def deleteTask (request,idFromJson):
t = Todo.objects.get(id=idFromJson)
t.delete()
return getTasks(request)
| 19.076923
| 60
| 0.743952
|
f2f123f9c7fbf276e4dc31df44a1df1630ae868b
| 43,126
|
py
|
Python
|
pyzoo/test/zoo/chronos/data/test_tsdataset.py
|
wangyoucaocxl/analytics-zoo
|
125d1c146f6552f3ceb38d78a2174af902535341
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/chronos/data/test_tsdataset.py
|
wangyoucaocxl/analytics-zoo
|
125d1c146f6552f3ceb38d78a2174af902535341
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/test/zoo/chronos/data/test_tsdataset.py
|
wangyoucaocxl/analytics-zoo
|
125d1c146f6552f3ceb38d78a2174af902535341
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
import pandas as pd
import random
import tempfile
import os
import shutil
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.data import TSDataset
from pandas.testing import assert_frame_equal
from numpy.testing import assert_array_almost_equal
def get_ts_df():
sample_num = np.random.randint(100, 200)
train_df = pd.DataFrame({"datetime": pd.date_range('1/1/2019', periods=sample_num),
"value": np.random.randn(sample_num),
"id": np.array(['00']*sample_num),
"extra feature": np.random.randn(sample_num)})
return train_df
def get_multi_id_ts_df():
sample_num = 100
train_df = pd.DataFrame({"value": np.random.randn(sample_num),
"id": np.array(['00']*50 + ['01']*50),
"extra feature": np.random.randn(sample_num)})
train_df["datetime"] = pd.date_range('1/1/2019', periods=sample_num)
train_df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
return train_df
def get_ugly_ts_df():
data = np.random.random_sample((100, 5))
mask = np.random.random_sample((100, 5))
newmask = mask.copy()
mask[newmask >= 0.4] = 2
mask[newmask < 0.4] = 1
mask[newmask < 0.2] = 0
data[mask == 0] = None
data[mask == 1] = np.nan
df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e'])
    df['a'][0] = np.nan  # make sure column 'a' has an N/A
df["datetime"] = pd.date_range('1/1/2019', periods=100)
df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50)
df["id"] = np.array(['00']*50 + ['01']*50)
return df
def get_int_target_df():
sample_num = np.random.randint(100, 200)
train_df = pd.DataFrame({"datetime": pd.date_range('1/1/2019', periods=sample_num),
"value": np.array(sample_num),
"id": np.array(['00']*sample_num),
"extra feature": np.random.randn(sample_num)})
return train_df
def get_non_dt():
df = pd.DataFrame({"datetime": np.arange(100),
"id": np.array(['00']*100),
"value": np.random.randn(100),
"extra feature": np.random.randn(100)})
return df
def get_not_aligned_df():
df_val = pd.DataFrame({"id": np.array(['00']*20+['01']*30+['02']*50),
"value": np.random.randn(100),
"extra feature": np.random.randn(100)})
data_sec = pd.DataFrame({"datetime": pd.date_range(
start='1/1/2019 00:00:00', periods=20, freq='S')})
data_min = pd.DataFrame({"datetime": pd.date_range(
start='1/2/2019 00:00:00', periods=30, freq='H')})
data_hou = pd.DataFrame({"datetime": pd.date_range(
start='1/3/2019 00:00:00', periods=50, freq='D')})
dt_val = pd.concat([data_sec, data_min, data_hou],
axis=0, ignore_index=True)
df = pd.merge(left=dt_val, right=df_val, left_index=True, right_index=True)
return df
class TestTSDataset(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def test_tsdataset_initialization(self):
df = get_ts_df()
# legal input
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == ['00']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == ['00']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df.drop(columns=["id"]), dt_col="datetime",
target_col=["value"], extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
# illegal input
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col=0)
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col=0, target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=0,
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(0, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value1"],
extra_feature_col="extra feature", id_col="id")
def test_tsdataset_from_parquet(self):
df = get_ts_df()
configs = dict(dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature"],
id_col="id")
tsdata_pd = TSDataset.from_pandas(df, **configs)
temp = tempfile.mkdtemp()
try:
path = os.path.join(temp, "test.parquet")
df.to_parquet(path)
tsdata_pq = TSDataset.from_parquet(path, **configs)
pd.testing.assert_frame_equal(tsdata_pd.to_pandas(), tsdata_pq.to_pandas(),
check_like=True)
finally:
shutil.rmtree(temp)
def test_tsdataset_initialization_multiple(self):
df = get_multi_id_ts_df()
# legal input
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
assert tsdata._id_list == ['00', '01']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
assert tsdata._id_list == ['00', '01']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
tsdata = TSDataset.from_pandas(df.drop(columns=["id"]), dt_col="datetime",
target_col=["value"], extra_feature_col="extra feature")
assert tsdata._id_list == ['0']
assert tsdata.feature_col == ["extra feature"]
assert tsdata.target_col == ["value"]
assert tsdata.dt_col == "datetime"
assert tsdata._is_pd_datetime
        # illegal input
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col=0)
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col=0, target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=0,
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(0, dt_col="datetime", target_col=["value"],
extra_feature_col="extra feature", id_col="id")
with pytest.raises(AssertionError):
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col=["value1"],
extra_feature_col="extra feature", id_col="id")
def test_tsdataset_roll_single_id(self):
df = get_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_numpy()
# roll train, diff input.
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
# add extra_feature_col.
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value")
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=["extra feature"], target_col="value", id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value")
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 1)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon,
feature_col=[], target_col="value", id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 1)
assert y.shape == (len(df)-lookback-horizon+1, horizon, 1)
# roll test.
horizon = 0
lookback = random.randint(1, 20)
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y is None
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == (len(df)-lookback-horizon+1, lookback, 2)
assert y is None
tsdata._check_basic_invariants()
def test_tsdataset_roll_multi_id(self):
df = get_multi_id_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
# test train
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1), lookback, 4)
assert y.shape == ((50-lookback-horizon+1), horizon, 2)
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
# horizon list.
horizon_list = [1, 3, 5]
tsdata.roll(lookback=lookback, horizon=horizon_list)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-max(horizon_list)+1)*2, lookback, 2)
assert y.shape == ((50-lookback-max(horizon_list)+1)*2, len(horizon_list), 1)
horizon_list = [1, 5, 9]
tsdata.roll(lookback=lookback, horizon=horizon_list, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-max(horizon_list)+1), lookback, 4)
assert y.shape == ((50-lookback-max(horizon_list)+1), len(horizon_list), 2)
# target multi.
tsdata = TSDataset.from_pandas(df,
dt_col="datetime",
target_col=["value", "extra feature"],
id_col="id")
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=False)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 2)
tsdata._check_basic_invariants()
def test_tsdataset_roll_order(self):
df = pd.DataFrame({"datetime": np.array(['1/1/2019', '1/1/2019', '1/2/2019', '1/2/2019']),
"value": np.array([1.9, 2.3, 2.4, 2.6]),
"id": np.array(['00', '01', '00', '01']),
"extra feature1": np.array([1, 0, 3, 0]),
"extra feature2": np.array([2, 9, 4, 2])})
tsdata = TSDataset.from_pandas(df,
dt_col="datetime",
target_col="value",
extra_feature_col=["extra feature1", "extra feature2"],
id_col="id")
x, y = tsdata.roll(lookback=1, horizon=1, id_sensitive=False).to_numpy()
assert x.shape == (2, 1, 3) and y.shape == (2, 1, 1)
assert np.array_equal(x, np.array([[[1.9, 1, 2]], [[2.3, 0, 9]]], dtype=np.float32))
assert np.array_equal(y, np.array([[[2.4]], [[2.6]]], dtype=np.float32))
x, y = tsdata.roll(lookback=1, horizon=1, id_sensitive=True).to_numpy()
assert x.shape == (1, 1, 6) and y.shape == (1, 1, 2)
assert np.array_equal(x, np.array([[[1.9, 2.3, 1, 2, 0, 9]]], dtype=np.float32))
assert np.array_equal(y, np.array([[[2.4, 2.6]]], dtype=np.float32))
def test_tsdata_roll_int_target(self):
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
df = get_int_target_df()
tsdata = TSDataset.from_pandas(df, dt_col='datetime', target_col='value',
extra_feature_col=['extra feature'], id_col="id")
x, y = tsdata.roll(lookback=lookback, horizon=horizon).to_numpy()
assert x.dtype == np.float32
assert y.dtype == np.float32
tsdata._check_basic_invariants()
def test_tsdataset_to_torch_loader_roll(self):
df_single_id = get_ts_df()
df_multi_id = get_multi_id_ts_df()
for df in [df_single_id, df_multi_id]:
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
batch_size = random.randint(16, 32)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
# train
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
# test
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=0)
for x_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
break
# specify feature_col
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon,
feature_col=[])
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 1)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
# Non-subset relationship
with pytest.raises(ValueError):
tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon,
target_col=['value', 'extra feature'])
# specify horizon_list
horizon_list = [1, 3, 5]
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon_list)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, len(horizon_list), 1)
break
# multi target_col
tsdata = TSDataset.from_pandas(df, dt_col="datetime",
target_col=["value", "extra feature"], id_col="id")
torch_loader = tsdata.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in torch_loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 2)
break
def test_tsdataset_to_torch_loader(self):
df = get_ts_df()
horizon = random.randint(1, 10)
lookback = random.randint(1, 20)
batch_size = random.randint(16, 32)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
with pytest.raises(RuntimeError):
tsdata.to_torch_data_loader()
tsdata.roll(lookback=lookback, horizon=horizon)
loader = tsdata.to_torch_data_loader(batch_size=batch_size,
lookback=lookback,
horizon=horizon)
for x_batch, y_batch in loader:
assert tuple(x_batch.size()) == (batch_size, lookback, 2)
assert tuple(y_batch.size()) == (batch_size, horizon, 1)
break
def test_tsdata_multi_unscale_numpy_torch_load(self):
lookback = random.randint(1, 10)
horizon = random.randint(1, 20)
batch_size = random.randint(16, 32)
df = get_multi_id_ts_df()
df_test = get_multi_id_ts_df()
tsdata_train = TSDataset.from_pandas(df,
target_col='value',
dt_col='datetime',
extra_feature_col='extra feature',
id_col='id')
tsdata_test = TSDataset.from_pandas(df_test,
target_col='value',
dt_col='datetime',
extra_feature_col='extra feature',
id_col='id')
# roll is True.
from sklearn.preprocessing import StandardScaler
stand = StandardScaler()
for tsdata in [tsdata_train, tsdata_test]:
tsdata.scale(stand, fit=tsdata is tsdata_train)
test_loader = tsdata_test.to_torch_data_loader(batch_size=batch_size,
roll=True,
lookback=lookback,
horizon=horizon)
import torch
from torch.utils.data.dataloader import DataLoader
test_loader = DataLoader(test_loader.dataset, batch_size=batch_size, shuffle=False)
batch_load_list = []
for _, y_batch in test_loader:
batch_load_list.append(y_batch)
y_test = torch.cat(batch_load_list, dim=0)
pred = np.copy(y_test.numpy()) # sanity check
unscaled_pred = tsdata_train.unscale_numpy(pred)
unscaled_y_test = tsdata_train.unscale_numpy(y_test.numpy())
_, unscaled_y_test_reproduce = tsdata_test.unscale()\
.roll(lookback=lookback, horizon=horizon)\
.to_numpy()
assert_array_almost_equal(unscaled_pred, unscaled_y_test_reproduce)
assert_array_almost_equal(unscaled_y_test, unscaled_y_test_reproduce)
tsdata._check_basic_invariants()
def test_tsdataset_imputation(self):
for val in ["last", "const", "linear"]:
df = get_ugly_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.impute(mode=val)
assert tsdata.to_pandas().isna().sum().sum() == 0
assert len(tsdata.to_pandas()) == 100
tsdata._check_basic_invariants()
def test_tsdataset_deduplicate(self):
df = get_ugly_ts_df()
for _ in range(20):
df.loc[len(df)] = df.loc[np.random.randint(0, 99)]
assert len(df) == 120
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="e",
extra_feature_col=["a", "b", "c", "d"], id_col="id")
tsdata.deduplicate()
assert len(tsdata.to_pandas()) == 100
tsdata._check_basic_invariants()
def test_tsdataset_datetime_feature(self):
df = get_ts_df()
# interval = day
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
# interval = day, one_hot = ["WEEKDAY"]
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature(one_hot_features=["WEEKDAY"])
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
def test_tsdataset_datetime_feature_multiple(self):
df = get_multi_id_ts_df()
# interval = day
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
# interval = day, one_hot = ["WEEKDAY"]
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature(one_hot_features=["WEEKDAY"])
assert set(tsdata.to_pandas().columns) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature',
'value',
'datetime',
'id'}
assert set(tsdata.feature_col) == {'DAY',
'IS_WEEKEND',
'WEEKDAY_0',
'WEEKDAY_1',
'WEEKDAY_2',
'WEEKDAY_3',
'WEEKDAY_4',
'WEEKDAY_5',
'WEEKDAY_6',
'MONTH',
'DAYOFYEAR',
'WEEKOFYEAR',
'extra feature'}
tsdata._check_basic_invariants()
def test_tsdataset_scale_unscale(self):
df = get_ts_df()
df_test = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata_test = TSDataset.from_pandas(df_test, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
from sklearn.preprocessing import StandardScaler, MaxAbsScaler, MinMaxScaler, RobustScaler
scalers = [StandardScaler(), MaxAbsScaler(),
MinMaxScaler(), RobustScaler()]
for scaler in scalers:
tsdata.scale(scaler)
tsdata_test.scale(scaler, fit=False)
with pytest.raises(AssertionError):
assert_frame_equal(tsdata.to_pandas(), df)
with pytest.raises(AssertionError):
assert_frame_equal(tsdata_test.to_pandas(), df_test)
tsdata.unscale()
tsdata_test.unscale()
assert_frame_equal(tsdata.to_pandas(), df)
assert_frame_equal(tsdata_test.to_pandas(), df_test)
tsdata._check_basic_invariants()
def test_tsdataset_unscale_numpy(self):
df = get_multi_id_ts_df()
df_test = get_multi_id_ts_df()
from sklearn.preprocessing import StandardScaler, MaxAbsScaler, MinMaxScaler, RobustScaler
scalers = [StandardScaler(),
StandardScaler(with_mean=False),
StandardScaler(with_std=False),
MaxAbsScaler(),
MinMaxScaler(),
MinMaxScaler(feature_range=(1, 3)),
RobustScaler(),
RobustScaler(with_centering=False),
RobustScaler(with_scaling=False),
RobustScaler(quantile_range=(20, 80))]
for scaler in scalers:
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata_test = TSDataset.from_pandas(df_test, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_dt_feature()\
.scale(scaler)\
.roll(lookback=5, horizon=4, id_sensitive=True)
tsdata_test.gen_dt_feature()\
.scale(scaler, fit=False)\
.roll(lookback=5, horizon=4, id_sensitive=True)
_, _ = tsdata.to_numpy()
_, y_test = tsdata_test.to_numpy()
pred = np.copy(y_test) # sanity check
unscaled_pred = tsdata.unscale_numpy(pred)
unscaled_y_test = tsdata.unscale_numpy(y_test)
tsdata_test.unscale()\
.roll(lookback=5, horizon=4, id_sensitive=True)
_, unscaled_y_test_reproduce = tsdata_test.to_numpy()
assert_array_almost_equal(unscaled_pred, unscaled_y_test_reproduce)
assert_array_almost_equal(unscaled_y_test, unscaled_y_test_reproduce)
tsdata._check_basic_invariants()
def test_tsdataset_resample(self):
df = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D', df["datetime"][0], df["datetime"][df.shape[0]-1])
assert len(tsdata.to_pandas()) == (df.shape[0] + 1) // 2
tsdata._check_basic_invariants()
def test_tsdataset_resample_multiple(self):
df = get_multi_id_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D', df["datetime"][0], df["datetime"][df.shape[0]-1])
assert len(tsdata.to_pandas()) == df.shape[0] // 2
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.resample('2D')
assert len(tsdata.to_pandas()) == 50
tsdata._check_basic_invariants()
def test_tsdataset_split(self):
df = get_ts_df()
# only train and test
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0, test_ratio=0.1)
# standard split with all three sets
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
assert set(np.unique(tsdata_train.to_pandas()["id"])) == {"00"}
assert set(np.unique(tsdata_valid.to_pandas()["id"])) == {"00"}
assert set(np.unique(tsdata_test.to_pandas()["id"])) == {"00"}
assert len(tsdata_train.to_pandas()) == df[:-(int(df.shape[0]*0.1)*2)].shape[0]
assert len(tsdata_valid.to_pandas()) == int(df.shape[0] * 0.1 + 5 + 2 - 1)
assert len(tsdata_test.to_pandas()) == int(df.shape[0] * 0.1 + 5 + 2 - 1)
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_tsdataset_split_multiple(self):
df = get_multi_id_ts_df()
tsdata_train, tsdata_valid, tsdata_test =\
TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id",
with_split=True, val_ratio=0.1, test_ratio=0.1,
largest_look_back=5, largest_horizon=2)
assert set(np.unique(tsdata_train.to_pandas()["id"])) == {"00", "01"}
assert set(np.unique(tsdata_valid.to_pandas()["id"])) == {"00", "01"}
assert set(np.unique(tsdata_test.to_pandas()["id"])) == {"00", "01"}
assert len(tsdata_train.to_pandas()) == (50 * 0.8)*2
assert len(tsdata_valid.to_pandas()) == (50 * 0.1 + 5 + 2 - 1)*2
assert len(tsdata_test.to_pandas()) == (50 * 0.1 + 5 + 2 - 1)*2
assert tsdata_train.feature_col is not tsdata_valid.feature_col
assert tsdata_train.feature_col is not tsdata_test.feature_col
assert tsdata_train.target_col is not tsdata_valid.target_col
assert tsdata_train.target_col is not tsdata_test.target_col
tsdata_train.feature_col.append("new extra feature")
assert len(tsdata_train.feature_col) == 2
assert len(tsdata_valid.feature_col) == 1
assert len(tsdata_test.feature_col) == 1
tsdata_train.target_col[0] = "new value"
assert tsdata_train.target_col[0] == "new value"
assert tsdata_valid.target_col[0] != "new value"
assert tsdata_test.target_col[0] != "new value"
def test_tsdataset_global_feature(self):
for val in ["minimal"]:
df = get_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings=val)
tsdata._check_basic_invariants()
def test_tsdataset_global_feature_multiple(self):
df = get_multi_id_ts_df()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings="minimal")
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_global_feature(settings="minimal", n_jobs=2)
tsdata._check_basic_invariants()
def test_tsdataset_rolling_feature_multiple(self):
df = get_multi_id_ts_df()
horizon = random.randint(2, 10)
lookback = random.randint(2, 20)
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_rolling_feature(settings="minimal", window_size=lookback)
tsdata._check_basic_invariants()
tsdata = TSDataset.from_pandas(df, dt_col="datetime", target_col="value",
extra_feature_col=["extra feature"], id_col="id")
tsdata.gen_rolling_feature(settings="minimal", window_size=lookback, n_jobs=2)
tsdata._check_basic_invariants()
# roll train
tsdata.roll(lookback=lookback, horizon=horizon)
x, y = tsdata.to_numpy()
feature_num = len(tsdata.feature_col) + len(tsdata.target_col)
assert x.shape == ((50-lookback-horizon+1)*2, lookback, feature_num)
assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1)
tsdata.roll(lookback=lookback, horizon=horizon, id_sensitive=True)
x, y = tsdata.to_numpy()
assert x.shape == ((50-lookback-horizon+1), lookback, feature_num*2)
assert y.shape == ((50-lookback-horizon+1), horizon, 2)
tsdata._check_basic_invariants()
def test_check_scale_sequence(self):
df = get_multi_id_ts_df()
# with split is True.
td_train, td_valid, td_test = TSDataset.from_pandas(df, dt_col="datetime",
target_col="value",
extra_feature_col=[
"extra feature"],
id_col="id",
with_split=True,
val_ratio=0.1,
test_ratio=0.1)
from sklearn.preprocessing import StandardScaler
stand = StandardScaler()
with pytest.raises(AssertionError):
for tsdata in [td_train, td_valid, td_test]:
tsdata.scale(stand, fit=False)
tsdata._check_basic_invariants()
# remove due to the possible large cost on test sys
# with pytest.raises(AssertionError):
# tsdata.gen_global_feature(settings="minimal")\
# .gen_rolling_feature(settings="minimal", window_size=5)
def test_non_pd_datetime(self):
df = get_non_dt()
tsdata = TSDataset.from_pandas(df, dt_col="datetime",
target_col="value",
extra_feature_col="extra feature",
id_col="id")
with pytest.raises(AssertionError):
tsdata.resample('2D')
with pytest.raises(AssertionError):
tsdata.gen_dt_feature()
with pytest.raises(AssertionError):
tsdata.gen_rolling_feature(settings="minimal", window_size=1000)
tsdata._check_basic_invariants()
def test_not_aligned(self):
df = get_not_aligned_df()
tsdata = TSDataset.from_pandas(df, target_col="value",
dt_col="datetime",
extra_feature_col="extra feature",
id_col="id")
with pytest.raises(AssertionError):
tsdata.roll(lookback=5, horizon=2, id_sensitive=True)
tsdata._check_basic_invariants()
| 48.674944
| 98
| 0.509391
|
cef93b2051eab851e4cd2fd6bb83355728b401f9
| 810
|
py
|
Python
|
manage.py
|
avinash795k/leaveProject
|
264818f052e0abfdd47a0b9e73b1a9fa28114da0
|
[
"MIT"
] | null | null | null |
manage.py
|
avinash795k/leaveProject
|
264818f052e0abfdd47a0b9e73b1a9fa28114da0
|
[
"MIT"
] | null | null | null |
manage.py
|
avinash795k/leaveProject
|
264818f052e0abfdd47a0b9e73b1a9fa28114da0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "leaveProject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.217391
| 77
| 0.644444
|
bb712c5cfb785c0c0fa1b98d02fa48ffd2861c63
| 24,343
|
py
|
Python
|
keystone/identity/core.py
|
christophgysin/openstack-keystone
|
ab4f7473c34b8a94ed5a3aced01bf055d4453905
|
[
"Apache-2.0"
] | null | null | null |
keystone/identity/core.py
|
christophgysin/openstack-keystone
|
ab4f7473c34b8a94ed5a3aced01bf055d4453905
|
[
"Apache-2.0"
] | null | null | null |
keystone/identity/core.py
|
christophgysin/openstack-keystone
|
ab4f7473c34b8a94ed5a3aced01bf055d4453905
|
[
"Apache-2.0"
] | 1
|
2021-08-29T16:53:06.000Z
|
2021-08-29T16:53:06.000Z
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Identity service."""
import abc
import functools
import os
from oslo.config import cfg
import six
from keystone import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
from keystone import notifications
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import importutils
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def filter_user(user_ref):
"""Filter out private items in a user dict.
    'password', 'tenants', 'groups' and 'domains' are never returned.
:returns: user_ref
"""
if user_ref:
user_ref = user_ref.copy()
user_ref.pop('password', None)
user_ref.pop('tenants', None)
user_ref.pop('groups', None)
user_ref.pop('domains', None)
try:
user_ref['extra'].pop('password', None)
user_ref['extra'].pop('tenants', None)
except KeyError:
pass
return user_ref
class DomainConfigs(dict):
"""Discover, store and provide access to domain specific configs.
The setup_domain_drivers() call will be made via the wrapper from
the first call to any driver function handled by this manager. This
    setup call will scan the domain config directory for files of the form
keystone.<domain_name>.conf
For each file, the domain_name will be turned into a domain_id and then
this class will:
- Create a new config structure, adding in the specific additional options
defined in this config file
- Initialise a new instance of the required driver with this new config.
"""
configured = False
driver = None
def _load_driver(self, assignment_api, domain_id):
domain_config = self[domain_id]
domain_config['driver'] = (
importutils.import_object(
domain_config['cfg'].identity.driver, domain_config['cfg']))
domain_config['driver'].assignment_api = assignment_api
def _load_config(self, assignment_api, file_list, domain_name):
try:
domain_ref = assignment_api.get_domain_by_name(domain_name)
except exception.DomainNotFound:
LOG.warning(
_('Invalid domain name (%s) found in config file name'),
domain_name)
return
# Create a new entry in the domain config dict, which contains
# a new instance of both the conf environment and driver using
# options defined in this set of config files. Later, when we
# service calls via this Manager, we'll index via this domain
# config dict to make sure we call the right driver
domain = domain_ref['id']
self[domain] = {}
self[domain]['cfg'] = cfg.ConfigOpts()
config.configure(conf=self[domain]['cfg'])
self[domain]['cfg'](args=[], project='keystone',
default_config_files=file_list)
self._load_driver(assignment_api, domain)
def setup_domain_drivers(self, standard_driver, assignment_api):
# This is called by the api call wrapper
self.configured = True
self.driver = standard_driver
conf_dir = CONF.identity.domain_config_dir
if not os.path.exists(conf_dir):
LOG.warning(_('Unable to locate domain config directory: %s'),
conf_dir)
return
for r, d, f in os.walk(conf_dir):
for fname in f:
if (fname.startswith(DOMAIN_CONF_FHEAD) and
fname.endswith(DOMAIN_CONF_FTAIL)):
if fname.count('.') >= 2:
self._load_config(assignment_api,
[os.path.join(r, fname)],
fname[len(DOMAIN_CONF_FHEAD):
-len(DOMAIN_CONF_FTAIL)])
else:
LOG.debug(_('Ignoring file (%s) while scanning domain '
'config directory'),
fname)
def get_domain_driver(self, domain_id):
if domain_id in self:
return self[domain_id]['driver']
def get_domain_conf(self, domain_id):
if domain_id in self:
return self[domain_id]['cfg']
def reload_domain_driver(self, assignment_api, domain_id):
# Only used to support unit tests that want to set
# new config values. This should only be called once
# the domains have been configured, since it relies on
# the fact that the configuration files have already been
# read.
if self.configured:
if domain_id in self:
self._load_driver(assignment_api, domain_id)
else:
# The standard driver
self.driver = self.driver()
self.driver.assignment_api = assignment_api
def domains_configured(f):
"""Wraps API calls to lazy load domain configs after init.
This is required since the assignment manager needs to be initialized
before this manager, and yet this manager's init wants to be
able to make assignment calls (to build the domain configs). So
instead, we check if the domains have been initialized on entry
    to each call, and if required, load them.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if (not self.domain_configs.configured and
CONF.identity.domain_specific_drivers_enabled):
LOG.warning(_(
'Running an experimental and unsupported configuration '
'(domain_specific_drivers_enabled = True); '
'this will result in known issues.'))
self.domain_configs.setup_domain_drivers(
self.driver, self.assignment_api)
return f(self, *args, **kwargs)
return wrapper
@dependency.provider('identity_api')
@dependency.optional('revoke_api')
@dependency.requires('assignment_api', 'credential_api', 'token_api')
class Manager(manager.Manager):
"""Default pivot point for the Identity backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
This class also handles the support of domain specific backends, by using
the DomainConfigs class. The setup call for DomainConfigs is called
    from within the @domains_configured wrapper in a lazy loading fashion
to get around the fact that we can't satisfy the assignment api it needs
from within our __init__() function since the assignment driver is not
itself yet initialized.
Each of the identity calls are pre-processed here to choose, based on
domain, which of the drivers should be called. The non-domain-specific
driver is still in place, and is used if there is no specific driver for
the domain in question.
"""
_USER = 'user'
_GROUP = 'group'
def __init__(self):
super(Manager, self).__init__(CONF.identity.driver)
self.domain_configs = DomainConfigs()
# Domain ID normalization methods
def _set_domain_id(self, ref, domain_id):
if isinstance(ref, dict):
ref = ref.copy()
ref['domain_id'] = domain_id
return ref
elif isinstance(ref, list):
return [self._set_domain_id(x, domain_id) for x in ref]
else:
raise ValueError(_('Expected dict or list: %s') % type(ref))
def _clear_domain_id(self, ref):
# Clear the domain_id, and then check to ensure that if this
# was not the default domain, it is being handled by its own
# backend driver.
ref = ref.copy()
domain_id = ref.pop('domain_id', CONF.identity.default_domain_id)
if (domain_id != CONF.identity.default_domain_id and
domain_id not in self.domain_configs):
raise exception.DomainNotFound(domain_id=domain_id)
return ref
def _normalize_scope(self, domain_scope):
if domain_scope is None:
return CONF.identity.default_domain_id
else:
return domain_scope
def _select_identity_driver(self, domain_id):
driver = self.domain_configs.get_domain_driver(domain_id)
if driver:
return driver
else:
self.assignment_api.get_domain(domain_id)
return self.driver
def _get_domain_id_and_driver(self, domain_scope):
domain_id = self._normalize_scope(domain_scope)
driver = self._select_identity_driver(domain_id)
return (domain_id, driver)
def _mark_domain_id_filter_satisfied(self, hints):
if hints:
for filter in hints.filters:
if (filter['name'] == 'domain_id' and
filter['comparator'] == 'equals'):
hints.filters.remove(filter)
# The actual driver calls - these are pre/post processed here as
# part of the Manager layer to make sure we:
#
# - select the right driver for this domain
# - clear/set domain_ids for drivers that do not support domains
@notifications.emit_event('authenticate')
@domains_configured
def authenticate(self, context, user_id, password, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.authenticate(user_id, password)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@notifications.created(_USER)
@domains_configured
def create_user(self, user_id, user_ref):
user = user_ref.copy()
user['name'] = clean.user_name(user['name'])
user.setdefault('enabled', True)
user['enabled'] = clean.user_enabled(user['enabled'])
# For creating a user, the domain is in the object itself
domain_id = user_ref['domain_id']
driver = self._select_identity_driver(domain_id)
if not driver.is_domain_aware():
user = self._clear_domain_id(user)
ref = driver.create_user(user_id, user)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def get_user(self, user_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.get_user(user_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def get_user_by_name(self, user_name, domain_id):
driver = self._select_identity_driver(domain_id)
ref = driver.get_user_by_name(user_name, domain_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@manager.response_truncated
@domains_configured
def list_users(self, domain_scope=None, hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_users(hints or driver_hints.Hints())
if not driver.is_domain_aware():
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
@notifications.updated(_USER)
@domains_configured
def update_user(self, user_id, user_ref, domain_scope=None):
user = user_ref.copy()
if 'name' in user:
user['name'] = clean.user_name(user['name'])
if 'enabled' in user:
user['enabled'] = clean.user_enabled(user['enabled'])
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
user = self._clear_domain_id(user)
ref = driver.update_user(user_id, user)
if user.get('enabled') is False or user.get('password') is not None:
if self.revoke_api:
self.revoke_api.revoke_by_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@notifications.deleted(_USER)
@domains_configured
def delete_user(self, user_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
driver.delete_user(user_id)
self.credential_api.delete_credentials_for_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
@notifications.created(_GROUP)
@domains_configured
def create_group(self, group_id, group_ref):
group = group_ref.copy()
group.setdefault('description', '')
# For creating a group, the domain is in the object itself
domain_id = group_ref['domain_id']
driver = self._select_identity_driver(domain_id)
if not driver.is_domain_aware():
group = self._clear_domain_id(group)
ref = driver.create_group(group_id, group)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@domains_configured
def get_group(self, group_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.get_group(group_id)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
@notifications.updated(_GROUP)
@domains_configured
def update_group(self, group_id, group, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
group = self._clear_domain_id(group)
ref = driver.update_group(group_id, group)
if not driver.is_domain_aware():
ref = self._set_domain_id(ref, domain_id)
return ref
def revoke_tokens_for_group(self, group_id, domain_scope):
# We get the list of users before we attempt the group
# deletion, so that we can remove these tokens after we know
# the group deletion succeeded.
# TODO(ayoung): revoke based on group and roleids instead
user_ids = []
for u in self.list_users_in_group(group_id, domain_scope):
user_ids.append(u['id'])
if self.revoke_api:
self.revoke_api.revoke_by_user(u['id'])
self.token_api.delete_tokens_for_users(user_ids)
@notifications.deleted(_GROUP)
@domains_configured
def delete_group(self, group_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
# As well as deleting the group, we need to invalidate
# any tokens for the users who are members of the group.
self.revoke_tokens_for_group(group_id, domain_scope)
driver.delete_group(group_id)
@domains_configured
def add_user_to_group(self, user_id, group_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
driver.add_user_to_group(user_id, group_id)
self.token_api.delete_tokens_for_user(user_id)
@domains_configured
def remove_user_from_group(self, user_id, group_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
driver.remove_user_from_group(user_id, group_id)
# TODO(ayoung) revoking all tokens for a user based on group
# membership is overkill, as we only would need to revoke tokens
# that had role assignments via the group. Calculating those
# assignments would have to be done by the assignment backend.
if self.revoke_api:
self.revoke_api.revoke_by_user(user_id)
self.token_api.delete_tokens_for_user(user_id)
@manager.response_truncated
@domains_configured
def list_groups_for_user(self, user_id, domain_scope=None,
hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_groups_for_user(
user_id, hints or driver_hints.Hints())
if not driver.is_domain_aware():
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
@manager.response_truncated
@domains_configured
def list_groups(self, domain_scope=None, hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_groups(hints or driver_hints.Hints())
if not driver.is_domain_aware():
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
@manager.response_truncated
@domains_configured
def list_users_in_group(self, group_id, domain_scope=None,
hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
if not driver.is_domain_aware():
# We are effectively satisfying any domain_id filter by the above
# driver selection, so remove any such filter
self._mark_domain_id_filter_satisfied(hints)
ref_list = driver.list_users_in_group(
group_id, hints or driver_hints.Hints())
if not driver.is_domain_aware():
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
@domains_configured
def check_user_in_group(self, user_id, group_id, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
driver.check_user_in_group(user_id, group_id)
@domains_configured
def change_password(self, context, user_id, original_password,
new_password, domain_scope):
# authenticate() will raise an AssertionError if authentication fails
self.authenticate(context, user_id, original_password,
domain_scope=domain_scope)
update_dict = {'password': new_password}
self.update_user(user_id, update_dict, domain_scope=domain_scope)
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
"""Interface description for an Identity driver."""
def _get_list_limit(self):
return CONF.identity.list_limit or CONF.list_limit
@abc.abstractmethod
def authenticate(self, user_id, password):
"""Authenticate a given user and password.
:returns: user_ref
:raises: AssertionError
"""
raise exception.NotImplemented()
# user crud
@abc.abstractmethod
def create_user(self, user_id, user):
"""Creates a new user.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_users(self, hints):
"""List users in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_users_in_group(self, group_id, hints):
"""List users in a group.
:param group_id: the group in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of user_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_user(self, user_id):
"""Get a user by ID.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_user(self, user_id, user):
"""Updates an existing user.
:raises: keystone.exception.UserNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def add_user_to_group(self, user_id, group_id):
"""Adds a user to a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def check_user_in_group(self, user_id, group_id):
"""Checks if a user is a member of a group.
:raises: keystone.exception.UserNotFound,
keystone.exception.GroupNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def remove_user_from_group(self, user_id, group_id):
"""Removes a user from a group.
:raises: keystone.exception.NotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_user(self, user_id):
"""Deletes an existing user.
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_user_by_name(self, user_name, domain_id):
"""Get a user by name.
:returns: user_ref
:raises: keystone.exception.UserNotFound
"""
raise exception.NotImplemented()
# group crud
@abc.abstractmethod
def create_group(self, group_id, group):
"""Creates a new group.
:raises: keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_groups(self, hints):
"""List groups in the system.
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def list_groups_for_user(self, user_id, hints):
"""List groups a user is in
:param user_id: the user in question
:param hints: filter hints which the driver should
implement if at all possible.
:returns: a list of group_refs or an empty list.
"""
raise exception.NotImplemented()
@abc.abstractmethod
def get_group(self, group_id):
"""Get a group by ID.
:returns: group_ref
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def update_group(self, group_id, group):
"""Updates an existing group.
        :raises: keystone.exception.GroupNotFound,
keystone.exception.Conflict
"""
raise exception.NotImplemented()
@abc.abstractmethod
def delete_group(self, group_id):
"""Deletes an existing group.
:raises: keystone.exception.GroupNotFound
"""
raise exception.NotImplemented()
@abc.abstractmethod
def is_domain_aware(self):
"""Indicates if Driver supports domains."""
raise exception.NotImplemented()
# end of identity
| 35.693548
| 79
| 0.654192
|
d7de9b55370073145f653163d5da240ac4d7e133
| 25,441
|
py
|
Python
|
pytorch_toolkit/text_spotting/text_spotting/datasets/datasets.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 3
|
2020-12-29T02:47:32.000Z
|
2021-11-12T08:12:51.000Z
|
pytorch_toolkit/text_spotting/text_spotting/datasets/datasets.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 28
|
2020-09-25T22:40:36.000Z
|
2022-03-12T00:37:36.000Z
|
pytorch_toolkit/text_spotting/text_spotting/datasets/datasets.py
|
morkovka1337/openvino_training_extensions
|
846db45c264d6b061505213f51763520b9432ba9
|
[
"Apache-2.0"
] | 1
|
2021-04-02T07:51:01.000Z
|
2021-04-02T07:51:01.000Z
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import json
import os
from collections import defaultdict
import cv2
import imagesize
import numpy as np
from tqdm import tqdm
class TextOnlyCocoAnnotation:
""" Class for working with MSCOCO-like annotation for text. """
def __init__(self, path=None, root=''):
self.label_map = {'text': 1}
self.annotation = {
"type": "instances",
"images": [],
"categories": [],
"annotations": [],
}
self.annotation['categories'] = [{"supercategory": "none", "name": key, "id": value}
for key, value in self.label_map.items()]
self.annotation['categories'] = sorted(self.annotation['categories'],
key=lambda x: x["id"])
if path is not None:
assert os.path.exists(path), path
with open(path) as read_file:
self.annotation = json.load(read_file)
if root:
for image_info in self.annotation['images']:
image_info['file_name'] = os.path.join(root, image_info['file_name'])
self.img_id_2_ann_id = defaultdict(list)
for index, ann in enumerate(self.annotation['annotations']):
assert index == ann['id']
self.img_id_2_ann_id[ann['image_id']].append(ann['id'])
self.img_path_2_img_id = dict()
for index, img in enumerate(self.annotation['images']):
assert index == img['id']
self.img_path_2_img_id[img['file_name']] = index
def add_bbox(self, image_path, image_size, obj):
""" Adds new text object to annotation. """
if image_path not in self.img_path_2_img_id:
self.img_path_2_img_id[image_path] = len(self.img_path_2_img_id)
self.annotation['images'].append({
"file_name": image_path,
"height": image_size[1],
"width": image_size[0],
"id": self.img_path_2_img_id[image_path]
})
new_ann_id = len(self.annotation['annotations'])
self.img_id_2_ann_id[self.img_path_2_img_id[image_path]].append(new_ann_id)
self.annotation['annotations'].append({
"bbox": obj['bbox'], # x, y, w, h
"segmentation": obj['segmentation'],
"text": obj['text'],
"ignore": 0,
"id": new_ann_id,
"image_id": self.img_path_2_img_id[image_path],
"area": obj['bbox'][2] * obj['bbox'][3],
"iscrowd": 1 - int(obj['text']['legible']),
"category_id": self.label_map['text']
})
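    # A minimal `obj` accepted by add_bbox (hypothetical values, for illustration):
    # {
    #     'bbox': [10, 20, 30, 40],  # x, y, w, h
    #     'segmentation': [[10, 20, 40, 20, 40, 60, 10, 60]],
    #     'text': {'transcription': 'word', 'legible': 1, 'language': 'english'},
    # }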
def __iadd__(self, other):
for image_info in other.annotation['images']:
ann_ids = other.img_id_2_ann_id[image_info['id']]
for ann_id in ann_ids:
ann = other.annotation['annotations'][ann_id]
self.add_bbox(image_info['file_name'], (image_info['width'], image_info['height']),
copy.deepcopy(ann))
return self
def write(self, path):
""" Writes annotation as json file. """
annotation = copy.deepcopy(self.annotation)
for image_info in annotation['images']:
image_info['file_name'] = os.path.relpath(image_info['file_name'],
os.path.dirname(path))
with open(path, 'w') as read_file:
json.dump(annotation, read_file)
@staticmethod
def _check_object_consistency(obj):
assert obj['iscrowd'] == 1 - obj['text']['legible']
def visualize(self, put_text, imshow_delay=1):
""" Visualizes annotation using cv2.imshow from OpenCV. Press `Esc` to exit. """
max_image_size = 1280, 768
for frame in tqdm(self.annotation['images']):
image_path = frame['file_name']
image = cv2.imread(image_path)
for ann_id in self.img_id_2_ann_id[frame['id']]:
obj = self.annotation['annotations'][ann_id]
lwd = 2
color = (0, 255, 0)
if obj['iscrowd']:
color = (128, 128, 128)
bbox = obj['bbox']
if put_text:
cv2.putText(image, obj['text']['transcription'], tuple(bbox[0:2]), 1, 1.0,
color)
cv2.rectangle(image, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]),
color, lwd)
contours = np.array(obj['segmentation'])
contours = contours.reshape([contours.shape[0], contours.shape[1] // 2, 2])
cv2.drawContours(image, contours, 0, color, 1)
try:
if image.shape[1] > max_image_size[0] or image.shape[0] > max_image_size[1]:
print('resized')
image = cv2.resize(image, max_image_size)
cv2.imshow('image', image)
k = cv2.waitKey(imshow_delay)
if k == 27:
break
            except Exception:
print('Error: image is empty or corrupted: ', frame['file_name'])
def extract_text_recognition_dataset(self, path):
""" Crops text instances and saves as another dataset. """
os.makedirs(os.path.join(path, 'images'))
annotation = []
for frame in tqdm(self.annotation['images']):
image = cv2.imread(frame['file_name'], cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)
for ann_id in self.img_id_2_ann_id[frame['id']]:
obj = self.annotation['annotations'][ann_id]
if obj['text']['legible']:
bbox = obj['bbox']
try:
transcription = obj['text']['transcription']
if transcription.isalnum():
coord_x1, coord_y1, coord_x2, coord_y2 = bbox[0], bbox[1], bbox[0] + \
bbox[2], bbox[1] + bbox[3]
coord_x1 = max(0, coord_x1)
coord_x2 = min(image.shape[1] - 1, coord_x2)
coord_y1 = max(0, coord_y1)
coord_y2 = min(image.shape[0] - 1, coord_y2)
crop_path = os.path.join(path, 'images', f'image{len(annotation)}.jpg')
annotation.append(f'{crop_path} {transcription}')
cv2.imwrite(crop_path, image[coord_y1:coord_y2, coord_x1:coord_x2])
                    except Exception:
print('Something went wrong with', frame['file_name'])
break
with open(os.path.join(path, 'annotation.txt'), 'w') as file:
file.write('\n'.join(annotation))
class ICDAR2013DatasetConverter:
""" Class for conversion of ICDAR2013 to TextOnlyCocoAnnotation. """
def __init__(self, images_folder, annotations_folder, is_train, root=''):
self.images_folder = images_folder
self.annotations_folder = annotations_folder
self.is_train = is_train
if root:
self.annotations_folder = os.path.join(root, self.annotations_folder)
self.images_folder = os.path.join(root, self.images_folder)
def __call__(self, *args, **kwargs):
dataset = TextOnlyCocoAnnotation()
begin, end = (100, 328 + 1) if self.is_train else (1, 233 + 1)
gt_format = 'gt_{}.txt' if self.is_train else 'gt_img_{}.txt'
img_format = '{}.jpg' if self.is_train else 'img_{}.jpg'
for i in range(begin, end):
image_path = os.path.join(self.images_folder, img_format.format(i))
annotation_path = os.path.join(self.annotations_folder, gt_format.format(i))
with open(annotation_path, encoding='utf-8-sig') as read_file:
for line in [line.strip() for line in read_file.readlines()]:
image_size = imagesize.get(image_path)
dataset.add_bbox(image_path, image_size, self.parse_line(line))
return dataset
def parse_line(self, line):
""" Parses line of ICDAR2013 annotation. """
sep = ' ' if self.is_train else ', '
line = line.split(sep)
xmin, ymin, xmax, ymax = [int(x) for x in line[:4]]
assert xmin < xmax
assert ymin < ymax
transcription = (sep.join(line[4:]))[1:-1]
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]],
'text': {
'transcription': transcription,
'legible': 1,
'language': 'english',
}
}
return word_annotation
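    # Hypothetical example: the training line '38 43 920 215 "Tiredness"' yields
    # bbox [38, 43, 883, 173], a rectangular 4-point segmentation and the
    # transcription 'Tiredness'; test-set lines use ', ' as the separator instead.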
class ICDAR2015DatasetConverter:
""" Class for conversion of ICDAR2015 to TextOnlyCocoAnnotation. """
def __init__(self, images_folder, annotations_folder, is_train, root=''):
self.images_folder = images_folder
self.annotations_folder = annotations_folder
self.is_train = is_train
if root:
self.annotations_folder = os.path.join(root, self.annotations_folder)
self.images_folder = os.path.join(root, self.images_folder)
@staticmethod
def parse_line(line):
""" Parses line of ICDAR2015 annotation. """
line = line.split(',')
quadrilateral = [int(x) for x in line[:8]]
transcription = ','.join(line[8:])
legible = 1
language = 'english'
if transcription == '###':
transcription = ''
legible = 0
language = ''
xmin = min(quadrilateral[0::2])
xmax = max(quadrilateral[0::2])
ymin = min(quadrilateral[1::2])
ymax = max(quadrilateral[1::2])
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [quadrilateral],
'text': {
'transcription': transcription,
'legible': legible,
'language': language,
}
}
return word_annotation
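    # Hypothetical example: '377,117,463,117,465,130,378,132,Genaxis' maps the
    # quadrilateral to an axis-aligned bbox and keeps the transcription, while a
    # trailing '###' marks the region as illegible (transcription cleared, legible=0).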
def __call__(self, *args, **kwargs):
""" Converts annotation from ICDAR 2015 format to internal format. """
dataset = TextOnlyCocoAnnotation()
n_images = 1000 if self.is_train else 500
for i in range(1, n_images + 1):
image_path = os.path.join(self.images_folder, 'img_{}.jpg'.format(i))
annotation_path = os.path.join(self.annotations_folder, 'gt_img_{}.txt'.format(i))
with open(annotation_path, encoding='utf-8-sig') as read_file:
content = [line.strip() for line in read_file.readlines()]
for line in content:
dataset.add_bbox(image_path, imagesize.get(image_path), self.parse_line(line))
return dataset
class ICDAR2017MLTDatasetConverter:
""" Class for conversion of ICDAR2017 to TextOnlyCocoAnnotation. """
def __init__(self, folder, subset, is_latin_required, root=''):
'''
Converts ICDAR2017 MLT to TextOnlyCocoAnnotation
:param folder: Folder with extracted zip archives containing images and annotation.
:param subset: 'train' or 'val'
:param is_latin_required: if it is True then images that do not contain latin text will be
filtered out.
'''
self.folder = folder
self.subset = subset
self.is_latin_required = is_latin_required
if root:
self.folder = os.path.join(root, self.folder)
assert self.subset in ['train', 'val']
if self.subset == 'train':
for i in range(1, 9):
assert os.path.exists(os.path.join(self.folder, f'ch8_training_images_{i}'))
assert os.path.exists(
os.path.join(self.folder, 'ch8_training_localization_transcription_gt_v2'))
elif self.subset == 'val':
assert os.path.exists(
os.path.join(self.folder, 'ch8_validation_images'))
assert os.path.exists(
os.path.join(self.folder, 'ch8_validation_localization_transcription_gt_v2'))
@staticmethod
def parse_line(line):
""" Parses line of ICDAR2015 annotation. """
line = line.split(',')
quadrilateral = [int(x) for x in line[:8]]
language = line[8]
transcription = ','.join(line[9:])
legible = 1
if transcription == '###':
transcription = ''
legible = 0
language = ''
xmin = min(quadrilateral[0::2])
xmax = max(quadrilateral[0::2])
ymin = min(quadrilateral[1::2])
ymax = max(quadrilateral[1::2])
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [quadrilateral],
'text': {
'transcription': transcription,
'legible': legible,
'language': language,
}
}
return word_annotation
def collect_train_paths(self):
""" Collects images and annotations paths for training set. """
image_paths = []
annotation_paths = []
n_images = 7200
for i in range(1, n_images + 1):
added = False
for extension in ['jpg', 'png']:
image_path = os.path.join(self.folder,
f'ch8_training_images_{(i - 1) // 1000 + 1}',
f'img_{i}.{extension}')
if os.path.exists(image_path):
image_paths.append(image_path)
added = True
break
if added:
annotation_paths.append(
os.path.join(self.folder, 'ch8_training_localization_transcription_gt_v2',
f'gt_img_{i}.txt')
)
else:
print(f'Could not find: {image_path[:-3]}*')
return image_paths, annotation_paths
def collect_val_paths(self):
""" Collects images and annotations paths for validation set. """
image_paths = []
annotation_paths = []
n_images = 1800
for i in range(1, n_images + 1):
added = False
for extension in ['jpg', 'png']:
image_path = os.path.join(self.folder,
'ch8_validation_images',
f'img_{i}.{extension}')
if os.path.exists(image_path):
image_paths.append(image_path)
added = True
break
if added:
annotation_paths.append(
os.path.join(self.folder, 'ch8_validation_localization_transcription_gt_v2',
f'gt_img_{i}.txt')
)
else:
print(f'Could not find: {image_path[:-3]}*')
return image_paths, annotation_paths
def __call__(self, *args, **kwargs):
""" Converts annotation from ICDAR 2017 format to internal format. """
dataset = TextOnlyCocoAnnotation()
if self.subset == 'train':
image_paths, annotation_paths = self.collect_train_paths()
elif self.subset == 'val':
image_paths, annotation_paths = self.collect_val_paths()
for image_path, annotation_path in zip(image_paths, annotation_paths):
word_annotations = []
with open(annotation_path, encoding='utf-8-sig') as read_file:
content = [line.strip() for line in read_file.readlines()]
for line in content:
word_annotations.append(self.parse_line(line))
should_add = not self.is_latin_required
if self.is_latin_required:
for word_annotation in word_annotations:
if word_annotation['text']['language'].lower() == 'latin':
should_add = True
break
if should_add:
for word_annotation in word_annotations:
dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)
return dataset
class ICDAR2019MLTDatasetConverter:
""" Class for conversion of ICDAR2019 to TextOnlyCocoAnnotation. """
def __init__(self, folder, is_latin_required, root=''):
'''
Converts ICDAR2019 MLT to TextOnlyCocoAnnotation
:param folder: Folder with extracted zip archives containing images and annotation.
:param is_latin_required: if it is True then images that do not contain latin text will be
filtered out.
'''
self.folder = folder
self.is_latin_required = is_latin_required
if root:
self.folder = os.path.join(root, self.folder)
assert os.path.exists(os.path.join(self.folder, 'ImagesPart1'))
assert os.path.exists(os.path.join(self.folder, 'ImagesPart2'))
assert os.path.exists(os.path.join(self.folder, 'train_gt_t13'))
@staticmethod
def parse_line(line):
""" Parses line of ICDAR2019 annotation. """
line = line.split(',')
quadrilateral = [int(x) for x in line[:8]]
language = line[8]
transcription = ','.join(line[9:])
legible = 1
if transcription == '###':
transcription = ''
legible = 0
language = ''
xmin = min(quadrilateral[0::2])
xmax = max(quadrilateral[0::2])
ymin = min(quadrilateral[1::2])
ymax = max(quadrilateral[1::2])
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [quadrilateral],
'text': {
'transcription': transcription,
'legible': legible,
'language': language,
}
}
return word_annotation
def collect_train_paths(self):
""" Collects images and annotations paths for training set. """
image_paths = []
annotation_paths = []
n_images = 10000
for i in range(1, n_images + 1):
added = False
for extension in ['jpg', 'png']:
image_path = os.path.join(self.folder,
f'ImagesPart{(i - 1) // 5000 + 1}',
f'tr_img_{i:05}.{extension}')
if os.path.exists(image_path):
image_paths.append(image_path)
added = True
break
if added:
annotation_paths.append(
os.path.join(self.folder, 'train_gt_t13', f'tr_img_{i:05}.txt')
)
else:
print(f'Could not find: {image_path[:-3]}*')
return image_paths, annotation_paths
def __call__(self, *args, **kwargs):
""" Converts annotation from ICDAR 2019 format to internal format. """
dataset = TextOnlyCocoAnnotation()
image_paths, annotation_paths = self.collect_train_paths()
for image_path, annotation_path in zip(image_paths, annotation_paths):
word_annotations = []
with open(annotation_path, encoding='utf-8-sig') as read_file:
content = [line.strip() for line in read_file.readlines()]
for line in content:
word_annotations.append(self.parse_line(line))
should_add = not self.is_latin_required
if self.is_latin_required:
for word_annotation in word_annotations:
if word_annotation['text']['language'].lower() == 'latin':
should_add = True
break
if should_add:
for word_annotation in word_annotations:
dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)
return dataset
class MSRATD500DatasetConverter:
""" Class for conversion of MSRA-TD500 to TextOnlyCocoAnnotation. """
def __init__(self, folder, root=''):
self.folder = folder
if root:
self.folder = os.path.join(root, self.folder)
@staticmethod
def parse_line(line):
""" Parses line of MSRA-TD500 annotation. """
line = line.split(' ')
_, _, top_left_x, top_left_y, width, height, rotation = [float(x) for x in line]
box = cv2.boxPoints(((top_left_x + width / 2, top_left_y + height / 2),
(width, height), rotation * 57.2958))
quadrilateral = [int(x) for x in box.reshape([-1])]
xmin = min(quadrilateral[0::2])
xmax = max(quadrilateral[0::2])
ymin = min(quadrilateral[1::2])
ymax = max(quadrilateral[1::2])
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [quadrilateral],
'text': {
'transcription': '',
'legible': 1,
'language': '',
}
}
return word_annotation
def __call__(self, *args, **kwargs):
""" Converts annotation from MSRA-TD500 format to internal format. """
dataset = TextOnlyCocoAnnotation()
for image_name in sorted(os.listdir(self.folder)):
if image_name.endswith('JPG'):
image_path = os.path.join(self.folder, image_name)
annotation_path = os.path.join(self.folder, image_name.replace('.JPG', '.gt'))
with open(annotation_path, encoding='utf-8-sig') as read_file:
content = [line.strip() for line in read_file.readlines()]
for line in content:
dataset.add_bbox(image_path, imagesize.get(image_path),
self.parse_line(line))
return dataset
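# MSRA-TD500 ground truth stores one rotated box per line as
# "index difficulty x y w h angle", with the angle in radians; parse_line above
# multiplies by 57.2958 (~180/pi) because cv2.boxPoints expects degrees.
# A minimal usage sketch (the folder name is a placeholder):
#
#     dataset = MSRATD500DatasetConverter(folder='MSRA-TD500/train')()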
class COCOTextDatasetConverter:
""" Class for conversion of COCO-Text to TextOnlyCocoAnnotation. """
def __init__(self, path, sets=None, root=''):
self.path = path
if root:
self.path = os.path.join(root, self.path)
self.sets = sets
if self.sets is None:
self.sets = ['train']  # or ['train', 'val']
@staticmethod
def parse_annotation_instance(annotation):
""" Parses annotation instance of COCO-Text dataset. """
text = annotation['utf8_string']
language = annotation['language']
legible = int(annotation['legibility'] == 'legible')
mask = np.reshape(np.array(annotation['mask'], np.int32), (-1, 2))
box = cv2.boxPoints(cv2.minAreaRect(mask))
quadrilateral = [int(x) for x in box.reshape([-1])]
xmin = min(quadrilateral[0::2])
xmax = max(quadrilateral[0::2])
ymin = min(quadrilateral[1::2])
ymax = max(quadrilateral[1::2])
word_annotation = {
'bbox': [xmin, ymin, xmax - xmin + 1, ymax - ymin + 1],
'segmentation': [quadrilateral],
'text': {
'transcription': text,
'legible': legible,
'language': language,
}
}
return word_annotation
def __call__(self):
""" Converts annotation from COCO-TEXT format to internal format. """
dataset = TextOnlyCocoAnnotation()
with open(self.path) as read_file:
json_loaded = json.load(read_file)
for i, value in json_loaded['imgs'].items():
image_path = os.path.join(os.path.dirname(self.path), 'train2014',
value['file_name'])
dataset_type = value['set']
if dataset_type not in self.sets:
print(dataset_type)
continue
for annotation_id in json_loaded['imgToAnns'][i]:
annotation_value = json_loaded['anns'][str(annotation_id)]
word_annotation = self.parse_annotation_instance(annotation_value)
dataset.add_bbox(image_path, imagesize.get(image_path), word_annotation)
return dataset
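# A minimal usage sketch (the annotation path is a placeholder; images are expected
# next to it under a 'train2014' directory, as hard-coded in __call__ above):
#
#     dataset = COCOTextDatasetConverter('data/cocotext/COCO_Text.json')()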
str_to_class = {
'ICDAR2013DatasetConverter': ICDAR2013DatasetConverter,
'ICDAR2015DatasetConverter': ICDAR2015DatasetConverter,
'ICDAR2019MLTDatasetConverter': ICDAR2019MLTDatasetConverter,
'MSRATD500DatasetConverter': MSRATD500DatasetConverter,
'COCOTextDatasetConverter': COCOTextDatasetConverter,
}
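# The mapping above lets converters be instantiated by name, e.g. from a YAML/JSON
# config. A sketch (the 'name' and 'kwargs' keys are illustrative, not a fixed schema):
#
#     spec = {'name': 'ICDAR2015DatasetConverter',
#             'kwargs': {'images_folder': 'imgs', 'annotations_folder': 'gt', 'is_train': True}}
#     dataset = str_to_class[spec['name']](**spec['kwargs'])()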
| 37.579025
| 100
| 0.552966
|
0a06a6e077bdec81e2d5cc5f0593b79585aecc8b
| 77
|
py
|
Python
|
tests/development/conftest.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 69
|
2016-06-29T16:13:55.000Z
|
2022-03-21T06:38:37.000Z
|
tests/development/conftest.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 237
|
2016-09-28T02:12:34.000Z
|
2022-03-25T13:32:23.000Z
|
tests/development/conftest.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 45
|
2017-01-04T21:20:27.000Z
|
2021-12-29T10:42:22.000Z
|
from twindb_backup import setup_logging, LOG
setup_logging(LOG, debug=True)
| 19.25
| 44
| 0.831169
|
652e166f64b376d9e6ea0b8ee4469ffb49db67de
| 4,046
|
py
|
Python
|
laikarestd.py
|
sandialabs/laikaboss
|
3064ac1176911651d61c5176e9bd83eacec36b16
|
[
"Apache-2.0"
] | 2
|
2019-11-02T23:40:23.000Z
|
2019-12-01T22:24:57.000Z
|
laikarestd.py
|
sandialabs/laikaboss
|
3064ac1176911651d61c5176e9bd83eacec36b16
|
[
"Apache-2.0"
] | null | null | null |
laikarestd.py
|
sandialabs/laikaboss
|
3064ac1176911651d61c5176e9bd83eacec36b16
|
[
"Apache-2.0"
] | 3
|
2017-08-09T23:58:40.000Z
|
2019-12-01T22:25:06.000Z
|
#!/usr/bin/env python
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from flask import g
from flask import Flask
from flask_cors import CORS
from flask import g
from werkzeug.middleware.proxy_fix import ProxyFix
from laikaboss.lbconfigparser import LBConfigParser
from laikaboss.storage_utils import redisclient_from_url
from laikarest import routes
def create_flask_app():
""" Creates a flask web server """
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
CORS(app, supports_credentials=True) # Allow requests from different origin
return app
def setupLoggers(laikarest_config, app):
logFormatter = logging.Formatter("%(asctime)s - %(process)d [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
log_file_path = laikarest_config["log_file"]
debug = True if laikarest_config.get("debug", '').lower() == 'true' else False
fileHandler = logging.FileHandler(log_file_path)
fileHandler.setFormatter(logFormatter)
app.logger.addHandler(fileHandler)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
app.logger.addHandler(consoleHandler)
rootLogger.addHandler(consoleHandler)
if debug:
app.logger.setLevel(logging.DEBUG)
rootLogger.setLevel(logging.DEBUG)
else:
if __name__ != "__main__":
# if running under gunicorn set the loggers to that level
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.setLevel(gunicorn_logger.level)
rootLogger.setLevel(gunicorn_logger.level)
else:
app.logger.setLevel(logging.INFO)
rootLogger.setLevel(logging.INFO)
# create Flask application
app = create_flask_app()
# path to config
config_file = app.config.get("CONFIG_FILE", "/etc/laikaboss/laikarestd.conf")
# environment variable which, if present, must contain the client secret; this overrides the config file value
CLIENT_SECRET_VAR = "CLIENT_SECRET"
lb_api_client_id = "laikaboss-api"
lb_api_client_secret_file = "/etc/laikaboss/secrets/client_secret"
default_config = {
"submission_dir": "/var/laikaboss/submission-queue",
"lb_client_secret_file": lb_api_client_secret_file,
"lb_client": lb_api_client_id,
"lb_grant_type": "unset",
"jwt_enabled": "False",
"redis_url" : "redis://127.0.0.1:6379/0",
"max_submission_size": 100 * 1024 * 1024
}
# Read config file into a dict
config = LBConfigParser()
config.read(config_file)
laikarest_config = default_config.copy()
laikarest_config.update(config.items("General"))
laikarest_config.update(config.items("laikarestd"))
storage_gui_config = default_config.copy()
storage_gui_config.update(config.items("General"))
storage_gui_config.update(config.items("storage-gui"))
# Setup logging
setupLoggers(laikarest_config, app)
# Register the routes pertaining to this application
routes.init_app(app, laikarest_config, storage_gui_config)
if __name__ == "__main__":
# Start Flask web server
# It should be okay to bind to all interfaces because gunicorn is
# running on production and doesn't expose port 8123 to the world
# (e.g. binding to all interfaces is convenient for dev work)
app.run(host="0.0.0.0", port=8123, debug=False)
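# In production the same module-level `app` object is meant to be served by gunicorn
# instead of app.run(); the exact invocation is deployment-specific, but a typical
# sketch would be:
#
#     gunicorn --bind 127.0.0.1:8123 laikarestd:app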
| 35.80531
| 110
| 0.749135
|
67c808f4ed62f104cc9a60d4442724dac655b0ff
| 3,406
|
py
|
Python
|
sdv/docker/sdvstate/tools/conf/__init__.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2
|
2021-09-16T06:31:45.000Z
|
2022-03-09T19:59:55.000Z
|
sdv/docker/sdvstate/tools/conf/__init__.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | null | null | null |
sdv/docker/sdvstate/tools/conf/__init__.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2
|
2021-05-11T14:41:01.000Z
|
2021-05-14T05:59:38.000Z
|
# Copyright 2020 University Of Delhi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Settings and configuration handlers.
Settings will be loaded from several .yaml or .yml files
and any user provided settings file.
"""
import os
import ast
import yaml
# pylint: disable=invalid-name
class Settings():
"""Holding class for settings.
"""
def __init__(self):
pass
def getValue(self, attr):
"""
Return a settings item value
"""
try:
attr = attr.lower()
return getattr(self, attr)
except AttributeError:
raise AttributeError("{obj} object has no attribute \
{attr}".format(obj=self.__class__, attr=attr))
def setValue(self, name, value):
"""Set a value
"""
if name is not None and value is not None:
super(Settings, self).__setattr__(name.lower(), value)
def load_from_file(self, path):
"""Update ``settings`` with values found in module at ``path``.
"""
with open(path) as file:
configs = yaml.load_all(file, Loader=yaml.SafeLoader)
for conf in configs:
for name, value in conf.items():
self.setValue(name, value)
def load_from_env(self):
"""
Update ``settings`` with values found in the environment.
"""
for key in os.environ:
value = os.environ[key]
#evaluate string to python type
try:
value = ast.literal_eval(os.environ[key])
except (ValueError, SyntaxError):
pass #already string
self.setValue(key, value)
def load_from_dir(self, dir_path):
"""Update ``settings`` with contents of the yaml files at ``path``.
Files are read in ascending order, so if a configuration item
exists in more than one file, the value from the file read last
takes precedence and overwrites earlier values.
The same precedence logic applies to sub-directories;
a child directory takes precedence over its parent.
:param dir_path: The full path to the dir from which to load the
yaml files.
:returns: None
"""
files = list_yamls(dir_path)
for file in files:
self.load_from_file(file)
settings = Settings()
def list_yamls(dir_path):
"""Get all yaml files recursively in ``dir_path``
"""
files = []
dir_list = [x[0] for x in os.walk(dir_path)]
dir_list.sort()
for path in dir_list:
dir_files = [path+'/'+f for f in os.listdir(path)
if f.endswith('.yaml') or f.endswith('.yml')]
if dir_files is not None:
dir_files.sort()
files.extend(dir_files)
return files
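# A minimal usage sketch for the module-level ``settings`` object (the directory name
# is a placeholder; load order determines precedence, so values loaded later win):
#
#     settings.load_from_dir('tools/conf')   # .yaml/.yml files, read in sorted order
#     settings.load_from_env()               # environment values override file values here
#     value = settings.getValue('SOME_KEY')  # attribute names are lower-cased internally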
| 28.14876
| 79
| 0.611861
|
d178cbddbc46bdb4b04a594ebd401f53bbcd4a85
| 453
|
py
|
Python
|
zezere/migrations/0008_runrequest_raw_settings.py
|
Rintsi/zezere
|
1a49476d3d9cef26d65c7dcab2c4abb47938b934
|
[
"MIT"
] | 32
|
2020-02-16T21:37:22.000Z
|
2022-03-29T06:34:28.000Z
|
zezere/migrations/0008_runrequest_raw_settings.py
|
Rintsi/zezere
|
1a49476d3d9cef26d65c7dcab2c4abb47938b934
|
[
"MIT"
] | 44
|
2019-12-18T14:03:22.000Z
|
2022-03-31T11:54:21.000Z
|
zezere/migrations/0008_runrequest_raw_settings.py
|
Rintsi/zezere
|
1a49476d3d9cef26d65c7dcab2c4abb47938b934
|
[
"MIT"
] | 15
|
2019-12-05T18:46:35.000Z
|
2022-03-29T12:21:33.000Z
|
# Generated by Django 2.2.6 on 2019-10-29 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("zezere", "0007_auto_20191021_1444")]
operations = [
migrations.AddField(
model_name="runrequest",
name="raw_settings",
field=models.TextField(
blank=True, null=True, verbose_name="JSON-encoded settings"
),
)
]
| 23.842105
| 75
| 0.604857
|
8aeef6c8143075baaddb2fc79089598e98ac648c
| 953
|
py
|
Python
|
tests/v1/test_timeseries_widget_definition_type.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v1/test_timeseries_widget_definition_type.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
tests/v1/test_timeseries_widget_definition_type.py
|
MichaelTROEHLER/datadog-api-client-python
|
12c46626622fb1277bb1e172753b342c671348bd
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
from datadog_api_client.v1.model.timeseries_widget_definition_type import TimeseriesWidgetDefinitionType
class TestTimeseriesWidgetDefinitionType(unittest.TestCase):
"""TimeseriesWidgetDefinitionType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTimeseriesWidgetDefinitionType(self):
"""Test TimeseriesWidgetDefinitionType"""
# FIXME: construct object with mandatory attributes with example values
# model = TimeseriesWidgetDefinitionType() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 28.029412
| 108
| 0.756558
|
248aeefc8743bf6be9be72cfb51fb2c4bf80ab8e
| 130
|
py
|
Python
|
pypro/base/views.py
|
limberger/curso-django
|
9b099a9934871c221be2018d2e80331e90bee40f
|
[
"Apache-2.0"
] | null | null | null |
pypro/base/views.py
|
limberger/curso-django
|
9b099a9934871c221be2018d2e80331e90bee40f
|
[
"Apache-2.0"
] | 1,012
|
2020-06-22T21:43:39.000Z
|
2022-03-31T22:09:32.000Z
|
pypro/base/views.py
|
limberger/curso-django
|
9b099a9934871c221be2018d2e80331e90bee40f
|
[
"Apache-2.0"
] | 1
|
2020-08-06T19:50:33.000Z
|
2020-08-06T19:50:33.000Z
|
# Create your views here.
from django.shortcuts import render
def home(request):
return render(request, 'base/home.html')
| 14.444444
| 44
| 0.730769
|
e3273fcb87325d9e3c5c093beacf6a58fc5c00f4
| 174
|
py
|
Python
|
02/00/getreader.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
02/00/getreader.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | 32
|
2017-09-01T00:52:17.000Z
|
2017-10-01T00:30:02.000Z
|
02/00/getreader.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
#!python3.6
#encoding:utf-8
import codecs
text = '日本語'
for enc in ['utf-8', 'utf-16LE', 'utf-16BE', 'utf-32', 'shift-jis', 'euc-jp']:
print(enc, codecs.getreader(enc))
| 19.333333
| 78
| 0.626437
|
3abd6121a2045489e599f3d8fb6fcb1e7db9ab8b
| 10,424
|
py
|
Python
|
utils/dataloaders.py
|
adrift00/imagenet_pretrain
|
fb824a860b105aad0bda1c4dcc0b9bffea5fb418
|
[
"Apache-2.0"
] | null | null | null |
utils/dataloaders.py
|
adrift00/imagenet_pretrain
|
fb824a860b105aad0bda1c4dcc0b9bffea5fb418
|
[
"Apache-2.0"
] | null | null | null |
utils/dataloaders.py
|
adrift00/imagenet_pretrain
|
fb824a860b105aad0bda1c4dcc0b9bffea5fb418
|
[
"Apache-2.0"
] | null | null | null |
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
DATA_BACKEND_CHOICES = ['pytorch']
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append('dali-gpu')
DATA_BACKEND_CHOICES.append('dali-cpu')
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.nvJPEGDecoderRandomCrop(device="mixed", output_type=types.RGB, device_memory_padding=211025920, host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.nvJPEGDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dalipipeline):
for data in dalipipeline:
input = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline):
self.dalipipeline = dalipipeline
def __iter__(self):
return DALIWrapper.gen_wrapper(self.dalipipeline)
def get_dali_train_loader(dali_cpu=False):
def gdtl(data_path, batch_size, workers=5, _worker_init_fn=None):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
traindir = os.path.join(data_path, 'train')
pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = traindir, crop = 224, dali_cpu=dali_cpu)
pipe.build()
test_run = pipe.run()
train_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(train_loader), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdtl
def get_dali_val_loader():
def gdvl(data_path, batch_size, workers=5, _worker_init_fn=None):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
valdir = os.path.join(data_path, 'val')
pipe = HybridValPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = valdir,
crop = 224, size = 256)
pipe.build()
test_run = pipe.run()
val_loader = DALIClassificationIterator(pipe, size = int(pipe.epoch_size("Reader") / world_size), fill_last_batch=False)
return DALIWrapper(val_loader), int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdvl
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
tens = torch.from_numpy(nump_array)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
class PrefetchedWrapper(object):
def prefetched_loader(loader):
mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
stream = torch.cuda.Stream()
first = True
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda()
next_target = next_target.cuda()
next_input = next_input.float()
# next_input = next_input.sub_(mean).div_(std) # don't use normalization because siamrpn doesn't use it.
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader):
self.dataloader = dataloader
self.epoch = 0
def __iter__(self):
if (self.dataloader.sampler is not None and
isinstance(self.dataloader.sampler,
torch.utils.data.distributed.DistributedSampler)):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(self.dataloader)
def get_pytorch_train_loader(data_path, batch_size, workers=5, _worker_init_fn=None, input_size=224):
traindir = os.path.join(data_path, 'train')
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
]))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
return PrefetchedWrapper(train_loader), len(train_loader)
def get_pytorch_val_loader(data_path, batch_size, workers=5, _worker_init_fn=None, input_size=224):
valdir = os.path.join(data_path, 'val')
val_dataset = datasets.ImageFolder(
valdir, transforms.Compose([
transforms.Resize(int(input_size / 0.875)),
transforms.CenterCrop(input_size),
]))
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size, shuffle=False,
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
collate_fn=fast_collate)
return PrefetchedWrapper(val_loader), len(val_loader)
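# A minimal usage sketch for the PyTorch backend (the data path is a placeholder and is
# assumed to follow the usual ImageNet train/val folder layout):
#
#     train_loader, train_batches = get_pytorch_train_loader('/data/imagenet', batch_size=256)
#     for images, targets in train_loader:
#         ...  # images are float CUDA tensors (not normalized here), targets int64 CUDA tensors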
| 39.78626
| 156
| 0.601784
|
06c3f5dbf1ed4e6e30448f5a857e377a1753c66e
| 4,115
|
py
|
Python
|
amqp/abstract_channel.py
|
smurfix/py-amqp
|
583e5c8f2b6fc37070654e68efcdc6ed681b87ea
|
[
"BSD-3-Clause"
] | null | null | null |
amqp/abstract_channel.py
|
smurfix/py-amqp
|
583e5c8f2b6fc37070654e68efcdc6ed681b87ea
|
[
"BSD-3-Clause"
] | null | null | null |
amqp/abstract_channel.py
|
smurfix/py-amqp
|
583e5c8f2b6fc37070654e68efcdc6ed681b87ea
|
[
"BSD-3-Clause"
] | null | null | null |
"""Code common to Connection and Channel objects."""
# Copyright (C) 2007-2008 Barry Pederson <bp@barryp.org>)
from __future__ import absolute_import, unicode_literals
from vine import ensure_promise, promise
from .exceptions import AMQPNotImplementedError, RecoverableConnectionError
from .serialization import dumps, loads
__all__ = ['AbstractChannel']
class AbstractChannel(object):
"""Superclass for Connection and Channel.
The connection is treated as channel 0, then comes
user-created channel objects.
The subclasses must have a _METHODS class property, mapping
between AMQP method signatures and Python methods.
"""
def __init__(self, connection, channel_id):
self.connection = connection
self.channel_id = channel_id
connection.channels[channel_id] = self
self.method_queue = [] # Higher level queue for methods
self.auto_decode = False
self._pending = {}
self._callbacks = {}
self._setup_listeners()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def send_method(self, sig,
format=None, args=None, content=None,
wait=None, callback=None, returns_tuple=False):
p = promise()
conn = self.connection
if conn is None:
raise RecoverableConnectionError('connection already closed')
args = dumps(format, args) if format else ''
try:
conn.frame_writer(1, self.channel_id, sig, args, content)
except StopIteration:
raise RecoverableConnectionError('connection already closed')
# TODO temp: callback should be after write_method ... ;)
if callback:
p.then(callback)
p()
if wait:
return self.wait(wait, returns_tuple=returns_tuple)
return p
def close(self):
"""Close this Channel or Connection."""
raise NotImplementedError('Must be overridden in subclass')
def wait(self, method, callback=None, timeout=None, returns_tuple=False):
p = ensure_promise(callback)
pending = self._pending
prev_p = []
if not isinstance(method, list):
method = [method]
for m in method:
prev_p.append(pending.get(m))
pending[m] = p
try:
while not p.ready:
self.connection.drain_events(timeout=timeout)
if p.value:
args, kwargs = p.value
return args if returns_tuple else (args and args[0])
finally:
for i, m in enumerate(method):
if prev_p[i] is not None:
pending[m] = prev_p[i]
else:
pending.pop(m, None)
def dispatch_method(self, method_sig, payload, content):
if content and \
self.auto_decode and \
hasattr(content, 'content_encoding'):
try:
content.body = content.body.decode(content.content_encoding)
except Exception:
pass
try:
amqp_method = self._METHODS[method_sig]
except KeyError:
raise AMQPNotImplementedError(
'Unknown AMQP method {0!r}'.format(method_sig))
try:
listeners = [self._callbacks[method_sig]]
except KeyError:
listeners = None
try:
one_shot = self._pending.pop(method_sig)
except KeyError:
if not listeners:
return
else:
if listeners is None:
listeners = [one_shot]
else:
listeners.append(one_shot)
args = []
if amqp_method.args:
args, _ = loads(amqp_method.args, payload, 4)
if amqp_method.content:
args.append(content)
for listener in listeners:
listener(*args)
#: Placeholder, the concrete implementations will have to
#: supply their own versions of _METHODS
_METHODS = {}
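# Sketch of what a concrete subclass is expected to provide (names are illustrative only):
#
#     class MyChannel(AbstractChannel):
#         _METHODS = {...}  # method_sig -> record whose args/content fields dispatch_method reads
#         def _setup_listeners(self):
#             self._callbacks[some_method_sig] = self._on_some_method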
| 31.174242
| 77
| 0.590765
|
8d3bf08c426df034576d4813300d6f8a05caf297
| 1,713
|
py
|
Python
|
xen/xen-4.2.2/tools/xm-test/tests/block-list/06_block-list_checkremove_pos.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2018-02-02T00:15:26.000Z
|
2018-02-02T00:15:26.000Z
|
xen/xen-4.2.2/tools/xm-test/tests/block-list/06_block-list_checkremove_pos.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | null | null | null |
xen/xen-4.2.2/tools/xm-test/tests/block-list/06_block-list_checkremove_pos.py
|
zhiming-shen/Xen-Blanket-NG
|
47e59d9bb92e8fdc60942df526790ddb983a5496
|
[
"Apache-2.0"
] | 1
|
2019-05-27T09:47:18.000Z
|
2019-05-27T09:47:18.000Z
|
#!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: Dan Smith <danms@us.ibm.com>
from XmTestLib import *
from XmTestLib.block_utils import *
if ENABLE_HVM_SUPPORT:
SKIP("Block-list not supported for HVM domains")
domain = XmTestDomain()
try:
domain.start(noConsole=True)
except DomainError, e:
FAIL(str(e))
s, o = traceCommand("xm block-list %s" % domain.getName())
if s != 0:
FAIL("block-list returned !0 when no devices attached")
if o:
FAIL("block-list without devices reported something!")
block_attach(domain, "phy:/dev/ram0", "xvda1")
s, o = traceCommand("xm block-list %s" % domain.getName())
if s != 0:
FAIL("block-list failed")
if o.find("51713") == -1:
FAIL("block-list didn't show the block device I just attached!")
block_attach(domain, "phy:/dev/ram1", "xvda2")
s, o = traceCommand("xm block-list %s" % domain.getName())
if s != 0:
FAIL("block-list failed")
if o.find("51714") == -1:
FAIL("block-list didn't show the other block device I just attached!")
block_detach(domain, "xvda1")
s, o = traceCommand("xm block-list %s" % domain.getName())
if s != 0:
FAIL("block-list failed after detaching a device")
if o.find("51713") != -1:
FAIL("xvda1 still shown in block-list after detach!")
if o.find("51714") == -1:
FAIL("xvda2 not shown after detach of xvda1!")
block_detach(domain, "xvda2")
s, o = traceCommand("xm block-list %s" % domain.getName())
if s != 0:
FAIL("block-list failed after detaching another device")
if o.find("51714") != -1:
FAIL("xvda2 still shown in block-list after detach!")
if o:
FAIL("block-list still shows something after all devices detached!")
domain.stop()
| 27.629032
| 74
| 0.676007
|
e976e750a39493a6066a1cef2c31e77767cc2a11
| 4,094
|
py
|
Python
|
python/graphscope/tests/unittest/test_java_app.py
|
luoxiaojian/GraphScope-1
|
97785684f2b2495c41dc079aed64198b5a6e1331
|
[
"Apache-2.0"
] | 2
|
2021-04-07T07:57:13.000Z
|
2021-11-19T09:44:01.000Z
|
python/graphscope/tests/unittest/test_java_app.py
|
luoxiaojian/GraphScope-1
|
97785684f2b2495c41dc079aed64198b5a6e1331
|
[
"Apache-2.0"
] | 16
|
2021-12-22T09:19:25.000Z
|
2022-03-29T02:43:34.000Z
|
python/graphscope/tests/unittest/test_java_app.py
|
luoxiaojian/GraphScope-1
|
97785684f2b2495c41dc079aed64198b5a6e1331
|
[
"Apache-2.0"
] | 2
|
2022-01-25T10:16:51.000Z
|
2022-02-07T11:51:20.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from graphscope import JavaApp
@pytest.fixture(scope="module")
def not_exist_jar():
path = os.path.join("not_exist_dir", "not_exist.jar")
return path
@pytest.fixture(scope="module")
def not_jar_file():
return os.path.expandvars("${GS_TEST_DIR}/p2p-31.e")
@pytest.fixture(scope="module")
def a_gar_file():
return os.path.expandvars("${GS_TEST_DIR}/gars/sssp_pie.gar")
@pytest.fixture(scope="module")
def empty_jar():
return os.path.expandvars("${GS_TEST_DIR}/jars/empty.jar")
@pytest.fixture(scope="module")
def demo_jar():
return os.path.expandvars("${USER_JAR_PATH}")
@pytest.fixture(scope="module")
def property_graph_sssp_vertex_data_class():
return "com.alibaba.graphscope.example.property.sssp.ParallelPropertySSSPVertexData"
@pytest.fixture(scope="module")
def non_exist_java_class():
return "com.alibaba.graphscope.example.non.existing.java.class"
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_load_non_existing_jar(
not_exist_jar, property_graph_sssp_vertex_data_class, non_exist_java_class
):
with pytest.raises(FileNotFoundError):
sssp = JavaApp(not_exist_jar, property_graph_sssp_vertex_data_class)
with pytest.raises(FileNotFoundError):
sssp = JavaApp(not_exist_jar, non_exist_java_class)
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_load_not_a_jar(
not_jar_file, property_graph_sssp_vertex_data_class, non_exist_java_class
):
with pytest.raises(KeyError):
sssp = JavaApp(not_jar_file, property_graph_sssp_vertex_data_class)
with pytest.raises(KeyError):
sssp = JavaApp(not_jar_file, non_exist_java_class)
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_load_gar_file(
a_gar_file, property_graph_sssp_vertex_data_class, non_exist_java_class
):
with pytest.raises(KeyError):
sssp = JavaApp(a_gar_file, property_graph_sssp_vertex_data_class)
with pytest.raises(KeyError):
sssp = JavaApp(a_gar_file, non_exist_java_class)
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_load_empty_jar(
empty_jar, property_graph_sssp_vertex_data_class, non_exist_java_class
):
with pytest.raises(KeyError):
sssp = JavaApp(empty_jar, property_graph_sssp_vertex_data_class)
with pytest.raises(KeyError):
sssp = JavaApp(empty_jar, non_exist_java_class)
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_load_correct_jar(property_graph_sssp_vertex_data_class, demo_jar):
sssp = JavaApp(demo_jar, property_graph_sssp_vertex_data_class)
@pytest.mark.skipif(
os.environ.get("RUN_JAVA_TESTS") != "ON",
reason="Java SDK is disabled, skip this test.",
)
def test_sssp_property_vertex_data(
demo_jar,
graphscope_session,
p2p_property_graph,
property_graph_sssp_vertex_data_class,
):
sssp = JavaApp(
full_jar_path=demo_jar, java_app_class=property_graph_sssp_vertex_data_class
)
sssp(p2p_property_graph, src=6)
| 29.242857
| 88
| 0.74597
|
94592d0c5d4dff58a268ddc1cd719d067e9701fd
| 1,824
|
py
|
Python
|
objectModel/Python/cdm/resolvedmodel/resolved_entity_reference_set.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 884
|
2019-05-10T02:09:10.000Z
|
2022-03-31T14:02:00.000Z
|
objectModel/Python/cdm/resolvedmodel/resolved_entity_reference_set.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | 171
|
2019-06-10T11:34:37.000Z
|
2022-03-31T22:50:12.000Z
|
objectModel/Python/cdm/resolvedmodel/resolved_entity_reference_set.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | 340
|
2019-05-07T18:00:16.000Z
|
2022-03-31T12:00:15.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import List, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from cdm.objectmodel import CdmEntityDefinition, SpewCatcher
from cdm.resolvedmodel import ResolvedEntityReference
from cdm.utilities import ResolveOptions
class ResolvedEntityReferenceSet:
def __init__(self, res_opt: 'ResolveOptions', rer_set: List['ResolvedEntityReference'] = None) -> None:
self.res_opt = res_opt # type: ResolveOptions
self.rer_set = rer_set or [] # type: List[ResolvedEntityReference]
def add(self, to_add: 'ResolvedEntityReferenceSet') -> None:
if to_add and to_add.rer_set:
self.rer_set += to_add.rer_set
def copy(self) -> 'ResolvedEntityReferenceSet':
return ResolvedEntityReferenceSet(self.res_opt, [rer.copy() for rer in self.rer_set])
def find_entity(self, ent_other: 'CdmEntityDefinition') -> Optional['ResolvedEntityReferenceSet']:
# Make an array of just the refs that include the requested.
filtered_set = [rer for rer in self.rer_set if any(rers.entity == ent_other for rers in rer.referenced)]
return ResolvedEntityReferenceSet(self.res_opt, filtered_set) if filtered_set else None
def spew(self, res_opt: 'ResolveOptions', to: 'SpewCatcher', indent: str, name_sort: bool) -> None:
if name_sort:
rer_list = sorted(
self.rer_set,
key=lambda rer: rer.referenced[0].entity.entity_name.casefold() if rer and rer.referenced else '')
else:
rer_list = self.rer_set
for idx, rer in enumerate(rer_list):
rer.spew(res_opt, to, indent + '(rer[' + str(idx) + '])', name_sort)
| 46.769231
| 114
| 0.697368
|
92ad9de318fd5152094b8872d8254796ff8f6e08
| 1,003
|
py
|
Python
|
tests/test_feedback.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | 16
|
2015-01-20T22:54:43.000Z
|
2021-07-07T03:33:04.000Z
|
tests/test_feedback.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_feedback.py
|
voyagegroup/apns-proxy-server
|
5858d1b33d37b9333ca153cd92f091bad9537455
|
[
"BSD-2-Clause"
] | 6
|
2015-01-22T05:00:36.000Z
|
2022-03-03T15:20:00.000Z
|
# -*- coding: utf-8 -*-
"""
Tests for apns_proxy_server.feedback
"""
from datetime import datetime
from nose.tools import ok_, eq_
import simplejson as json
from apns_proxy_server.feedback import FeedbackProxy
def test_instance():
proxy = FeedbackProxy(True, '/path/to/cert', '/path/to/key')
ok_(proxy)
ok_(proxy.use_sandbox)
eq_(proxy.cert_file, '/path/to/cert')
eq_(proxy.key_file, '/path/to/key')
def test_get():
disabled_datetime = datetime(1988, 4, 23, 2, 0, 0)
proxy = FeedbackProxy(True, '/path/to/cert', '/path/to/key')
proxy._apns = type('MockApns', (object,), {
'feedback_server': {
'token_value': disabled_datetime,
},
})
result = proxy.get()
json_result = json.loads(result)
ok_(result)
ok_(json_result)
ok_(isinstance(result, basestring))
ok_(isinstance(json_result, dict))
ok_('token_value' in json_result)
eq_(datetime.fromtimestamp(json_result['token_value']), disabled_datetime)
| 23.880952
| 78
| 0.666002
|
c261420a300c4ce53735f0cd1ba088344d58a79a
| 7,926
|
py
|
Python
|
aiida_vasp/workchains/tests/test_vasp_wc.py
|
MichaelWolloch/aiida-vasp
|
315b79bf874b8449dd702f1f3bc48c55aa89683b
|
[
"MIT"
] | 28
|
2019-03-06T11:33:01.000Z
|
2022-02-25T22:29:12.000Z
|
aiida_vasp/workchains/tests/test_vasp_wc.py
|
MichaelWolloch/aiida-vasp
|
315b79bf874b8449dd702f1f3bc48c55aa89683b
|
[
"MIT"
] | 386
|
2018-09-04T15:05:51.000Z
|
2022-03-04T12:18:39.000Z
|
aiida_vasp/workchains/tests/test_vasp_wc.py
|
MichaelWolloch/aiida-vasp
|
315b79bf874b8449dd702f1f3bc48c55aa89683b
|
[
"MIT"
] | 35
|
2019-01-14T17:12:08.000Z
|
2022-02-24T18:52:11.000Z
|
"""
Test submitting a VaspWorkChain.
This does not seem to work, for `submit` the daemon will not pick up the workchain
and `run` just seems to get stuck after a while.
"""
# pylint: disable=unused-import,wildcard-import,unused-wildcard-import,unused-argument,redefined-outer-name, import-outside-toplevel
from __future__ import print_function
import pytest
import numpy as np
from aiida.common.extendeddicts import AttributeDict
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.data import POTCAR_FAMILY_NAME, POTCAR_MAP
from aiida_vasp.utils.aiida_utils import get_data_node, aiida_version, cmp_version, create_authinfo
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc(fresh_aiida_env, run_vasp_process):
"""Test submitting only, not correctness, with mocked vasp code."""
results, node = run_vasp_process(process_type='workchain')
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
misc = results['misc'].get_dict()
assert misc['maximum_stress'] == pytest.approx(22.8499295)
assert misc['total_energies']['energy_extrapolated'] == pytest.approx(-14.16209692)
@pytest.mark.parametrize(['vasp_structure', 'vasp_kpoints'], [('str', 'mesh')], indirect=True)
def test_vasp_wc_chgcar(fresh_aiida_env, run_vasp_process):
"""Test submitting only, not correctness, with mocked vasp code, test fetching of the CHGCAR."""
settings = {'ADDITIONAL_RETRIEVE_LIST': ['CHGCAR'], 'parser_settings': {'add_chgcar': True}}
results, node = run_vasp_process(settings=settings, process_type='workchain')
assert node.exit_status == 0
assert 'chgcar' in results
assert results['chgcar'].get_content() == 'This is a test CHGCAR file.\n'
### COMPLEX WORKCHAIN TEST ###
def si_structure():
"""
Setup a silicon structure in a displaced FCC setting
"""
from aiida.plugins import DataFactory
structure_data = DataFactory('structure')
alat = 3.9
lattice = np.array([[.5, .5, 0], [0, .5, .5], [.5, 0, .5]]) * alat
structure = structure_data(cell=lattice)
positions = [[0.1, 0.0, 0.0]]
for pos_direct in positions:
pos_cartesian = np.dot(pos_direct, lattice)
structure.append_atom(position=pos_cartesian, symbols='Si')
return structure
# TEST INPUT FOR AUTOMATIC correction of NELM
# calculation should finish in the second run, after the NELM limit has been corrected
INCAR_ELEC_CONV = {
'encut': 240,
'ismear': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 7,
'ibrion': -1,
'potim': 0.01,
'nsw': -1,
'isif': 3,
# 'ediffg': -0.01
}
INCAR_IONIC_CONV = {
'encut': 240,
'ismear': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 15,
'ibrion': 1,
'potim': 0.1,
'nsw': 5,
'isif': 3,
}
# Parameters for test handling unfinished VASP. The first iteration was killed manually.
INCAR_IONIC_UNFINISHED = {
'encut': 500,
'ismear': 0,
'isym': 0,
'sigma': 0.1,
'ediff': 1e-9,
'nelm': 15,
'ibrion': 1,
'potim': 0.1,
'nsw': 20,
'isif': 3,
}
def setup_vasp_workchain(structure, incar, nkpts):
"""
Setup the inputs for a VaspWorkChain.
"""
from aiida.orm import Code
inputs = AttributeDict()
inputs.structure = structure
inputs.parameters = get_data_node('dict', dict={'incar': incar})
kpoints = get_data_node('array.kpoints')
kpoints.set_kpoints_mesh((nkpts, nkpts, nkpts))
inputs.kpoints = kpoints
inputs.potential_family = get_data_node('str', POTCAR_FAMILY_NAME)
inputs.potential_mapping = get_data_node('dict', dict=POTCAR_MAP)
inputs.options = get_data_node('dict',
dict={
'withmpi': False,
'queue_name': 'None',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
'max_wallclock_seconds': 3600
})
inputs.settings = get_data_node('dict', dict={'parser_settings': {'add_structure': True}})
mock = Code.get_from_string('mock-vasp-strict@localhost')
inputs.code = mock
return inputs
def test_vasp_wc_nelm(fresh_aiida_env, potentials, mock_vasp_strict):
"""Test with mocked vasp code for handling electronic convergence issues"""
from aiida.orm import Code
from aiida.plugins import WorkflowFactory
from aiida.engine import run
from aiida.cmdline.utils.common import get_calcjob_report, get_workchain_report
workchain = WorkflowFactory('vasp.vasp')
mock_vasp_strict.store()
create_authinfo(computer=mock_vasp_strict.computer, store=True)
inputs = setup_vasp_workchain(si_structure(), INCAR_ELEC_CONV, 8)
inputs.verbose = get_data_node('bool', True)
results, node = run.get_node(workchain, **inputs)
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
print(get_workchain_report(node, 'DEBUG'))
for child in called_nodes:
print(get_calcjob_report(child))
child = called_nodes[0]
print(child.get_object_content('INCAR'))
print(child.get_object_content('POSCAR'))
print(child.get_object_content('KPOINTS'))
print(child.outputs.retrieved.get_object_content('vasp_output'))
print(child.outputs.retrieved.list_object_names())
print(child.outputs.misc.get_dict())
print(child.exit_status)
child = called_nodes[1]
print(child.get_object_content('INCAR'))
print(child.get_object_content('POSCAR'))
print(child.get_object_content('KPOINTS'))
print(child.outputs.retrieved.get_object_content('vasp_output'))
print(child.outputs.retrieved.list_object_names())
print(child.outputs.misc.get_dict())
print(child.exit_status)
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
assert results['misc']['total_energies']['energy_extrapolated'] == pytest.approx(-4.82467802)
# Sort the called nodes by creation time
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
assert called_nodes[0].exit_status == 701
assert called_nodes[1].exit_status == 0
@pytest.mark.parametrize('incar,nkpts,exit_codes', [[INCAR_IONIC_CONV, 8, [702, 0]], [INCAR_IONIC_UNFINISHED, 16, [700, 0]]])
def test_vasp_wc_ionic_continue(fresh_aiida_env, potentials, mock_vasp_strict, incar, nkpts, exit_codes):
"""Test with mocked vasp code for handling ionic convergence issues"""
from aiida.orm import Code
from aiida.plugins import WorkflowFactory
from aiida.engine import run
workchain = WorkflowFactory('vasp.vasp')
mock_vasp_strict.store()
create_authinfo(computer=mock_vasp_strict.computer, store=True)
inputs = setup_vasp_workchain(si_structure(), incar, nkpts)
inputs.verbose = get_data_node('bool', True)
# The test calculation contains NELM breaches during the relaxation - set to ignore them.
inputs.handler_overrides = get_data_node('dict', dict={'ignore_nelm_breach_relax': True})
results, node = run.get_node(workchain, **inputs)
assert node.exit_status == 0
assert 'retrieved' in results
assert 'misc' in results
assert 'remote_folder' in results
assert results['misc']['run_status']['ionic_converged']
# Sort the called nodes by creation time
called_nodes = list(node.called)
called_nodes.sort(key=lambda x: x.ctime)
# Check the child status - here the first calculation is not finished but the second one is
for idx, code in enumerate(exit_codes):
assert called_nodes[idx].exit_status == code
| 35.070796
| 132
| 0.676634
|
35186bb87f7e509bca13d96949b0ab808bd786ad
| 6,384
|
py
|
Python
|
openstack_dashboard/dashboards/project/database_backups/tests.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/database_backups/tests.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/database_backups/tests.py
|
maofutian/horizon
|
dab92e7d2f576caea8f81c8e22a516fb45633794
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:database_backups:index')
BACKUP_URL = reverse('horizon:project:database_backups:create')
DETAILS_URL = reverse('horizon:project:database_backups:detail', args=['id'])
class DatabasesBackupsTests(test.TestCase):
@test.create_stubs({api.trove: ('backup_list', 'instance_get')})
def test_index(self):
api.trove.backup_list(IsA(http.HttpRequest))\
.AndReturn(self.database_backups.list())
api.trove.instance_get(IsA(http.HttpRequest),
IsA(str))\
.MultipleTimes()\
.AndReturn(self.databases.first())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/database_backups/index.html')
@test.create_stubs({api.trove: ('backup_list',)})
def test_index_exception(self):
api.trove.backup_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(
res, 'project/database_backups/index.html')
self.assertEqual(res.status_code, 200)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.trove: ('instance_list',
'backup_list',
'backup_create')})
def test_launch_backup(self):
api.trove.instance_list(IsA(http.HttpRequest))\
.AndReturn(self.databases.list())
api.trove.backup_list(IsA(http.HttpRequest)) \
.AndReturn(self.database_backups.list())
database = self.databases.first()
backupName = "NewBackup"
backupDesc = "Backup Description"
api.trove.backup_create(
IsA(http.HttpRequest),
backupName,
database.id,
backupDesc,
"")
self.mox.ReplayAll()
post = {
'name': backupName,
'instance': database.id,
'description': backupDesc,
'parent': ""
}
res = self.client.post(BACKUP_URL, post)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.trove: ('instance_list', 'backup_list')})
def test_launch_backup_exception(self):
api.trove.instance_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
api.trove.backup_list(IsA(http.HttpRequest)) \
.AndReturn(self.database_backups.list())
self.mox.ReplayAll()
res = self.client.get(BACKUP_URL)
self.assertMessageCount(res, error=1)
self.assertTemplateUsed(res,
'project/database_backups/backup.html')
@test.create_stubs({api.trove: ('instance_list',
'backup_list',
'backup_create')})
def test_launch_backup_incr(self):
api.trove.instance_list(IsA(http.HttpRequest)) \
.AndReturn(self.databases.list())
api.trove.backup_list(IsA(http.HttpRequest)) \
.AndReturn(self.database_backups.list())
database = self.databases.first()
backupName = "NewBackup"
backupDesc = "Backup Description"
backupParent = self.database_backups.first()
api.trove.backup_create(
IsA(http.HttpRequest),
backupName,
database.id,
backupDesc,
backupParent.id)
self.mox.ReplayAll()
post = {
'name': backupName,
'instance': database.id,
'description': backupDesc,
'parent': backupParent.id,
}
res = self.client.post(BACKUP_URL, post)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.trove: ('backup_get', 'instance_get')})
def test_detail_backup(self):
api.trove.backup_get(IsA(http.HttpRequest),
IsA(unicode))\
.AndReturn(self.database_backups.first())
api.trove.instance_get(IsA(http.HttpRequest),
IsA(str))\
.AndReturn(self.databases.first())
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(res,
'project/database_backups/details.html')
@test.create_stubs({api.trove: ('backup_get',)})
def test_detail_backup_notfound(self):
api.trove.backup_get(IsA(http.HttpRequest),
IsA(unicode))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.trove: ('backup_get', 'instance_get')})
def test_detail_backup_incr(self):
incr_backup = self.database_backups.list()[2]
parent_backup = self.database_backups.list()[1]
api.trove.backup_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(incr_backup)
api.trove.backup_get(IsA(http.HttpRequest), incr_backup.parent_id) \
.AndReturn(parent_backup)
api.trove.instance_get(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.databases.list()[1])
self.mox.ReplayAll()
url = reverse('horizon:project:database_backups:detail',
args=[incr_backup.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/database_backups/details.html')
| 34.885246
| 78
| 0.620771
|
c2e39d13ced0835f16408cb680517001e90f1caa
| 188
|
py
|
Python
|
redhawk/test/files/python/z004.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2016-10-04T11:46:32.000Z
|
2017-07-09T15:23:55.000Z
|
redhawk/test/files/python/z004.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2016-03-07T13:16:48.000Z
|
2018-03-21T00:25:04.000Z
|
redhawk/test/files/python/z004.py
|
spranesh/Redhawk
|
e2be5a6553df8449acecee2239b60c7bca0f22bc
|
[
"BSD-2-Clause-FreeBSD"
] | 3
|
2016-04-06T08:04:34.000Z
|
2020-03-17T20:59:47.000Z
|
# Test Try, Except
try:
x = a.x
y = a.y
z = a.array[0]
except AttributeError as e:
x = 1
y = 1
z = 1
except IndexError as e:
x = 0
y = 0
z = 0
finally:
print(x, y, z)
| 11.058824
| 27
| 0.526596
|
4b457f80788111d80cb0fb6b5838695f237fc5f6
| 4,733
|
py
|
Python
|
python/pyspark/pandas/tests/plot/test_frame_plot.py
|
akhalymon-cv/spark
|
76191b9151b6a7804f8894e53eef74106f98b787
|
[
"Apache-2.0"
] | 35,083
|
2015-01-01T03:05:13.000Z
|
2022-03-31T21:57:40.000Z
|
python/pyspark/pandas/tests/plot/test_frame_plot.py
|
akhalymon-cv/spark
|
76191b9151b6a7804f8894e53eef74106f98b787
|
[
"Apache-2.0"
] | 32,117
|
2015-01-01T00:00:24.000Z
|
2022-03-31T23:54:58.000Z
|
python/pyspark/pandas/tests/plot/test_frame_plot.py
|
akhalymon-cv/spark
|
76191b9151b6a7804f8894e53eef74106f98b787
|
[
"Apache-2.0"
] | 29,687
|
2015-01-01T02:40:43.000Z
|
2022-03-31T16:49:33.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option, option_context
from pyspark.pandas.plot import TopNPlotBase, SampledPlotBase, HistogramPlotBase
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class DataFramePlotTest(PandasOnSparkTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
def test_missing(self):
psdf = ps.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
unsupported_functions = ["box", "hexbin"]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*not implemented".format(name)
):
getattr(psdf.plot, name)()
def test_topn_max_rows(self):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = TopNPlotBase().get_top_n(psdf)
self.assertEqual(len(data), 2000)
def test_sampled_plot_with_ratio(self):
with option_context("plotting.sample_ratio", 0.5):
pdf = pd.DataFrame(np.random.rand(2500, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2500, 1), 0.5)
def test_sampled_plot_with_max_rows(self):
# 'plotting.max_rows' is 2000
pdf = pd.DataFrame(np.random.rand(2000, 4), columns=["a", "b", "c", "d"])
psdf = ps.from_pandas(pdf)
data = SampledPlotBase().get_sampled(psdf)
self.assertEqual(round(len(data) / 2000, 1), 1)
def test_compute_hist_single_column(self):
psdf = ps.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10]
)
expected_bins = np.linspace(1, 50, 11)
bins = HistogramPlotBase.get_bins(psdf[["a"]].to_spark(), 10)
expected_histogram = np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1])
histogram = HistogramPlotBase.compute_hist(psdf[["a"]], bins)[0]
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
self.assert_eq(pd.Series(expected_histogram, name="a"), histogram, almost=True)
def test_compute_hist_multi_columns(self):
expected_bins = np.linspace(1, 50, 11)
psdf = ps.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
"b": [50, 50, 30, 30, 30, 24, 10, 5, 4, 3, 1],
}
)
bins = HistogramPlotBase.get_bins(psdf.to_spark(), 10)
self.assert_eq(pd.Series(expected_bins), pd.Series(bins))
expected_histograms = [
np.array([5, 4, 1, 0, 0, 0, 0, 0, 0, 1]),
np.array([4, 1, 0, 0, 1, 3, 0, 0, 0, 2]),
]
histograms = HistogramPlotBase.compute_hist(psdf, bins)
expected_names = ["a", "b"]
for histogram, expected_histogram, expected_name in zip(
histograms, expected_histograms, expected_names
):
self.assert_eq(
pd.Series(expected_histogram, name=expected_name), histogram, almost=True
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.plot.test_frame_plot import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 37.864
| 97
| 0.640397
|
4f51ecb40dc2f9b80a3a072d784295c5e4c5925f
| 456
|
py
|
Python
|
Section 19/9.Document-complie-operations.py
|
airbornum/-Complete-Python-Scripting-for-Automation
|
bc053444f8786259086269ca1713bdb10144dd74
|
[
"MIT"
] | 18
|
2020-04-13T03:14:06.000Z
|
2022-03-09T18:54:41.000Z
|
Section 19/9.Document-complie-operations.py
|
airbornum/-Complete-Python-Scripting-for-Automation
|
bc053444f8786259086269ca1713bdb10144dd74
|
[
"MIT"
] | null | null | null |
Section 19/9.Document-complie-operations.py
|
airbornum/-Complete-Python-Scripting-for-Automation
|
bc053444f8786259086269ca1713bdb10144dd74
|
[
"MIT"
] | 22
|
2020-04-29T21:12:42.000Z
|
2022-03-17T18:19:54.000Z
|
import re
my_str="This is about python. Python is easy to learn and we have two major versions: python2 and python3 "
my_pat=r'\bPython[23]?\b'
#print(re.search(my_pat,my_str))
#print(re.findall(my_pat,my_str,flags=re.I))
#print(re.split(my_pat,my_str))
pat_ob=re.compile(my_pat,flags=re.I)
print(pat_ob)
print(pat_ob.search(my_str))
print(pat_ob.findall(my_str))
# re.findall(my_pat, my_str) ===> re.compile(my_pat).findall(my_str)
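# A further sketch: the compiled pattern object also exposes the other module-level
# operations, e.g. pat_ob.split(my_str) behaves like re.split(my_pat, my_str, flags=re.I).
#print(pat_ob.split(my_str))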
| 24
| 109
| 0.719298
|
314a5c1a65f462d0ca7d5fecec70069e9f0c97a0
| 439
|
py
|
Python
|
api/cloud_provider/migrations/0007_auto_20190731_0424.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 3
|
2019-11-29T03:49:08.000Z
|
2020-07-29T02:52:51.000Z
|
api/cloud_provider/migrations/0007_auto_20190731_0424.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 27
|
2021-05-05T02:51:26.000Z
|
2022-01-04T21:30:21.000Z
|
api/cloud_provider/migrations/0007_auto_20190731_0424.py
|
240325184/KubeOperator
|
777774050b236abf938a5a9ef505124c26e4916e
|
[
"Apache-2.0"
] | 1
|
2020-07-06T04:53:51.000Z
|
2020-07-06T04:53:51.000Z
|
# Generated by Django 2.1.2 on 2019-07-31 04:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cloud_provider', '0006_region_cloud_region'),
]
operations = [
migrations.RemoveField(
model_name='region',
name='connected',
),
migrations.RemoveField(
model_name='region',
name='msg',
),
]
| 19.954545
| 55
| 0.567198
|
0aa24044bdd1197df87c9f5bf04ff6b0db185e3f
| 285
|
py
|
Python
|
week03/CodingSkills/coding_skills_test.py
|
PreslavaKuzova/Python101
|
716cdd2d818f7eef45a1cdafdfb85a208daec750
|
[
"MIT"
] | 4
|
2019-04-06T20:06:19.000Z
|
2020-03-31T20:51:56.000Z
|
week03/CodingSkills/coding_skills_test.py
|
PreslavaKuzova/Python101
|
716cdd2d818f7eef45a1cdafdfb85a208daec750
|
[
"MIT"
] | null | null | null |
week03/CodingSkills/coding_skills_test.py
|
PreslavaKuzova/Python101
|
716cdd2d818f7eef45a1cdafdfb85a208daec750
|
[
"MIT"
] | 1
|
2020-03-21T00:49:56.000Z
|
2020-03-21T00:49:56.000Z
|
import unittest
from coding_skills import coding_skills, read_json
class TestCodingSkills(unittest.TestCase):
    def test_when_no_system_argument_is_given_an_exception_is_raised(self):
        self.assertRaises(Exception, read_json)
if __name__ == '__main__':
unittest.main()
| 31.666667
| 75
| 0.796491
|
3f0ca4b1e1fddb156f8bba69b9d8acd0fc9b31a1
| 1,493
|
py
|
Python
|
todo/routes.py
|
hhao99/flask-todo
|
dc0bc6ffcdde206e05b4ea7636324efa26315241
|
[
"CNRI-Python"
] | 3
|
2020-02-01T11:25:48.000Z
|
2020-02-04T14:11:50.000Z
|
todo/routes.py
|
hhao99/flask-todo
|
dc0bc6ffcdde206e05b4ea7636324efa26315241
|
[
"CNRI-Python"
] | 1
|
2021-06-02T01:11:26.000Z
|
2021-06-02T01:11:26.000Z
|
todo/routes.py
|
hhao99/flask-todo
|
dc0bc6ffcdde206e05b4ea7636324efa26315241
|
[
"CNRI-Python"
] | null | null | null |
from flask import (
    render_template, redirect,
request, g, flash, url_for
)
from .forms import TodoForm
from .models import Todo
from .models import db
def init_route(app):
@app.route('/')
def index():
todos = Todo.query.all()
return render_template('index.html',todos = todos)
@app.route('/new',methods=['GET','POST'])
def new():
if request.method == 'POST':
form = TodoForm()
print(form)
if form.validate_on_submit():
task = form.task.data
isDone = form.isDone.data
t = Todo(task=task,isDone = isDone)
print(t.isDone)
db.session.add(t)
db.session.commit()
return redirect(url_for('index'))
form = TodoForm()
return render_template('edit.html',form=form,action=url_for('new'))
@app.route('/delete/<int:id>')
def delete(id):
print(f"delete the todo with id: {id}")
todo = db.session.query(Todo).get(id)
db.session.delete(todo)
db.session.commit()
return redirect(url_for('index'))
@app.route('/update/<int:id>')
def update(id):
print(f"update the todo with id: {id}")
todo = db.session.query(Todo).get(id)
todo.isDone = not todo.isDone
db.session.add(todo)
db.session.commit()
return redirect(url_for('index'))
| 29.27451
| 75
| 0.54789
|
e175189fdb42e853767f0afbb683b6553f8c27eb
| 828
|
py
|
Python
|
collatelogs/metahandlers.py
|
tchamberlin/collatelogs
|
00335099a2a0a893bec1f927b3541d2acb5fb932
|
[
"MIT"
] | null | null | null |
collatelogs/metahandlers.py
|
tchamberlin/collatelogs
|
00335099a2a0a893bec1f927b3541d2acb5fb932
|
[
"MIT"
] | 1
|
2018-05-04T14:54:42.000Z
|
2018-05-04T15:48:22.000Z
|
collatelogs/metahandlers.py
|
tchamberlin/collatelogs
|
00335099a2a0a893bec1f927b3541d2acb5fb932
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""File metadata handlers: provide metadata based on a given path
These are used to populate keywords in the line_output_format elsewhere."""
import os
try:
# For unix
import pwd
except ImportError:
# For windows
import win32security
import sys
def get_owner_from_path(path):
"""Get the username of the owner of the given file"""
if "pwd" in sys.modules:
# On unix
return pwd.getpwuid(os.stat(path).st_uid).pw_name
# On Windows
f = win32security.GetFileSecurity(path, win32security.OWNER_SECURITY_INFORMATION)
username, _, _ = win32security.LookupAccountSid(
None, f.GetSecurityDescriptorOwner()
)
return username
# All available meta handlers
all_meta_handlers = {"owner": get_owner_from_path, "filename": os.path.basename}
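# A minimal usage sketch (the path below is hypothetical): handlers are looked up
# by keyword and called with a file path.
#   all_meta_handlers["filename"]("/var/log/syslog")  # -> "syslog"
#   all_meta_handlers["owner"]("/var/log/syslog")     # -> username of the file's owner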
| 23.657143
| 85
| 0.707729
|
d9b8172a25a0d56e4aceb18f37a936063e1bc2df
| 242
|
py
|
Python
|
exampleproject/lectures/admin.py
|
rishikesh67/django-shared-schema-tenants
|
ed3bcddf80a7838979fe1be2045dfa16b545beed
|
[
"MIT"
] | 20
|
2017-08-29T02:36:32.000Z
|
2021-12-06T21:29:46.000Z
|
exampleproject/lectures/admin.py
|
rishikesh67/django-shared-schema-tenants
|
ed3bcddf80a7838979fe1be2045dfa16b545beed
|
[
"MIT"
] | 35
|
2017-08-18T06:28:31.000Z
|
2021-09-02T01:53:09.000Z
|
exampleproject/lectures/admin.py
|
rishikesh67/django-shared-schema-tenants
|
ed3bcddf80a7838979fe1be2045dfa16b545beed
|
[
"MIT"
] | 9
|
2018-06-17T22:04:13.000Z
|
2022-03-18T09:27:18.000Z
|
from django.contrib import admin
from shared_schema_tenants_custom_data.admin import TenantSpecificModelAdmin
from .models import Lecture
class LectureAdmin(TenantSpecificModelAdmin):
pass
admin.site.register(Lecture, LectureAdmin)
| 20.166667
| 76
| 0.847107
|
be5ef80622566f1ace154f2c6c86d4d4f30a9442
| 10,355
|
py
|
Python
|
tests/unit/pynwb_tests/test_ophys.py
|
VBaratham/pynwb
|
a9429c93f29763b9ebe9022b099afcffbc6be493
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/unit/pynwb_tests/test_ophys.py
|
VBaratham/pynwb
|
a9429c93f29763b9ebe9022b099afcffbc6be493
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
tests/unit/pynwb_tests/test_ophys.py
|
VBaratham/pynwb
|
a9429c93f29763b9ebe9022b099afcffbc6be493
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
import unittest
from pynwb.ophys import TwoPhotonSeries, RoiResponseSeries, DfOverF, Fluorescence, PlaneSegmentation, \
ImageSegmentation, OpticalChannel, ImagingPlane, MotionCorrection, CorrectedImageStack
from pynwb.image import ImageSeries
from pynwb.base import TimeSeries
from pynwb.device import Device
from pynwb.base import ProcessingModule
import numpy as np
def CreatePlaneSegmentation():
w, h = 5, 5
img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
[7, 8, 2.0], [9, 10, 2.0]]
iSS = ImageSeries(name='test_iS', data=np.ones((2, 2, 2)), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=[1., 2.])
oc = OpticalChannel('test_optical_channel', 'description', 500.)
device = Device(name='device_name')
ip = ImagingPlane('test_imaging_plane', oc, 'description', device, 600.,
300., 'indicator', 'location', (1, 2, 1, 2, 3), 4.0,
'unit', 'reference_frame')
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(pixel_mask=pix_mask[0:3], image_mask=img_mask[0])
pS.add_roi(pixel_mask=pix_mask[3:5], image_mask=img_mask[1])
return pS
class TwoPhotonSeriesConstructor(unittest.TestCase):
def test_init(self):
oc = OpticalChannel('test_name', 'description', 500.)
self.assertEqual(oc.description, 'description')
self.assertEqual(oc.emission_lambda, 500.)
device = Device(name='device_name')
ip = ImagingPlane('test_imaging_plane', oc, 'description', device, 600.,
300., 'indicator', 'location', (50, 100, 3), 4.0, 'unit', 'reference_frame')
self.assertEqual(ip.optical_channel[0], oc)
self.assertEqual(ip.device, device)
self.assertEqual(ip.excitation_lambda, 600.)
self.assertEqual(ip.imaging_rate, 300.)
self.assertEqual(ip.indicator, 'indicator')
self.assertEqual(ip.location, 'location')
self.assertEqual(ip.manifold, (50, 100, 3))
self.assertEqual(ip.conversion, 4.0)
self.assertEqual(ip.unit, 'unit')
self.assertEqual(ip.reference_frame, 'reference_frame')
tPS = TwoPhotonSeries('test_tPS', unit='unit', field_of_view=[2., 3.],
imaging_plane=ip, pmt_gain=1.0, scan_line_rate=2.0, external_file=['external_file'],
starting_frame=[1, 2, 3], format='tiff', timestamps=list())
self.assertEqual(tPS.name, 'test_tPS')
self.assertEqual(tPS.unit, 'unit')
self.assertEqual(tPS.field_of_view, [2., 3.])
self.assertEqual(tPS.imaging_plane, ip)
self.assertEqual(tPS.pmt_gain, 1.0)
self.assertEqual(tPS.scan_line_rate, 2.0)
self.assertEqual(tPS.external_file, ['external_file'])
self.assertEqual(tPS.starting_frame, [1, 2, 3])
self.assertEqual(tPS.format, 'tiff')
self.assertEqual(tPS.dimension, [np.nan])
def test_args(self):
oc = OpticalChannel('test_name', 'description', 500.)
device = Device(name='device_name')
ip = ImagingPlane('test_imaging_plane', oc, 'description', device, 600.,
300., 'indicator', 'location', (50, 100, 3), 4.0, 'unit', 'reference_frame')
with self.assertRaises(ValueError): # no data or external file
TwoPhotonSeries('test_tPS', unit='unit', field_of_view=[2., 3.],
imaging_plane=ip, pmt_gain=1.0, scan_line_rate=2.0,
starting_frame=[1, 2, 3], format='tiff', timestamps=[1., 2.])
class MotionCorrectionConstructor(unittest.TestCase):
def test_init(self):
MotionCorrection(list())
class CorrectedImageStackConstructor(unittest.TestCase):
def test_init(self):
is1 = ImageSeries(name='is1', data=np.ones((2, 2, 2)), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=[1., 2.])
is2 = ImageSeries(name='is2', data=np.ones((2, 2, 2)), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=[1., 2.])
        tstamps = np.arange(1.0, 100.0, 0.1, dtype=float)
ts = TimeSeries("test_ts", list(range(len(tstamps))), 'unit', timestamps=tstamps)
cis = CorrectedImageStack(is1, is2, ts)
ProcessingModule('name', 'description').add_container(cis)
self.assertEqual(cis.corrected, is1)
self.assertEqual(cis.original, is2)
self.assertEqual(cis.xy_translation, ts)
class RoiResponseSeriesConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[0])
ts = RoiResponseSeries('test_ts', list(), 'unit', rt_region, timestamps=list())
self.assertEqual(ts.name, 'test_ts')
self.assertEqual(ts.unit, 'unit')
self.assertEqual(ts.rois, rt_region)
class DfOverFConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[1])
rrs = RoiResponseSeries('test_ts', list(), 'unit', rt_region, timestamps=list())
dof = DfOverF(rrs)
self.assertEqual(dof.roi_response_series['test_ts'], rrs)
class FluorescenceConstructor(unittest.TestCase):
def test_init(self):
ip = CreatePlaneSegmentation()
rt_region = ip.create_roi_table_region('the second ROI', region=[1])
ts = RoiResponseSeries('test_ts', list(), 'unit', rt_region, timestamps=list())
ff = Fluorescence(ts)
self.assertEqual(ff.roi_response_series['test_ts'], ts)
self.assertEqual(ff.roi_response_series['test_ts'], ts)
class ImageSegmentationConstructor(unittest.TestCase):
def test_init(self):
ps = CreatePlaneSegmentation()
iS = ImageSegmentation(ps, name='test_iS')
self.assertEqual(iS.name, 'test_iS')
self.assertEqual(iS.plane_segmentations[ps.name], ps)
self.assertEqual(iS[ps.name], iS.plane_segmentations[ps.name])
class PlaneSegmentationConstructor(unittest.TestCase):
def getBoilerPlateObjects(self):
iSS = ImageSeries(name='test_iS', data=np.ones((2, 2, 2)), unit='unit',
external_file=['external_file'], starting_frame=[1, 2, 3], format='tiff', timestamps=list())
device = Device(name='device_name')
oc = OpticalChannel('test_optical_channel', 'description', 500.)
ip = ImagingPlane('test_imaging_plane', oc, 'description', device, 600.,
300., 'indicator', 'location', (1, 2, 1, 2, 3), 4.0, 'unit', 'reference_frame')
return iSS, ip
def test_init(self):
w, h = 5, 5
img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
[7, 8, 2.0], [9, 10, 2.0]]
iSS, ip = self.getBoilerPlateObjects()
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(pixel_mask=pix_mask[0:3], image_mask=img_mask[0])
pS.add_roi(pixel_mask=pix_mask[3:5], image_mask=img_mask[1])
self.assertEqual(pS.description, 'description')
self.assertEqual(pS.imaging_plane, ip)
self.assertEqual(pS.reference_images, (iSS,))
self.assertEqual(pS['pixel_mask'].target.data, pix_mask)
self.assertEqual(pS['pixel_mask'][0], pix_mask[0:3])
self.assertEqual(pS['pixel_mask'][1], pix_mask[3:5])
self.assertEqual(pS['image_mask'].data, img_mask)
def test_init_pixel_mask(self):
pix_mask = [[1, 2, 1.0], [3, 4, 1.0], [5, 6, 1.0],
[7, 8, 2.0], [9, 10, 2.0]]
iSS, ip = self.getBoilerPlateObjects()
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(pixel_mask=pix_mask[0:3])
pS.add_roi(pixel_mask=pix_mask[3:5])
self.assertEqual(pS.description, 'description')
self.assertEqual(pS.imaging_plane, ip)
self.assertEqual(pS.reference_images, (iSS,))
self.assertEqual(pS['pixel_mask'].target.data, pix_mask)
self.assertEqual(pS['pixel_mask'][0], pix_mask[0:3])
self.assertEqual(pS['pixel_mask'][1], pix_mask[3:5])
def test_init_voxel_mask(self):
vox_mask = [[1, 2, 3, 1.0], [3, 4, 1, 1.0], [5, 6, 3, 1.0],
[7, 8, 3, 2.0], [9, 10, 2, 2.0]]
iSS, ip = self.getBoilerPlateObjects()
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(voxel_mask=vox_mask[0:3])
pS.add_roi(voxel_mask=vox_mask[3:5])
self.assertEqual(pS.description, 'description')
self.assertEqual(pS.imaging_plane, ip)
self.assertEqual(pS.reference_images, (iSS,))
self.assertEqual(pS['voxel_mask'].target.data, vox_mask)
self.assertEqual(pS['voxel_mask'][0], vox_mask[0:3])
self.assertEqual(pS['voxel_mask'][1], vox_mask[3:5])
def test_init_3d_image_mask(self):
img_masks = np.random.randn(2, 20, 30, 4)
iSS, ip = self.getBoilerPlateObjects()
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(image_mask=img_masks[0])
pS.add_roi(image_mask=img_masks[1])
self.assertTrue(np.allclose(pS['image_mask'][0], img_masks[0]))
self.assertTrue(np.allclose(pS['image_mask'][1], img_masks[1]))
def test_init_image_mask(self):
w, h = 5, 5
img_mask = [[[1.0 for x in range(w)] for y in range(h)], [[2.0 for x in range(w)] for y in range(h)]]
iSS, ip = self.getBoilerPlateObjects()
pS = PlaneSegmentation('description', ip, 'test_name', iSS)
pS.add_roi(image_mask=img_mask[0])
pS.add_roi(image_mask=img_mask[1])
self.assertEqual(pS.description, 'description')
self.assertEqual(pS.imaging_plane, ip)
self.assertEqual(pS.reference_images, (iSS,))
self.assertEqual(pS['image_mask'].data, img_mask)
if __name__ == '__main__':
unittest.main()
| 41.09127
| 120
| 0.627523
|
3bbcf7b77edc894d07497e30200a30fbfc8201c5
| 137
|
py
|
Python
|
apis/raw/021_raw/021_cleaner_bs4_version.py
|
sighill/shade_app
|
2b42d6411bc6e292b112a5e6be3598de8edadee1
|
[
"MIT"
] | null | null | null |
apis/raw/021_raw/021_cleaner_bs4_version.py
|
sighill/shade_app
|
2b42d6411bc6e292b112a5e6be3598de8edadee1
|
[
"MIT"
] | null | null | null |
apis/raw/021_raw/021_cleaner_bs4_version.py
|
sighill/shade_app
|
2b42d6411bc6e292b112a5e6be3598de8edadee1
|
[
"MIT"
] | null | null | null |
from requests import get
from bs4 import BeautifulSoup

# Parse the contents of the raw source file; passing the bare path string to
# BeautifulSoup would only parse the path itself, not the file.
file = '/home/common/shade/apis/raw/021_raw/src'
with open(file) as fh:
    soup = BeautifulSoup(fh.read(), 'html.parser')
| 22.833333
| 49
| 0.759124
|
1e5fe405209094bcd713d6efce48b34fd054c594
| 6,827
|
py
|
Python
|
rioxarray/raster_writer.py
|
spestana/rioxarray
|
a96c6083ee15b090ffe15b2beb34047777e90ecf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rioxarray/raster_writer.py
|
spestana/rioxarray
|
a96c6083ee15b090ffe15b2beb34047777e90ecf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rioxarray/raster_writer.py
|
spestana/rioxarray
|
a96c6083ee15b090ffe15b2beb34047777e90ecf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
This module contains a dataset writer for Dask.
Credits:
RasterioWriter dask write functionality was adopted from https://github.com/dymaxionlabs/dask-rasterio # noqa: E501
Source file:
- https://github.com/dymaxionlabs/dask-rasterio/blob/8dd7fdece7ad094a41908c0ae6b4fe6ca49cf5e1/dask_rasterio/write.py # noqa: E501
"""
import rasterio
from rasterio.windows import Window
from xarray.conventions import encode_cf_variable
from rioxarray.exceptions import RioXarrayError
try:
import dask.array
from dask import is_dask_collection
except ImportError:
def is_dask_collection(_):
"""
Replacement method to check if it is a dask collection
"""
# if you cannot import dask, then it cannot be a dask array
return False
FILL_VALUE_NAMES = ("_FillValue", "missing_value", "fill_value", "nodata")
UNWANTED_RIO_ATTRS = ("nodatavals", "crs", "is_tiled", "res")
def _write_metatata_to_raster(raster_handle, xarray_dataset, tags):
"""
Write the metadata stored in the xarray object to raster metadata
"""
tags = xarray_dataset.attrs if tags is None else {**xarray_dataset.attrs, **tags}
# write scales and offsets
try:
raster_handle.scales = tags["scales"]
except KeyError:
scale_factor = tags.get(
"scale_factor", xarray_dataset.encoding.get("scale_factor")
)
if scale_factor is not None:
raster_handle.scales = (scale_factor,) * raster_handle.count
try:
raster_handle.offsets = tags["offsets"]
except KeyError:
add_offset = tags.get("add_offset", xarray_dataset.encoding.get("add_offset"))
if add_offset is not None:
raster_handle.offsets = (add_offset,) * raster_handle.count
# filter out attributes that should be written in a different location
skip_tags = (
UNWANTED_RIO_ATTRS
+ FILL_VALUE_NAMES
+ (
"transform",
"scales",
"scale_factor",
"add_offset",
"offsets",
"grid_mapping",
)
)
# this is for when multiple values are used
# in this case, it will be stored in the raster description
if not isinstance(tags.get("long_name"), str):
skip_tags += ("long_name",)
tags = {key: value for key, value in tags.items() if key not in skip_tags}
raster_handle.update_tags(**tags)
# write band name information
long_name = xarray_dataset.attrs.get("long_name")
if isinstance(long_name, (tuple, list)):
if len(long_name) != raster_handle.count:
raise RioXarrayError(
"Number of names in the 'long_name' attribute does not equal "
"the number of bands."
)
for iii, band_description in enumerate(long_name):
raster_handle.set_band_description(iii + 1, band_description)
else:
band_description = long_name or xarray_dataset.name
if band_description:
for iii in range(raster_handle.count):
raster_handle.set_band_description(iii + 1, band_description)
class RasterioWriter:
"""
    .. versionadded:: 0.2
Rasterio wrapper to allow dask.array.store to do window saving or to
save using the rasterio write method.
"""
def __init__(self, raster_path):
"""
raster_path: str
The path to output the raster to.
"""
# https://github.com/dymaxionlabs/dask-rasterio/issues/3#issuecomment-514781825
# Rasterio datasets can't be pickled and can't be shared between
# processes or threads. The work around is to distribute dataset
# identifiers (paths or URIs) and then open them in new threads.
# See mapbox/rasterio#1731.
self.raster_path = raster_path
def __setitem__(self, key, item):
"""Put the data chunk in the image"""
if len(key) == 3:
index_range, yyy, xxx = key
indexes = list(
range(
index_range.start + 1, index_range.stop + 1, index_range.step or 1
)
)
else:
indexes = 1
yyy, xxx = key
chy_off = yyy.start
chy = yyy.stop - yyy.start
chx_off = xxx.start
chx = xxx.stop - xxx.start
with rasterio.open(self.raster_path, "r+") as rds:
rds.write(item, window=Window(chx_off, chy_off, chx, chy), indexes=indexes)
def to_raster(self, xarray_dataarray, tags, windowed, lock, compute, **kwargs):
"""
This method writes to the raster on disk.
xarray_dataarray: xarray.DataArray
The input data array to write to disk.
tags: dict, optional
A dictionary of tags to write to the raster.
windowed: bool
If True and the data array is not a dask array, it will write
the data to disk using rasterio windows.
lock: boolean or Lock, optional
Lock to use to write data using dask.
If not supplied, it will use a single process.
compute: bool
If True (default) and data is a dask array, then compute and save
the data immediately. If False, return a dask Delayed object.
Call ".compute()" on the Delayed object to compute the result
later. Call ``dask.compute(delayed1, delayed2)`` to save
multiple delayed files at once.
**kwargs
Keyword arguments to pass into writing the raster.
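
        A sketch of deferred writing through the public accessor (file names
        and dataset variables are hypothetical)::

            delayed1 = xds1.rio.to_raster("out1.tif", compute=False)
            delayed2 = xds2.rio.to_raster("out2.tif", compute=False)
            dask.compute(delayed1, delayed2)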
"""
dtype = kwargs["dtype"]
# generate initial output file
with rasterio.open(self.raster_path, "w", **kwargs) as rds:
_write_metatata_to_raster(rds, xarray_dataarray, tags)
if not (lock and is_dask_collection(xarray_dataarray.data)):
# write data to raster immmediately if not dask array
if windowed:
window_iter = rds.block_windows(1)
else:
window_iter = [(None, None)]
for _, window in window_iter:
if window is not None:
out_data = xarray_dataarray.rio.isel_window(window)
else:
out_data = xarray_dataarray
data = encode_cf_variable(out_data).values.astype(dtype)
if data.ndim == 2:
rds.write(data, 1, window=window)
else:
rds.write(data, window=window)
if lock and is_dask_collection(xarray_dataarray.data):
return dask.array.store(
encode_cf_variable(xarray_dataarray).data.astype(dtype),
self,
lock=lock,
compute=compute,
)
| 36.704301
| 130
| 0.609345
|
8daa7d5624c694926e07552e655edd1742be7468
| 6,871
|
py
|
Python
|
webWeixin/webWeixin.py
|
awesome-archive/awesome-python-login-model
|
98aecab631a717934efc308c873fd00cbc6ef930
|
[
"MIT"
] | 2
|
2019-04-12T15:10:02.000Z
|
2019-04-12T15:11:18.000Z
|
webWeixin/webWeixin.py
|
masdude/awesome-python-login-model
|
aa67e633c0be8634081bae9fa1ed218c1f9fb75c
|
[
"MIT"
] | null | null | null |
webWeixin/webWeixin.py
|
masdude/awesome-python-login-model
|
aa67e633c0be8634081bae9fa1ed218c1f9fb75c
|
[
"MIT"
] | 1
|
2019-03-31T15:45:36.000Z
|
2019-03-31T15:45:36.000Z
|
import os
import re
import time
import sys
import subprocess
import requests
import xml.dom.minidom
import json
"""
info:
author:CriseLYJ
github:https://github.com/CriseLYJ/
update_time:2019-3-6
"""
session = requests.session()
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0'
}
QRImgPath = os.path.split(os.path.realpath(__file__))[0] + os.sep + 'webWeixinQr.jpg'
uuid = ''
tip = 0
base_uri = ''
redirect_uri = ''
skey = ''
wxsid = ''
wxuin = ''
pass_ticket = ''
deviceId = 'e000000000000000'
BaseRequest = {}
ContactList = []
My = []
SyncKey = ''
def getUUID():
global uuid,session
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': 'wx782c26e4c19acffb',
'fun': 'new',
'lang': 'zh_CN',
'_': int(time.time()),
}
response = session.get(url, params=params)
data = response.content.decode('utf-8')
# print(data) >>> window.QRLogin.code = 200; window.QRLogin.uuid = "oZwt_bFfRg==";
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
code = pm.group(1)
uuid = pm.group(2)
if code == '200':
return True
return False
def showQRImage():
global tip
url = 'https://login.weixin.qq.com/qrcode/' + uuid
params = {
't': 'webwx',
'_': int(time.time()),
}
response = session.get(url, params=params)
tip = 1
with open(QRImgPath, 'wb') as f:
f.write(response.content)
f.close()
if sys.platform.find('darwin') >= 0:
subprocess.call(['open', QRImgPath])
elif sys.platform.find('linux') >= 0:
subprocess.call(['xdg-open', QRImgPath])
else:
os.startfile(QRImgPath)
    print('Please scan the QR code with WeChat to log in')
def waitForLogin():
global tip, base_uri, redirect_uri
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, uuid, int(time.time()))
response = session.get(url)
data = response.content.decode('utf-8')
# print(data)
# window.code=500;
regx = r'window.code=(\d+);'
pm = re.search(regx, data)
code = pm.group(1)
    if code == '201':  # QR code scanned
        print('Scan successful; please tap Confirm on your phone to log in')
        tip = 0
    elif code == '200':  # logged in
        print('Logging in...')
regx = r'window.redirect_uri="(\S+?)";'
pm = re.search(regx, data)
redirect_uri = pm.group(1) + '&fun=new'
base_uri = redirect_uri[:redirect_uri.rfind('/')]
# closeQRImage
if sys.platform.find('darwin') >= 0: # for OSX with Preview
os.system("osascript -e 'quit app \"Preview\"'")
    elif code == '408':  # timed out
pass
# elif code == '400' or code == '500':
return code
def login():
global skey, wxsid, wxuin, pass_ticket, BaseRequest
response = session.get(redirect_uri)
data = response.content.decode('utf-8')
# print(data)
'''
<error>
<ret>0</ret>
<message>OK</message>
<skey>xxx</skey>
<wxsid>xxx</wxsid>
<wxuin>xxx</wxuin>
<pass_ticket>xxx</pass_ticket>
<isgrayscale>1</isgrayscale>
</error>
'''
xml.dom
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
wxsid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
wxuin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
pass_ticket = node.childNodes[0].data
# print('skey: %s, wxsid: %s, wxuin: %s, pass_ticket: %s' % (skey, wxsid,
# wxuin, pass_ticket))
if not all((skey, wxsid, wxuin, pass_ticket)):
return False
BaseRequest = {
'Uin': int(wxuin),
'Sid': wxsid,
'Skey': skey,
'DeviceID': deviceId,
}
return True
def webwxinit():
url = base_uri + \
'/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
pass_ticket, skey, int(time.time()))
params = {
'BaseRequest': BaseRequest
}
h = headers
h['ContentType'] = 'application/json; charset=UTF-8'
response = session.post(url, data=json.dumps(params), headers=h)
data = response.content.decode('utf-8')
#print(data)
global ContactList, My, SyncKey
dic = json.loads(data)
ContactList = dic['ContactList']
My = dic['User']
SyncKeyList = []
for item in dic['SyncKey']['List']:
SyncKeyList.append('%s_%s' % (item['Key'], item['Val']))
SyncKey = '|'.join(SyncKeyList)
ErrMsg = dic['BaseResponse']['ErrMsg']
Ret = dic['BaseResponse']['Ret']
if Ret != 0:
return False
return True
def webwxgetcontact():
url = base_uri + \
'/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
pass_ticket, skey, int(time.time()))
h = headers
h['ContentType'] = 'application/json; charset=UTF-8'
response = session.get(url, headers=h)
data = response.content.decode('utf-8')
# print(data)
dic = json.loads(data)
MemberList = dic['MemberList']
    # Iterate in reverse order; otherwise removing items while iterating causes problems.
SpecialUsers = ["newsapp", "fmessage", "filehelper", "weibo", "qqmail", "tmessage", "qmessage", "qqsync", "floatbottle", "lbsapp", "shakeapp", "medianote", "qqfriend", "readerapp", "blogapp", "facebookapp", "masssendapp",
"meishiapp", "feedsapp", "voip", "blogappweixin", "weixin", "brandsessionholder", "weixinreminder", "wxid_novlwrv3lqwv11", "gh_22b87fa7cb3c", "officialaccounts", "notification_messages", "wxitil", "userexperience_alarm"]
for i in range(len(MemberList) - 1, -1, -1):
Member = MemberList[i]
        if Member['VerifyFlag'] & 8 != 0:  # official/service accounts
            MemberList.remove(Member)
        elif Member['UserName'] in SpecialUsers:  # special accounts
            MemberList.remove(Member)
        elif Member['UserName'].find('@@') != -1:  # group chats
            MemberList.remove(Member)
        elif Member['UserName'] == My['UserName']:  # self
MemberList.remove(Member)
return MemberList
def main():
if not getUUID():
        print('Failed to get uuid')
return
showQRImage()
time.sleep(1)
while waitForLogin() != '200':
pass
os.remove(QRImgPath)
if not login():
        print('Login failed')
        return
    # Logged in; now fetch the contact list
    if not webwxinit():
        print('Initialization failed')
return
MemberList = webwxgetcontact()
    print('The contact list contains %s friends' % len(MemberList))
    for x in MemberList:
        sex = 'unknown' if x['Sex'] == 0 else 'male' if x['Sex'] == 1 else 'female'
        print('Nickname: %s, Sex: %s, Remark: %s, Signature: %s' % (x['NickName'], sex, x['RemarkName'], x['Signature']))
if __name__ == '__main__':
    print('Start')
main()
| 24.539286
| 240
| 0.572988
|
2207f92aa9a28e0454a7ba0b7e5d54d108fa1f3b
| 5,454
|
py
|
Python
|
external/configure_panorama.py
|
jabielecki/azure-vmseries-terraform
|
338337c347c54b1a07b5c6f0f0a38efd54f26d08
|
[
"Apache-2.0"
] | null | null | null |
external/configure_panorama.py
|
jabielecki/azure-vmseries-terraform
|
338337c347c54b1a07b5c6f0f0a38efd54f26d08
|
[
"Apache-2.0"
] | null | null | null |
external/configure_panorama.py
|
jabielecki/azure-vmseries-terraform
|
338337c347c54b1a07b5c6f0f0a38efd54f26d08
|
[
"Apache-2.0"
] | null | null | null |
from terraform_external_data import terraform_external_data
from panosxml import Panos
import re
import urllib3
urllib3.disable_warnings()
from xml.etree import ElementTree
from constants import *
import os
import subprocess
import time
OUTPUT_DIR="output"
REQUIRED_ARGS=[
"panorama_ip",
"username",
"password",
"panorama_private_ip",
"storage_account_name",
"storage_account_key",
"inbound_storage_share_name",
"outbound_storage_share_name",
]
OPTIONAL_ARGS={
"outbound_hostname": "outside-fw",
"outbound_device_group": "OUTBOUND",
"outbound_template_stack": "OUTBOUND",
"inbound_hostname": "inside-fw",
"inbound_device_group": "INBOUND",
"inbound_template_stack": "INBOUND",
"dns_server": "8.8.8.8",
}
def connect(query: dict):
connected = False
failures = 0
# Retry for 10 minutes
max_failures = 20
while not connected:
if failures >= max_failures:
raise PanoramaError("Failed to connect to panorama at {}".format(query["panorama_ip"]))
try:
p = Panos(query["panorama_ip"], user=query["username"], pw=query["password"])
connected = True
except:
failures = failures +1
time.sleep(30)
pass
return p
def gen_inbound_init_cfgs(query: dict, vm_auth_key:str):
inbound_config = init_cfg(
hostname=query["inbound_hostname"],
vm_auth_key=vm_auth_key,
device_group_name=query["inbound_device_group"],
template_name=query["inbound_template_stack"],
panorama_ip=query["panorama_private_ip"],
dns_ip=query["dns_server"]
)
fp = os.path.join(query["output_dir"], OUTPUT_DIR, "init-cfg-inbound.txt")
fd = os.path.join(query["output_dir"], OUTPUT_DIR)
if not os.path.isdir(fd):
os.mkdir(fd)
fh = open(fp, mode="w")
fh.write(inbound_config)
fh.close()
return fp
def gen_outbound_init_cfgs(query: dict, vm_auth_key:str):
outbound_config = init_cfg(
hostname=query["outbound_hostname"],
vm_auth_key=vm_auth_key,
device_group_name=query["outbound_device_group"],
template_name=query["outbound_template_stack"],
panorama_ip=query["panorama_private_ip"],
dns_ip=query["dns_server"]
)
fp = os.path.join(query["output_dir"], OUTPUT_DIR, "init-cfg-outbound.txt")
fd = os.path.join(query["output_dir"], OUTPUT_DIR)
if not os.path.isdir(fd):
os.mkdir(fd)
fh = open(fp, mode="w")
fh.write(outbound_config)
fh.close()
return fp
def upload_cfgs(path,
storage_account_name,
primary_access_key,
storage_share_name
):
results = []
cmd = f"az storage file upload --account-name {storage_account_name} --account-key {primary_access_key} --share-name {storage_share_name} --source {path} --path config/init-cfg.txt"
    # With shell=True, pass the command as a single string (a list would only use its first element on POSIX)
    r = subprocess.run(cmd, shell=True, capture_output=True)
results.append(r)
return results
def gen_bootstrap(p: Panos, lifetime: str):
"""
    Generate a new VM auth (bootstrap) key with the given lifetime.
"""
params = {
"type": "op",
"cmd": "<request><bootstrap><vm-auth-key><generate><lifetime>{}</lifetime></generate></vm-auth-key></bootstrap></request>".format(lifetime)
}
r = p.send(params)
if not p.check_resp(r):
raise PanoramaError("Failed to generate Bootstrap key {}".format(r.content))
    regex_result = re.search(r"VM auth key\s+(\d+)\s+", r.content.decode())
key = regex_result.group(1)
return key
def show_bootstrap(p: Panos):
"""
Get the most recently generated bootstrap key
"""
params = {
"type": "op",
"cmd": "<request><bootstrap><vm-auth-key><show></show></vm-auth-key></bootstrap></request>"
}
r = p.send(params)
if not p.check_resp(r):
raise PanoramaError("Failed to show Bootstrap key.")
root = ElementTree.fromstring(r.content.decode())
keys = root.findall("./result/bootstrap-vm-auth-keys/entry/vm-auth-key")
if len(keys) == 0:
return
return keys[0].text
def bootstrap(query):
p = connect(query)
key = show_bootstrap(p)
    # not yet bootstrapped
if not key:
key = gen_bootstrap(p, query["key_lifetime"])
inbound_config = gen_inbound_init_cfgs(query, key)
outbound_config = gen_outbound_init_cfgs(query, key)
upload_cfgs(
inbound_config,
storage_account_name=query["storage_account_name"],
storage_share_name=query["inbound_storage_share_name"],
primary_access_key=query["storage_account_key"]
)
upload_cfgs(
outbound_config,
storage_account_name=query["storage_account_name"],
storage_share_name=query["outbound_storage_share_name"],
primary_access_key=query["storage_account_key"]
)
return key
def parse_args(query: dict):
for a in REQUIRED_ARGS:
if a not in query:
raise ValueError("Missing required argument {}".format(a))
for k, v in OPTIONAL_ARGS.items():
if k not in query:
query[k] = v
return query
@terraform_external_data
def main(query):
r = {}
query = parse_args(query)
r['vm-auth-key'] = bootstrap(query)
r['status'] = "OK"
return r
class PanoramaError(Exception):
pass
if __name__ == '__main__':
main()
| 28.259067
| 185
| 0.642831
|
827b8015080165a574795a1927545e7e66951a64
| 15,556
|
py
|
Python
|
scipy/sparse/linalg/isolve/_gcrotmk.py
|
EverLookNeverSee/scipy
|
5ffd20ab831b3bc46bc5692c8624c01f8df09a9b
|
[
"BSD-3-Clause"
] | 1
|
2021-08-16T09:32:42.000Z
|
2021-08-16T09:32:42.000Z
|
scipy/sparse/linalg/isolve/_gcrotmk.py
|
EverLookNeverSee/scipy
|
5ffd20ab831b3bc46bc5692c8624c01f8df09a9b
|
[
"BSD-3-Clause"
] | 44
|
2019-06-27T15:56:14.000Z
|
2022-03-15T22:21:10.000Z
|
scipy/sparse/linalg/isolve/_gcrotmk.py
|
EverLookNeverSee/scipy
|
5ffd20ab831b3bc46bc5692c8624c01f8df09a9b
|
[
"BSD-3-Clause"
] | 4
|
2020-06-13T10:32:25.000Z
|
2021-12-03T15:48:16.000Z
|
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from scipy.sparse.linalg.isolve.utils import make_system
__all__ = ['gcrotmk']
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
prepend_outer_v=False):
"""
FGMRES Arnoldi process, with optional projection or augmentation
Parameters
----------
matvec : callable
Operation A*x
v0 : ndarray
Initial vector, normalized to nrm2(v0) == 1
m : int
Number of GMRES rounds
atol : float
Absolute tolerance for early exit
lpsolve : callable
Left preconditioner L
rpsolve : callable
Right preconditioner R
    cs : list of ndarray
        Columns of the matrix C in GCROT
outer_v : list of ndarrays
Augmentation vectors in LGMRES
prepend_outer_v : bool, optional
Whether augmentation vectors come before or after
Krylov iterates
Raises
------
LinAlgError
If nans encountered
Returns
-------
Q, R : ndarray
QR decomposition of the upper Hessenberg H=QR
B : ndarray
Projections corresponding to matrix C
vs : list of ndarray
Columns of matrix V
zs : list of ndarray
Columns of matrix Z
y : ndarray
Solution to ||H y - e_1||_2 = min!
res : float
The final (preconditioned) residual norm
"""
if lpsolve is None:
lpsolve = lambda x: x
if rpsolve is None:
rpsolve = lambda x: x
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
vs = [v0]
zs = []
y = None
res = np.nan
m = m + len(outer_v)
# Orthogonal projection coefficients
B = np.zeros((len(cs), m), dtype=v0.dtype)
# H is stored in QR factorized form
Q = np.ones((1, 1), dtype=v0.dtype)
R = np.zeros((1, 0), dtype=v0.dtype)
eps = np.finfo(v0.dtype).eps
breakdown = False
# FGMRES Arnoldi process
for j in range(m):
# L A Z = C B + V H
if prepend_outer_v and j < len(outer_v):
z, w = outer_v[j]
elif prepend_outer_v and j == len(outer_v):
z = rpsolve(v0)
w = None
elif not prepend_outer_v and j >= m - len(outer_v):
z, w = outer_v[j - (m - len(outer_v))]
else:
z = rpsolve(vs[-1])
w = None
if w is None:
w = lpsolve(matvec(z))
else:
# w is clobbered below
w = w.copy()
w_norm = nrm2(w)
# GCROT projection: L A -> (1 - C C^H) L A
# i.e. orthogonalize against C
for i, c in enumerate(cs):
alpha = dot(c, w)
B[i,j] = alpha
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
# Orthogonalize against V
hcur = np.zeros(j+2, dtype=Q.dtype)
for i, v in enumerate(vs):
alpha = dot(v, w)
hcur[i] = alpha
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
hcur[i+1] = nrm2(w)
with np.errstate(over='ignore', divide='ignore'):
# Careful with denormals
alpha = 1/hcur[-1]
if np.isfinite(alpha):
w = scal(alpha, w)
if not (hcur[-1] > eps * w_norm):
# w essentially in the span of previous vectors,
# or we have nans. Bail out after updating the QR
# solution.
breakdown = True
vs.append(w)
zs.append(z)
# Arnoldi LSQ problem
# Add new column to H=Q*R, padding other columns with zeros
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
Q2[:j+1,:j+1] = Q
Q2[j+1,j+1] = 1
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
R2[:j+1,:] = R
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
overwrite_qru=True, check_finite=False)
# Transformed least squares problem
# || Q R y - inner_res_0 * e_1 ||_2 = min!
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
# Residual is immediately known
res = abs(Q[0,-1])
# Check for termination
if res < atol or breakdown:
break
if not np.isfinite(R[j,j]):
# nans encountered, bail out
raise LinAlgError()
# -- Get the LSQ problem solution
# The problem is triangular, but the condition number may be
# bad (or in case of breakdown the last diagonal entry may be
# zero), so use lstsq instead of trtrs.
y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
B = B[:,:j+1]
return Q, R, B, vs, zs, y, res
def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
m=20, k=None, CU=None, discard_C=False, truncate='oldest',
atol=None):
"""
Solve a matrix equation using flexible GCROT(m,k) algorithm.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : ndarray
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is `tol`.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, ndarray, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
can vary from iteration to iteration. Effective preconditioning
dramatically improves the rate of convergence, which implies that
fewer iterations are needed to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
m : int, optional
Number of inner FGMRES iterations per each outer iteration.
Default: 20
k : int, optional
Number of vectors to carry between inner FGMRES iterations.
According to [2]_, good values are around m.
Default: m
CU : list of tuples, optional
List of tuples ``(c, u)`` which contain the columns of the matrices
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
The list given and vectors contained in it are modified in-place.
If not given, start from empty matrices. The ``c`` elements in the
tuples can be ``None``, in which case the vectors are recomputed
via ``c = A u`` on start and orthogonalized as described in [3]_.
discard_C : bool, optional
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
for different linear systems.
truncate : {'oldest', 'smallest'}, optional
Truncation scheme to use. Drop: oldest vectors, or vectors with
smallest singular values using the scheme discussed in [1,2].
See [2]_ for detailed comparison.
Default: 'oldest'
Returns
-------
x : ndarray
The solution found.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
References
----------
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
of GCROT for solving nonsymmetric linear systems'',
SIAM J. Sci. Comput. 32, 172 (2010).
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
''Recycling Krylov subspaces for sequences of linear systems'',
SIAM J. Sci. Comput. 28, 1651 (2006).
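
    Examples
    --------
    A minimal sketch on a small sparse system (values chosen only for
    illustration):

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import gcrotmk
    >>> A = csc_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
    >>> b = np.array([2., 4., -1.])
    >>> x, exit_code = gcrotmk(A, b, atol=1e-5)
    >>> print(exit_code)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True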
"""
A,M,x,b,postprocess = make_system(A,M,x0,b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if truncate not in ('oldest', 'smallest'):
raise ValueError("Invalid value for 'truncate': %r" % (truncate,))
if atol is None:
warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
"The default value will change in the future. To preserve "
"current behavior, set ``atol=tol``.",
category=DeprecationWarning, stacklevel=2)
atol = tol
matvec = A.matvec
psolve = M.matvec
if CU is None:
CU = []
if k is None:
k = m
axpy, dot, scal = None, None, None
r = b - matvec(x)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
b_norm = nrm2(b)
if b_norm == 0:
x = b
return (postprocess(x), 0)
if discard_C:
CU[:] = [(None, u) for c, u in CU]
# Reorthogonalize old vectors
if CU:
# Sort already existing vectors to the front
CU.sort(key=lambda cu: cu[0] is not None)
# Fill-in missing ones
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
us = []
j = 0
while CU:
# More memory-efficient: throw away old vectors as we go
c, u = CU.pop(0)
if c is None:
c = matvec(u)
C[:,j] = c
j += 1
us.append(u)
# Orthogonalize
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
del C
# C := Q
cs = list(Q.T)
# U := U P R^-1, back-substitution
new_us = []
for j in range(len(cs)):
u = us[P[j]]
for i in range(j):
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
# discard rest of the vectors
break
u = scal(1.0/R[j,j], u)
new_us.append(u)
# Form the new CU lists
CU[:] = list(zip(cs, new_us))[::-1]
if CU:
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
# Solve first the projection operation with respect to the CU
# vectors. This corresponds to modifying the initial guess to
# be
#
# x' = x + U y
# y = argmin_y || b - A (x + U y) ||^2
#
# The solution is y = C^H (b - A x)
for c, u in CU:
yc = dot(c, r)
x = axpy(u, x, x.shape[0], yc)
r = axpy(c, r, r.shape[0], -yc)
# GCROT main iteration
for j_outer in range(maxiter):
# -- callback
if callback is not None:
callback(x)
beta = nrm2(r)
# -- check stopping condition
beta_tol = max(atol, tol * b_norm)
if beta <= beta_tol and (j_outer > 0 or CU):
# recompute residual to avoid rounding error
r = b - matvec(x)
beta = nrm2(r)
if beta <= beta_tol:
j_outer = -1
break
ml = m + max(k - len(CU), 0)
cs = [c for c, u in CU]
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
r/beta,
ml,
rpsolve=psolve,
atol=max(atol, tol*b_norm)/beta,
cs=cs)
y *= beta
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
break
#
# At this point,
#
# [A U, A Z] = [C, V] G; G = [ I B ]
# [ 0 H ]
#
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
#
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
#
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
#
#
# GCROT(m,k) update
#
# Define new outer vectors
# ux := (Z - U B) y
ux = zs[0]*y[0]
for z, yc in zip(zs[1:], y[1:]):
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
by = B.dot(y)
for cu, byc in zip(CU, by):
c, u = cu
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
# cx := V H y
hy = Q.dot(R.dot(y))
cx = vs[0] * hy[0]
for v, hyc in zip(vs[1:], hy[1:]):
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
# Normalize cx, maintaining cx = A ux
# This new cx is orthogonal to the previous C, by construction
try:
alpha = 1/nrm2(cx)
if not np.isfinite(alpha):
raise FloatingPointError()
except (FloatingPointError, ZeroDivisionError):
# Cannot update, so skip it
continue
cx = scal(alpha, cx)
ux = scal(alpha, ux)
# Update residual and solution
gamma = dot(cx, r)
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
# Truncate CU
if truncate == 'oldest':
while len(CU) >= k and CU:
del CU[0]
elif truncate == 'smallest':
if len(CU) >= k and CU:
# cf. [1,2]
D = solve(R[:-1,:].T, B.T).T
W, sigma, V = svd(D)
# C := C W[:,:k-1], U := U W[:,:k-1]
new_CU = []
for j, w in enumerate(W[:,:k-1].T):
c, u = CU[0]
c = c * w[0]
u = u * w[0]
for cup, wp in zip(CU[1:], w[1:]):
cp, up = cup
c = axpy(cp, c, c.shape[0], wp)
u = axpy(up, u, u.shape[0], wp)
# Reorthogonalize at the same time; not necessary
# in exact arithmetic, but floating point error
# tends to accumulate here
for cp, up in new_CU:
alpha = dot(cp, c)
c = axpy(cp, c, c.shape[0], -alpha)
u = axpy(up, u, u.shape[0], -alpha)
alpha = nrm2(c)
c = scal(1.0/alpha, c)
u = scal(1.0/alpha, u)
new_CU.append((c, u))
CU[:] = new_CU
# Add new vector to CU
CU.append((cx, ux))
# Include the solution vector to the span
CU.append((None, x.copy()))
if discard_C:
CU[:] = [(None, uz) for cz, uz in CU]
return postprocess(x), j_outer + 1
| 31.682281
| 86
| 0.5144
|
0432bffe20765a015ff93b81699d46ffae4d736f
| 21,654
|
py
|
Python
|
AtomicASTChangeMining/src/test/resources/ASTConversion/main.py
|
maldil/CPATMiner2.0
|
743aa8a5b638a1963e621f59f63d794728ab0c79
|
[
"Apache-2.0"
] | 4
|
2021-11-04T02:47:31.000Z
|
2022-01-25T02:04:05.000Z
|
AtomicASTChangeMining/src/test/resources/ASTConversion/main.py
|
maldil/R-CPATMiner
|
88b96a5af438a9c2ea2dab351cb8b210119132a2
|
[
"Apache-2.0"
] | null | null | null |
AtomicASTChangeMining/src/test/resources/ASTConversion/main.py
|
maldil/R-CPATMiner
|
88b96a5af438a9c2ea2dab351cb8b210119132a2
|
[
"Apache-2.0"
] | 1
|
2021-09-11T06:52:39.000Z
|
2021-09-11T06:52:39.000Z
|
#!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2020 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This module contains the :class:`Arkane` class, the main class used to run Arkane.
"""
import argparse
import csv
import logging
import os
import os.path
import sys
import time
import numpy as np
try:
import matplotlib
matplotlib.rc('mathtext', default='regular')
except ImportError:
pass
from rmgpy import __version__
from rmgpy.chemkin import write_elements_section
from rmgpy.data.thermo import ThermoLibrary
from rmgpy.data.base import Entry
from rmgpy.data.kinetics.library import KineticsLibrary
from rmgpy.exceptions import InputError
from arkane.common import is_pdep
from arkane.encorr.ae import AEJob
from arkane.encorr.bac import BACJob
from arkane.explorer import ExplorerJob
from arkane.input import load_input_file
from arkane.kinetics import KineticsJob
from arkane.output import save_thermo_lib, save_kinetics_lib
from arkane.pdep import PressureDependenceJob
from arkane.statmech import StatMechJob
from arkane.thermo import ThermoJob
################################################################################
class Arkane(object):
"""
The :class:`Arkane` class represents an instance of Arkane, a tool for
computing properties of chemical species and reactions. The attributes are:
=================== ========================================================
Attribute Description
=================== ========================================================
`job_list` A list of the jobs to execute
`input_file` The path of the input file defining the jobs to execute
`output_directory` The directory in which to write the output files
`verbose` The level of detail in the generated logging messages
=================== ========================================================
The output directory defaults to the same directory as the input file if
not explicitly specified.
To use this class programmatically, create an instance and set its
attributes using either the :meth:`__init__()` method or by directly
accessing the attributes, and then invoke the :meth:`execute()` method.
You can also populate the attributes from the command line using the
:meth:`parse_command_line_arguments()` method before running :meth:`execute()`.
"""
def __init__(self, input_file=None, output_directory=None, verbose=logging.INFO, save_rmg_libraries=True):
self.job_list = []
self.input_file = input_file
self.output_directory = output_directory
self.verbose = verbose
self.save_rmg_libraries = save_rmg_libraries
def parse_command_line_arguments(self):
"""
Parse the command-line arguments being passed to Arkane. This uses the
:mod:`argparse` module, which ensures that the command-line arguments are
sensible, parses them, and returns them.
"""
parser = argparse.ArgumentParser(description="""
Arkane is a Python toolkit for computing chemical reaction rates
and other properties used in detailed kinetics models
using various methodologies and theories.
""")
parser.add_argument('file', metavar='FILE', type=str, nargs=1, help='a file describing the job to execute')
# Options for controlling the amount of information printed to the console
# By default a moderate level of information is printed; you can either
# ask for less (quiet), more (verbose), or much more (debug)
group = parser.add_mutually_exclusive_group()
group.add_argument('-q', '--quiet', action='store_const', const=logging.WARNING, default=logging.INFO,
dest='verbose', help='only print warnings and errors')
group.add_argument('-v', '--verbose', action='store_const', const=logging.DEBUG, default=logging.INFO,
dest='verbose', help='print more verbose output')
group.add_argument('-d', '--debug', action='store_const', const=0, default=logging.INFO, dest='verbose',
help='print debug information')
# Add options for controlling what directories files are written to
parser.add_argument('-o', '--output-directory', type=str, nargs=1, default='',
metavar='DIR', help='use DIR as output directory')
# Add options for controlling generation of plots
parser.add_argument('-p', '--no-plot', action='store_false', default=True,
help='prevent generating plots', dest='plot')
args = parser.parse_args()
# Extract the input file
self.input_file = args.file[0]
# Extract the log verbosity
self.verbose = args.verbose
# Extract the plot settings
self.plot = args.plot
# Determine the output directory
# By default the directory containing the input file is used, unless an
# alternate directory is specified using the -o flag
if args.output_directory and os.path.isdir(args.output_directory[0]):
self.output_directory = os.path.abspath(args.output_directory[0])
else:
self.output_directory = os.path.dirname(os.path.abspath(args.file[0]))
def load_input_file(self, input_file):
"""
Load a set of jobs from the given `input_file` on disk. Returns the
loaded set of jobs as a list.
"""
self.input_file = input_file
self.job_list, self.reaction_dict, self.species_dict, self.transition_state_dict, self.network_dict, \
self.level_of_theory = load_input_file(self.input_file)
logging.info('')
return self.job_list
def execute(self):
"""
Execute, in order, the jobs found in input file specified by the
`input_file` attribute.
"""
# Initialize the logging system (both to the console and to a file in the
# output directory)
initialize_log(self.verbose, os.path.join(self.output_directory, 'arkane.log'))
# Print some information to the beginning of the log
log_header()
# Load the input file for the job
self.job_list = self.load_input_file(self.input_file)
logging.info('')
# Initialize (and clear!) the output files for the job
if self.output_directory is None:
self.output_directory = os.path.dirname(os.path.abspath(self.input_file))
output_file = os.path.join(self.output_directory, 'output.py')
with open(output_file, 'w'):
pass
chemkin_file = os.path.join(self.output_directory, 'chem.inp')
# write the chemkin files and run the thermo and then kinetics jobs
with open(chemkin_file, 'w') as f:
write_elements_section(f)
f.write('SPECIES\n\n')
# write each species in species block
for job in self.job_list:
if isinstance(job, ThermoJob):
f.write(job.species.to_chemkin())
f.write('\n')
f.write('\nEND\n\n\n\n')
f.write('THERM ALL\n')
f.write(' 300.000 1000.000 5000.000\n\n')
# run thermo and statmech jobs (also writes thermo blocks to Chemkin file)
supporting_info = []
hindered_rotor_info = []
bacjob_num = 1
for job in self.job_list:
if isinstance(job, ThermoJob):
job.execute(output_directory=self.output_directory, plot=self.plot)
if isinstance(job, StatMechJob):
job.execute(output_directory=self.output_directory, plot=self.plot, pdep=is_pdep(self.job_list))
if hasattr(job, 'supporting_info'):
supporting_info.append(job.supporting_info)
if hasattr(job, 'raw_hindered_rotor_data'):
for hr_info in job.raw_hindered_rotor_data:
hindered_rotor_info.append(hr_info)
if isinstance(job, BACJob):
job.execute(output_directory=self.output_directory, plot=self.plot, jobnum=bacjob_num)
bacjob_num += 1
if isinstance(job, AEJob):
job.execute(output_file=output_file)
with open(chemkin_file, 'a') as f:
f.write('\n')
f.write('END\n\n\n\n')
f.write('REACTIONS KCAL/MOLE MOLES\n\n')
if supporting_info:
# write supporting_info.csv for statmech jobs
supporting_info_file = os.path.join(self.output_directory, 'supporting_information.csv')
with open(supporting_info_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Label', 'Symmetry Number', 'Number of optical isomers', 'Symmetry Group',
'Rotational constant (cm-1)',
'Calculated Frequencies (unscaled and prior to projection, cm^-1)',
'Electronic energy (J/mol)', 'E0 (electronic energy + ZPE, J/mol)',
'E0 with atom and bond corrections (J/mol)', 'Atom XYZ coordinates (angstrom)',
'T1 diagnostic', 'D1 diagnostic'])
for row in supporting_info:
label = row[0]
rot = '-'
freq = '-'
if row[4] is not None and isinstance(row[4].rotationalConstant.value, float):
# diatomic species have a single rotational constant
rot = '{0:.2f}'.format(row[4].rotationalConstant.value)
elif row[4] is not None:
rot = ', '.join(['{0:.2f}'.format(s) for s in row[4].rotationalConstant.value])
if row[5] is not None:
freq = ''
if row[6] is not None: # there is a negative frequency
freq = '{0:.1f}'.format(abs(row[6])) + 'i, '
freq += ', '.join(['{0:.1f}'.format(s) for s in row[5]])
atoms = ', '.join(["{0} {1}".format(atom, " ".join([str(c) for c in coords]))
for atom, coords in zip(row[10], row[11])])
writer.writerow([label, row[1], row[2], row[3], rot, freq, row[7], row[8], row[9], atoms, row[12],
row[13]])
if hindered_rotor_info:
hr_file = os.path.join(self.output_directory, 'hindered_rotor_scan_data.csv')
# find longest length to set column number for energies
max_energy_length = max([len(hr[4]) for hr in hindered_rotor_info])
with open(hr_file, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['species', 'rotor_number', 'symmetry', 'resolution (degrees)',
'pivot_atoms', 'frozen_atoms'] +
['energy (J/mol) {}'.format(i) for i in range(max_energy_length)])
for row in hindered_rotor_info:
writer.writerow([row[0], row[1], row[2], row[3][1] * 180 / np.pi,
row[5], row[6]] + [a for a in row[4]])
# run kinetics and pdep jobs (also writes reaction blocks to Chemkin file)
for job in self.job_list:
if isinstance(job, KineticsJob):
job.execute(output_directory=self.output_directory, plot=self.plot)
elif isinstance(job, PressureDependenceJob) and not any([isinstance(job, ExplorerJob) for job in
self.job_list]):
# if there is an explorer job the pdep job will be run in the explorer job
if job.network is None:
raise InputError(
'No network matched the label of the pressureDependence block and there is no explorer block '
'to generate a network')
job.execute(output_file=output_file, plot=self.plot)
elif isinstance(job, ExplorerJob):
thermo_library, kinetics_library, species_list = self.get_libraries()
job.execute(output_file=output_file, plot=self.plot, species_list=species_list,
thermo_library=thermo_library, kinetics_library=kinetics_library)
with open(chemkin_file, 'a') as f:
f.write('END\n\n')
# Print some information to the end of the log
log_footer()
if self.save_rmg_libraries:
# save RMG thermo and kinetics libraries
species, reactions = list(), list()
for job in self.job_list:
if isinstance(job, ThermoJob) and len(job.species.molecule):
species.append(job.species)
elif isinstance(job, KineticsJob) \
and all([len(species.molecule) for species in job.reaction.reactants + job.reaction.products]):
reactions.append(job.reaction)
elif isinstance(job, PressureDependenceJob):
for reaction in job.network.path_reactions:
if all([len(species.molecule) for species in reaction.reactants + reaction.products]):
reactions.append(reaction)
lib_path = os.path.join(self.output_directory, 'RMG_libraries')
level_of_theory = f' using {self.level_of_theory}' if self.level_of_theory is not None else ''
lib_long_desc = f'Calculated using Arkane v{__version__}{level_of_theory}.'
save_thermo_lib(species_list=species, path=lib_path, name='thermo', lib_long_desc=lib_long_desc)
save_kinetics_lib(rxn_list=reactions, path=lib_path, name='kinetics', lib_long_desc=lib_long_desc)
def get_libraries(self):
"""Get RMG kinetics and thermo libraries"""
name = 'kineticsjobs'
species_list = list(self.species_dict.values())
reaction_list = list(self.reaction_dict.values())
# remove duplicate species
for rxn in reaction_list:
for i, rspc in enumerate(rxn.reactants):
for spc in species_list:
if spc.is_isomorphic(rspc):
rxn.reactants[i] = spc
break
for i, rspc in enumerate(rxn.products):
for spc in species_list:
if spc.is_isomorphic(rspc):
rxn.products[i] = spc
break
del_inds = []
for i, spc1 in enumerate(species_list):
for j, spc2 in enumerate(species_list):
if j > i and spc1.is_isomorphic(spc2):
del_inds.append(j)
for j in sorted(del_inds)[::-1]:
del species_list[j]
thermo_library = ThermoLibrary(name=name)
for i, species in enumerate(species_list):
if species.thermo:
thermo_library.load_entry(index=i + 1,
label=species.label,
molecule=species.molecule[0].to_adjacency_list(),
thermo=species.thermo,
shortDesc=species.thermo.comment)
else:
logging.warning(
'Species {0} did not contain any thermo data and was omitted from the thermo library.'.format(
str(species)))
# load kinetics library entries
kinetics_library = KineticsLibrary(name=name, auto_generated=True)
kinetics_library.entries = {}
for i, reaction in enumerate(reaction_list):
entry = Entry(
index=i + 1,
label=reaction.to_labeled_str(),
item=reaction,
data=reaction.kinetics)
if reaction.kinetics is not None:
if hasattr(reaction, 'library') and reaction.library:
entry.long_desc = 'Originally from reaction library: ' + \
reaction.library + "\n" + reaction.kinetics.comment
else:
entry.long_desc = reaction.kinetics.comment
kinetics_library.entries[i + 1] = entry
kinetics_library.label = name
return thermo_library, kinetics_library, species_list
def initialize_log(verbose=logging.INFO, log_file=None):
"""
Set up a logger for Arkane to use to print output to stdout. The
`verbose` parameter is an integer specifying the amount of log text seen
at the console; the levels correspond to those of the :data:`logging` module.
"""
# Create logger
logger = logging.getLogger()
logger.setLevel(verbose)
# Use custom level names for cleaner log output
logging.addLevelName(logging.CRITICAL, 'Critical: ')
logging.addLevelName(logging.ERROR, 'Error: ')
logging.addLevelName(logging.WARNING, 'Warning: ')
logging.addLevelName(logging.INFO, '')
logging.addLevelName(logging.DEBUG, '')
logging.addLevelName(0, '')
# Create formatter and add to handlers
formatter = logging.Formatter('%(levelname)s%(message)s')
# Remove old handlers before adding ours
while logger.handlers:
logger.removeHandler(logger.handlers[0])
# Create console handler; send everything to stdout rather than stderr
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(verbose)
ch.setFormatter(formatter)
logger.addHandler(ch)
# Create file handler; always be at least verbose in the file
if log_file:
fh = logging.FileHandler(filename=log_file)
fh.setLevel(min(logging.DEBUG, verbose))
fh.setFormatter(formatter)
logger.addHandler(fh)
def log_header(level=logging.INFO):
"""
Output a header containing identifying information about Arkane to the log.
"""
from rmgpy import __version__
logging.log(level, 'Arkane execution initiated at {0}'.format(time.asctime()))
logging.log(level, '')
logging.log(level, '################################################################')
logging.log(level, '# #')
logging.log(level, '# Automated Reaction Kinetics and Network Exploration (Arkane) #')
logging.log(level, '# #')
logging.log(level, '# Version: {0:49s} #'.format(__version__))
logging.log(level, '# Authors: RMG Developers (rmg_dev@mit.edu) #')
logging.log(level, '# P.I.s: William H. Green (whgreen@mit.edu) #')
logging.log(level, '# Richard H. West (r.west@neu.edu) #')
logging.log(level, '# Website: http://reactionmechanismgenerator.github.io/ #')
logging.log(level, '# #')
logging.log(level, '################################################################')
logging.log(level, '')
def log_footer(level=logging.INFO):
"""
Output a footer to the log.
"""
logging.log(level, '')
logging.log(level, 'Arkane execution terminated at {0}'.format(time.asctime()))
| 48.55157
| 119
| 0.573012
|
518a563ab73b988bb3feb87130aa6da3ec7f7bf6
| 1,608
|
py
|
Python
|
accounts/api.py
|
notrealanurag/curezo_old
|
c3fd350750a799ae975ed6a89f6db2b39a22fbd0
|
[
"MIT"
] | null | null | null |
accounts/api.py
|
notrealanurag/curezo_old
|
c3fd350750a799ae975ed6a89f6db2b39a22fbd0
|
[
"MIT"
] | 5
|
2021-03-19T11:01:39.000Z
|
2021-09-22T19:35:40.000Z
|
accounts/api.py
|
notrealanurag/curezo_old
|
c3fd350750a799ae975ed6a89f6db2b39a22fbd0
|
[
"MIT"
] | null | null | null |
from rest_framework import generics, permissions, viewsets
from rest_framework.response import Response
from knox.models import AuthToken
from knox.auth import TokenAuthentication
from .serializers import UserSerializer, RegisterSerializer, LoginSerializer
from django.contrib.auth.models import User
# Register Viewset
class RegisterAPI(generics.GenericAPIView):
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
# "user": UserSerializer(user, context=self.get_serializer_context()).data,
# "token": AuthToken.objects.create(user)[1]
"creation" : "Account Created. Please Login"
})
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
_, token = AuthToken.objects.create(user)
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": token
})
class UserAPI(generics.RetrieveUpdateDestroyAPIView):
authentication_classes = (TokenAuthentication,)
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserSerializer
def get_object(self, *args, **kwargs):
return self.request.user
| 35.733333
| 87
| 0.709577
|
f5a5d9548048d51e81e6caa7f88042c87c428fad
| 214
|
py
|
Python
|
setup.py
|
ioliveros/github-api-client
|
2bbc832af0a7e744958e4741b9f6419cdabc4eb0
|
[
"MIT"
] | null | null | null |
setup.py
|
ioliveros/github-api-client
|
2bbc832af0a7e744958e4741b9f6419cdabc4eb0
|
[
"MIT"
] | 1
|
2021-06-02T03:15:56.000Z
|
2021-06-02T03:15:56.000Z
|
setup.py
|
ioliveros/github-api-client
|
2bbc832af0a7e744958e4741b9f6419cdabc4eb0
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='github_api_client',
version='1.1',
author='Ian Oliveros',
author_email='ioliveros.dev@gmail.com',
packages=find_packages(),
python_requires='>=3.6'
)
| 19.454545
| 43
| 0.747664
|
19cc55f8af9df936d3b09e882b98454ba4361514
| 7,482
|
py
|
Python
|
models/networks_basic.py
|
nistath/PerceptualSimilarity
|
29c5c71a6b09557ea0d049f27ec44f02d9ba7937
|
[
"BSD-2-Clause"
] | null | null | null |
models/networks_basic.py
|
nistath/PerceptualSimilarity
|
29c5c71a6b09557ea0d049f27ec44f02d9ba7937
|
[
"BSD-2-Clause"
] | null | null | null |
models/networks_basic.py
|
nistath/PerceptualSimilarity
|
29c5c71a6b09557ea0d049f27ec44f02d9ba7937
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
import sys
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
from pdb import set_trace as st
from skimage import color
from IPython import embed
from . import pretrained_networks as pn
from .. import models as util
def spatial_average(in_tens, keepdim=True):
return in_tens.mean([2,3],keepdim=keepdim)
def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W
in_H = in_tens.shape[2]
scale_factor = 1.*out_H/in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)
# Learned perceptual metric
class PNetLin(nn.Module):
def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True):
super(PNetLin, self).__init__()
self.pnet_type = pnet_type
self.pnet_tune = pnet_tune
self.pnet_rand = pnet_rand
self.spatial = spatial
self.lpips = lpips
self.version = version
self.scaling_layer = ScalingLayer()
if(self.pnet_type in ['vgg','vgg16']):
net_type = pn.vgg16
self.chns = [64,128,256,512,512]
elif(self.pnet_type=='alex'):
net_type = pn.alexnet
self.chns = [64,192,384,256,256]
elif(self.pnet_type=='squeeze'):
net_type = pn.squeezenet
self.chns = [64,128,256,384,384,512,512]
self.L = len(self.chns)
self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)
if(lpips):
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4]
if(self.pnet_type=='squeeze'): # 7 layers for squeezenet
self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
self.lins+=[self.lin5,self.lin6]
def forward(self, in0, in1, retPerLayer=False):
# v0.0 - original release had a bug, where input was not scaled
in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1)
outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input)
feats0, feats1, diffs = {}, {}, {}
for kk in range(self.L):
feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk]-feats1[kk])**2
if(self.lpips):
if(self.spatial):
res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
else:
res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
else:
if(self.spatial):
res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
else:
res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)]
val = res[0]
for l in range(1,self.L):
val += res[l]
if(retPerLayer):
return (val, res)
else:
return val
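# Usage sketch: the inputs are assumed to be RGB image tensors normalized to
# [-1, 1] (as implied by the ScalingLayer shift/scale below); the call returns
# one distance per image pair.
#     loss_fn = PNetLin(pnet_type='alex', spatial=False, lpips=True)
#     d = loss_fn(img0, img1)   # shape (N, 1, 1, 1)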
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None])
self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None])
def forward(self, inp):
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
''' A single linear layer which does a 1x1 conv '''
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = [nn.Dropout(),] if(use_dropout) else []
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),]
self.model = nn.Sequential(*layers)
class Dist2LogitLayer(nn.Module):
''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''
def __init__(self, chn_mid=32, use_sigmoid=True):
super(Dist2LogitLayer, self).__init__()
layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),]
layers += [nn.LeakyReLU(0.2,True),]
layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),]
layers += [nn.LeakyReLU(0.2,True),]
layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),]
if(use_sigmoid):
layers += [nn.Sigmoid(),]
self.model = nn.Sequential(*layers)
def forward(self,d0,d1,eps=0.1):
return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1))
class BCERankingLoss(nn.Module):
def __init__(self, chn_mid=32):
super(BCERankingLoss, self).__init__()
self.net = Dist2LogitLayer(chn_mid=chn_mid)
# self.parameters = list(self.net.parameters())
self.loss = torch.nn.BCELoss()
def forward(self, d0, d1, judge):
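        # Intended inputs (a sketch, not documented here): d0 and d1 are
        # per-pair distances for two candidate patches and `judge` lies in
        # [-1, 1], so (judge + 1) / 2 maps it onto a BCE target in [0, 1].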
per = (judge+1.)/2.
self.logit = self.net.forward(d0,d1)
return self.loss(self.logit, per)
# L2, DSSIM metrics
class FakeNet(nn.Module):
def __init__(self, use_gpu=True, colorspace='Lab'):
super(FakeNet, self).__init__()
self.use_gpu = use_gpu
self.colorspace=colorspace
class L2(FakeNet):
def forward(self, in0, in1, retPerLayer=None):
assert(in0.size()[0]==1) # currently only supports batchSize 1
if(self.colorspace=='RGB'):
(N,C,X,Y) = in0.size()
value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
return value
elif(self.colorspace=='Lab'):
value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
ret_var = Variable( torch.Tensor((value,) ) )
if(self.use_gpu):
ret_var = ret_var.cuda()
return ret_var
class DSSIM(FakeNet):
def forward(self, in0, in1, retPerLayer=None):
assert(in0.size()[0]==1) # currently only supports batchSize 1
if(self.colorspace=='RGB'):
value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float')
elif(self.colorspace=='Lab'):
value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)),
util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float')
ret_var = Variable( torch.Tensor((value,) ) )
if(self.use_gpu):
ret_var = ret_var.cuda()
return ret_var
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print('Network',net)
print('Total number of parameters: %d' % num_params)
| 39.797872
| 134
| 0.623095
|
d22b875fd1a5d832cd2a299573b03a9270994d6f
| 194
|
py
|
Python
|
search/settings/settings_api.py
|
JinHai-CN/phantoscope
|
1148a30bd379691220e46520248f76615f1d86d3
|
[
"Apache-2.0"
] | null | null | null |
search/settings/settings_api.py
|
JinHai-CN/phantoscope
|
1148a30bd379691220e46520248f76615f1d86d3
|
[
"Apache-2.0"
] | null | null | null |
search/settings/settings_api.py
|
JinHai-CN/phantoscope
|
1148a30bd379691220e46520248f76615f1d86d3
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
from common.common import json_response
settings = Blueprint("settings", __name__)
@settings.route("/ping")
@json_response
def settings_ping():
return "pong"
| 16.166667
| 42
| 0.762887
|
701e74b0b13c6bd090e81c58ec45e7a2bd6dc2fa
| 1,830
|
py
|
Python
|
examples/fona_simpletest.py
|
FoamyGuy/Adafruit_CircuitPython_FONA
|
0343cb590901c78af4f0510f25c63e4ef50d351f
|
[
"MIT"
] | null | null | null |
examples/fona_simpletest.py
|
FoamyGuy/Adafruit_CircuitPython_FONA
|
0343cb590901c78af4f0510f25c63e4ef50d351f
|
[
"MIT"
] | null | null | null |
examples/fona_simpletest.py
|
FoamyGuy/Adafruit_CircuitPython_FONA
|
0343cb590901c78af4f0510f25c63e4ef50d351f
|
[
"MIT"
] | null | null | null |
# pylint: disable=unused-import
import time
import board
import busio
import digitalio
import adafruit_requests as requests
from adafruit_fona.adafruit_fona import FONA
from adafruit_fona.fona_3g import FONA3G
import adafruit_fona.adafruit_fona_network as network
import adafruit_fona.adafruit_fona_socket as cellular_socket
print("FONA Webclient Test")
TEXT_URL = "http://wifitest.adafruit.com/testwifi/index.html"
JSON_URL = "http://api.coindesk.com/v1/bpi/currentprice/USD.json"
# Get GPRS details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("GPRS secrets are kept in secrets.py, please add them there!")
raise
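# A minimal secrets.py sketch; the keys match the lookups below, and the
# values are placeholders for your carrier's GPRS settings, not real ones.
#     secrets = {"apn": "your-apn", "apn_username": "", "apn_password": ""}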
# Create a serial connection for the FONA connection
uart = busio.UART(board.TX, board.RX)
rst = digitalio.DigitalInOut(board.D4)
# Use this for FONA800 and FONA808
fona = FONA(uart, rst)
# Use this for FONA3G
# fona = FONA3G(uart, rst)
# Initialize cellular data network
network = network.CELLULAR(
fona, (secrets["apn"], secrets["apn_username"], secrets["apn_password"])
)
while not network.is_attached:
print("Attaching to network...")
time.sleep(0.5)
print("Attached!")
while not network.is_connected:
print("Connecting to network...")
network.connect()
time.sleep(0.5)
print("Network Connected!")
print("My IP address is:", fona.local_ip)
print("IP lookup adafruit.com: %s" % fona.get_host_by_name("adafruit.com"))
# Initialize a requests object with a socket and cellular interface
requests.set_socket(cellular_socket, fona)
# fona._debug = True
print("Fetching text from", TEXT_URL)
r = requests.get(TEXT_URL)
print("-" * 40)
print(r.text)
print("-" * 40)
r.close()
print()
print("Fetching json from", JSON_URL)
r = requests.get(JSON_URL)
print("-" * 40)
print(r.json())
print("-" * 40)
r.close()
print("Done!")
| 25.068493
| 76
| 0.743169
|
11cd643eae0d7845c378b6833d48874d4c20506d
| 309
|
py
|
Python
|
tests/test_dataset.py
|
elifesciences/elife-crossref-xml-generation
|
1cd7b3981d9c78032d2d0ffb68b651de40a8d622
|
[
"MIT"
] | 3
|
2018-03-01T01:14:14.000Z
|
2021-01-19T13:04:42.000Z
|
tests/test_dataset.py
|
elifesciences/elife-crossref-xml-generation
|
1cd7b3981d9c78032d2d0ffb68b651de40a8d622
|
[
"MIT"
] | 88
|
2017-07-20T00:13:47.000Z
|
2021-11-29T04:58:01.000Z
|
tests/test_dataset.py
|
elifesciences/elife-crossref-xml-generation
|
1cd7b3981d9c78032d2d0ffb68b651de40a8d622
|
[
"MIT"
] | 4
|
2017-06-28T22:22:20.000Z
|
2021-02-17T23:06:39.000Z
|
import unittest
from elifecrossref import dataset
class TestDataset(unittest.TestCase):
def test_choose_dataset_identifier_none(self):
"""test when an object has no attributes"""
self.assertIsNone(dataset.choose_dataset_identifier(None))
if __name__ == "__main__":
unittest.main()
| 23.769231
| 66
| 0.747573
|
7d4a70d474b2b52ac32a7a02bef47ac4a3f2e3db
| 1,691
|
py
|
Python
|
escea/discover.py
|
snikch/escea
|
9678b8dbec81b67e61e8f9fb62578ec5870af61e
|
[
"MIT"
] | 3
|
2016-09-18T02:39:07.000Z
|
2019-09-02T03:07:09.000Z
|
escea/discover.py
|
snikch/escea
|
9678b8dbec81b67e61e8f9fb62578ec5870af61e
|
[
"MIT"
] | 3
|
2019-03-24T04:56:27.000Z
|
2020-09-29T10:00:33.000Z
|
escea/discover.py
|
snikch/escea
|
9678b8dbec81b67e61e8f9fb62578ec5870af61e
|
[
"MIT"
] | 3
|
2019-09-05T06:46:32.000Z
|
2022-03-29T05:32:37.000Z
|
import socket
import binascii
from escea.message import (
)
from escea.error import (ConnectionTimeout)
class Fire(object):
UDP_PORT = 3300
def __init__(self, ip):
super(Fire, self).__init__()
self._ip = ip
self._prefix = '47'
self._suffix = '46'
def start(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('0.0.0.0', Fire.UDP_PORT))
def stop(self):
self.sock.close()
def send(self, message):
data = ''
try:
self.sock.sendto(message.payload(), (self._ip, Fire.UDP_PORT))
self.sock.settimeout(2)
data, _ = self.sock.recvfrom(1024)
data = binascii.hexlify(data)
except socket.timeout:
raise ConnectionTimeout
response = Response(data)
message.assert_code(response.get(1))
return response
def status(self):
return StatusResponse(self.send(StatusRequest(self._prefix, self._suffix)))
def set_temp(self, target):
self.send(SetTempRequest(self._prefix, self._suffix, target))
def power_on(self):
self.send(PowerOnRequest(self._prefix, self._suffix))
def power_off(self):
self.send(PowerOffRequest(self._prefix, self._suffix))
def flame_effect_on(self):
self.send(FlameEffectOnRequest(self._prefix, self._suffix))
def flame_effect_off(self):
self.send(FlameEffectOffRequest(self._prefix, self._suffix))
def fan_boost_on(self):
self.send(FanBoostOnRequest(self._prefix, self._suffix))
def fan_boost_off(self):
self.send(FanBoostOffRequest(self._prefix, self._suffix))
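# Usage sketch (the IP address is a placeholder for the fireplace on the LAN):
#     fire = Fire('192.168.1.50')
#     fire.start()
#     status = fire.status()
#     fire.power_on()
#     fire.stop()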
| 26.84127
| 83
| 0.643998
|
58edfcb94d4e5fc3120d5f013f575d59aeb4dd9f
| 1,026
|
py
|
Python
|
codes/a_config/c_pybullet_parameters/parameters_ant_ddpg.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/a_config/c_pybullet_parameters/parameters_ant_ddpg.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/a_config/c_pybullet_parameters/parameters_ant_ddpg.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | 1
|
2021-11-23T12:30:37.000Z
|
2021-11-23T12:30:37.000Z
|
from codes.a_config._rl_parameters.off_policy.parameter_ddpg import PARAMETERS_DDPG
from codes.e_utils.names import *
from codes.a_config.parameters_general import PARAMETERS_GENERAL
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_gym_env.py
class PARAMETERS_ANT_DDPG(PARAMETERS_GENERAL, PARAMETERS_DDPG):
ENVIRONMENT_ID = EnvironmentName.PYBULLET_ANT_V0
DEEP_LEARNING_MODEL = DeepLearningModelName.CONTINUOUS_DETERMINISTIC_ACTOR_CRITIC_MLP
RL_ALGORITHM = RLAlgorithmName.DDPG_V0
OPTIMIZER = OptimizerName.ADAM
TRAIN_STOP_EPISODE_REWARD = 2000.0
TRAIN_STOP_EPISODE_REWARD_STD = 50.0
STOP_PATIENCE_COUNT = 10
REPLAY_BUFFER_SIZE = 1000000
TARGET_NET_SYNC_STEP_PERIOD = 10000
MAX_GLOBAL_STEP = 10000000
EPSILON_INIT = 1.0
EPSILON_MIN = 0.01
EPSILON_MIN_STEP = 3000000
LEARNING_RATE = 0.00025
GAMMA = 0.99
BATCH_SIZE = 32
TRAIN_STEP_FREQ = 1
AVG_EPISODE_SIZE_FOR_STAT = 50
N_STEP = 1
| 35.37931
| 124
| 0.792398
|
e0ba0a82781711224640c363d3caf75836714486
| 644
|
py
|
Python
|
setup.py
|
python-diamond/diamond-redis
|
f55d747e5853b92b17b917d4616eb2c723e6f09b
|
[
"MIT"
] | 2
|
2015-09-08T05:24:45.000Z
|
2017-03-14T08:46:59.000Z
|
setup.py
|
python-diamond/diamond-redis
|
f55d747e5853b92b17b917d4616eb2c723e6f09b
|
[
"MIT"
] | 1
|
2020-09-25T06:29:00.000Z
|
2020-09-28T06:22:50.000Z
|
setup.py
|
python-diamond/diamond-redis
|
f55d747e5853b92b17b917d4616eb2c723e6f09b
|
[
"MIT"
] | 1
|
2021-02-21T10:58:21.000Z
|
2021-02-21T10:58:21.000Z
|
#!/usr/bin/env python
from setuptools import setup
install_requires = [
'diamond',
'redis',
]
setup(
name='diamond-redis',
version='0.0.1',
author='Matt Robenolt',
author_email='matt@ydekproductons.com',
url='https://github.com/python-diamond/diamond-redis',
description='',
long_description='',
license='MIT License',
py_modules=['diamond_redis'],
zip_safe=False,
install_requires=install_requires,
include_package_data=True,
entry_points={
'diamond.collectors': [
'redis = diamond_redis',
],
},
classifiers=[
'DO NOT UPLOAD',
],
)
| 20.125
| 58
| 0.61646
|
226bb9ddf543214d178c457597840963294d3dfa
| 3,374
|
py
|
Python
|
web/api/get_mp_function.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 2
|
2015-04-11T12:22:41.000Z
|
2016-08-18T11:12:06.000Z
|
web/api/get_mp_function.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 84
|
2015-01-22T14:33:49.000Z
|
2015-04-01T23:15:29.000Z
|
web/api/get_mp_function.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 1
|
2015-04-16T03:10:39.000Z
|
2015-04-16T03:10:39.000Z
|
from web.api import BaseAPI
from data_models import government_models
from utils import mongo
class MpApi(BaseAPI):
def __init__(self):
BaseAPI.__init__(self)
self._db = mongo.MongoInterface()
self._db_table = 'api_mps'
def request(self, args):
name = args['name']
result, _ = self._db.query(self._db_table, query=args)
if len(result) > 0:
mp = government_models.MemberOfParliament(name)
meetings = self._influencer_urls(mp.meetings)
#interests = self._nest_category(self._interest_urls(mp.interests))
interests = self._interest_urls(mp.interests)
donations = self._donor_urls(mp.donations)
result = {
'name': result[0]['name'],
'party': result[0]['party'],
'influences_summary': result[0]['influences'],
'influences_detail': {
"register_of_interests": interests,
"electoral_commission": donations,
"meetings": meetings
},
"government_departments": self._department_detail_urls(
result[0]["government_departments"]
),
"government_positions": result[0]["government_positions"],
"government_committees": self._committee_detail_urls(
result[0]["government_committees"]
),
'mp': mp.mp_website,
'wikipedia': mp.wikipedia,
'guardian': mp.guardian,
'bbc': mp.bbc,
}
return result
def _interest_urls(self, interests):
results = []
for category in interests:
updated_interests = []
for interest in category["interests"]:
updated = interest
interest_name = interest["interest"]["name"]
interest_labels = interest["interest"]["labels"]
urls = self.named_entity_resources(interest_name, interest_labels)
updated["interest"]["details_url"] = urls[0]
updated["interest"]["api_url"] = urls[1]
updated_interests.append(updated)
if len(updated_interests) > 0:
category["interests"] = updated_interests
results.append(category)
return results
def _donor_urls(self, donations):
results = []
for donation in donations:
updated = donation
donor_name = donation["donor"]["name"]
donor_labels = donation["donor"]["labels"]
urls = self.named_entity_resources(donor_name, donor_labels)
updated["donor"]["details_url"] = urls[0]
updated["donor"]["api_url"] = urls[1]
results.append(updated)
return results
def _influencer_urls(self, meetings):
results = []
for meeting in meetings:
updated = meeting
attendee_name = {"name": meeting["attendee"], "details_url": None}
if meeting["attendee"]:
urls = self.named_entity_resources(meeting["attendee"], "influencer")
attendee_name["details_url"] = urls[0]
updated["attendee"] = attendee_name
results.append(updated)
return results
| 38.781609
| 85
| 0.557499
|
360fabb24d52b432a80272fe5b616de7b9c63233
| 1,610
|
py
|
Python
|
setup.py
|
howl-anderson/tf_summary_reader
|
a88d6aeeb325405f91c011c74c04c5efb641a06c
|
[
"MIT"
] | null | null | null |
setup.py
|
howl-anderson/tf_summary_reader
|
a88d6aeeb325405f91c011c74c04c5efb641a06c
|
[
"MIT"
] | null | null | null |
setup.py
|
howl-anderson/tf_summary_reader
|
a88d6aeeb325405f91c011c74c04c5efb641a06c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ["pandas", "tensorflow>=1.15.0,<2.0.0"]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest>=3', ]
setup(
author="Xiaoquan Kong",
author_email='u1mail2me@gmail.com',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="A package for read data from tensorflow summary files",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='tf_summary_reader',
name='tf_summary_reader',
packages=find_packages(include=['tf_summary_reader', 'tf_summary_reader.*']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/howl-anderson/tf_summary_reader',
version='0.2.0',
zip_safe=False,
)
| 31.568627
| 81
| 0.642236
|
8563ebfc903383a99eb74a9bfc8ecd9e64dd9f3f
| 393
|
py
|
Python
|
cha_bebe/wsgi.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | null | null | null |
cha_bebe/wsgi.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | 9
|
2020-06-08T03:31:08.000Z
|
2022-01-13T02:44:42.000Z
|
cha_bebe/wsgi.py
|
intelektos/Cha_bebe
|
23df4af3901413c9c50e73bd305ade165c81001b
|
[
"MIT"
] | 1
|
2020-06-01T17:43:20.000Z
|
2020-06-01T17:43:20.000Z
|
"""
WSGI config for cha_bebe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cha_bebe.settings')
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
0595e0cbf847c9acb45d88519985a86f3413f473
| 8,885
|
py
|
Python
|
tests/pouw/nods/decentralized/test_worker.py
|
projectpai/pouw-main-iteration
|
e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e
|
[
"MIT"
] | 11
|
2020-06-22T05:31:18.000Z
|
2022-03-29T16:50:21.000Z
|
tests/pouw/nods/decentralized/test_worker.py
|
AIIJJII/pouw-main-iteration
|
e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e
|
[
"MIT"
] | 3
|
2020-06-23T18:20:09.000Z
|
2021-07-06T23:28:24.000Z
|
tests/pouw/nods/decentralized/test_worker.py
|
AIIJJII/pouw-main-iteration
|
e2505f63e11bbf80648c8cbe56b6d6f3e3a8546e
|
[
"MIT"
] | 3
|
2020-09-02T11:03:16.000Z
|
2022-03-29T16:50:00.000Z
|
from copy import copy
import mxnet as mx
import pytest
from mock import MagicMock
from mxnet.gluon import nn
from pai.pouw.nodes.decentralized.worker import create_network, get_layer_parameters_from_config, WorkerNode
def test_create_network_fc_dnn():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'layer2',
'type': 'Dense',
'nodes': 64,
'activation': 'relu'
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network) == nn.Sequential
@pytest.mark.parametrize('layer_number', range(1, 10))
def test_create_network_hidden_units_number_properly_initialized(layer_number):
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
layer = {
'id': 'layer',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
}
for index in range(layer_number):
new_layer = copy(layer)
new_layer['id'] += str(index)
model_data['hidden-units'].insert(0, new_layer)
network = create_network(model_data)
assert len(network) == layer_number + 1
@pytest.mark.parametrize('node_number', (2 ** n for n in (4, 11)))
def test_create_network_node_number_in_dense_layer(node_number):
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': node_number,
'activation': 'relu'
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert network[0]._units == node_number
def test_get_layer_parameters_from_config_simple():
raw_conf = {
'id': 'layer1',
'type': 'Dense',
'nodes': 10,
'activation': 'relu'
}
layer_config = get_layer_parameters_from_config(raw_conf)
assert layer_config == {
'units': 10,
'activation': 'relu'
}
def test_create_network_dropout_layer():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'dropout1',
'type': 'Dropout',
'rate': 0.5,
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.Dropout
def test_create_network_batch_normalization_layer():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'batch1',
'type': 'BatchNorm'
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.BatchNorm
def test_create_network_instance_normalization_layer():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'batch1',
'type': 'InstanceNorm'
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.InstanceNorm
def test_create_network_layer_normalization():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'batch1',
'type': 'LayerNorm'
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.LayerNorm
def test_create_network_embedding_layer():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'embedding',
'type': 'Embedding',
'input_dim': 64,
'output_dim': 32
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.Embedding
def test_create_network_flatten_layer():
model_data = {
'type': 'FC-DNN',
'hidden-units': [
{
'id': 'layer1',
'type': 'Dense',
'nodes': 128,
'activation': 'relu'
},
{
'id': 'embedding',
'type': 'Flatten',
},
{
'id': 'output',
'type': 'Dense',
'nodes': 10
},
],
'loss': 'SoftmaxCrossEntropyLoss'
}
network = create_network(model_data)
assert type(network[1]) == nn.Flatten
def test_initialize_network_passing_parameters_to_optimizer(client_task_definition_data, mocker):
mocker.patch('redis.Redis', MagicMock())
ctx = mx.cpu(0)
node = WorkerNode(redis_host=None, redis_port=None, context=ctx)
node.task_data = client_task_definition_data
node.initialize_network()
@pytest.mark.parametrize('init_settings', [{'name': 'Xavier', 'parameters': {}},
{'name': 'Bilinear', 'parameters': {}},
{'name': 'Constant', 'parameters': {'value': 0}},
{'name': 'FusedRNN',
'parameters': {'init': None, 'num_hidden': 1, 'num_layers': 1,
'mode': 'test'}},
{'name': 'LSTMBias', 'parameters': {}},
{'name': 'MSRAPrelu', 'parameters': {}},
{'name': 'Normal', 'parameters': {}},
{'name': 'One', 'parameters': {}},
{'name': 'Orthogonal', 'parameters': {}},
{'name': 'Uniform', 'parameters': {}},
{'name': 'Zero', 'parameters': {}}])
def test_initialize_network_passing_parameters_to_optimizer_inicializator(client_task_definition_data, mocker,
init_settings):
mocker.patch('redis.Redis', MagicMock())
ctx = mx.cpu(0)
node = WorkerNode(redis_host=None, redis_port=None, context=ctx)
node.task_data = client_task_definition_data
node.task_data['ml']['optimizer']['initializer'] = init_settings
node.initialize_network()
def test_initialize_network_passing_parameters_to_optimizer_no_parameters(client_task_definition_data, mocker):
mocker.patch('redis.Redis', MagicMock())
ctx = mx.cpu(0)
node = WorkerNode(redis_host=None, redis_port=None, context=ctx)
node.task_data = client_task_definition_data
del node.task_data['ml']['optimizer']['initializer']['parameters']
node.initialize_network()
| 28.477564
| 111
| 0.448171
|
6ada85e40f050c2e639ce1d4f0b808bef81bde9b
| 106
|
py
|
Python
|
ex2-21.py
|
ppedraum/infosatc-lp-avaliativo-01
|
aa548868ada4a98727587da3a4c6452a4042c199
|
[
"MIT"
] | null | null | null |
ex2-21.py
|
ppedraum/infosatc-lp-avaliativo-01
|
aa548868ada4a98727587da3a4c6452a4042c199
|
[
"MIT"
] | null | null | null |
ex2-21.py
|
ppedraum/infosatc-lp-avaliativo-01
|
aa548868ada4a98727587da3a4c6452a4042c199
|
[
"MIT"
] | null | null | null |
#21
l = float(input("Digite uma massa em libras: "))
k = l*0.45
print("{:.2f}lb2 = {:.2f}kg".format(l, k))
| 26.5
| 48
| 0.584906
|
6917711c32e4be05cbef44b881d3c2e33fbff33c
| 4,701
|
py
|
Python
|
iunets/baseline_networks.py
|
YoelShoshan/iunets
|
9789da07e2ef932c5ea612737066ba88f4f26977
|
[
"MIT"
] | 86
|
2020-05-12T06:33:43.000Z
|
2022-03-29T13:56:30.000Z
|
iunets/baseline_networks.py
|
YoelShoshan/iunets
|
9789da07e2ef932c5ea612737066ba88f4f26977
|
[
"MIT"
] | 8
|
2020-05-19T08:08:01.000Z
|
2022-02-25T09:04:14.000Z
|
iunets/baseline_networks.py
|
YoelShoshan/iunets
|
9789da07e2ef932c5ea612737066ba88f4f26977
|
[
"MIT"
] | 13
|
2020-05-12T06:33:55.000Z
|
2021-12-20T07:59:43.000Z
|
import torch
from torch import nn
from .utils import get_num_channels
from .layers import StandardBlock
class StandardUNet(nn.Module):
def __init__(self,
input_shape_or_channels,
dim=None,
architecture=[2,2,2,2],
base_filters=32,
skip_connection=False,
block_type=StandardBlock,
zero_init=False,
*args,
**kwargs):
super(StandardUNet, self).__init__()
self.input_channels = get_num_channels(input_shape_or_channels)
self.base_filters = base_filters
self.architecture = architecture
self.n_levels = len(self.architecture)
self.dim = dim
self.skip_connection = skip_connection
self.block_type = block_type
pool_ops = [nn.MaxPool1d,
nn.MaxPool2d,
nn.MaxPool3d]
pool_op = pool_ops[dim-1]
upsampling_ops = [nn.ConvTranspose1d,
nn.ConvTranspose2d,
nn.ConvTranspose3d]
upsampling_op = upsampling_ops[dim-1]
filters = self.base_filters
filters_list = [filters]
self.module_L = nn.ModuleList()
self.module_R = nn.ModuleList()
self.downsampling_layers = nn.ModuleList()
self.upsampling_layers = nn.ModuleList()
# Left side of the U-Net
for i in range(self.n_levels):
self.module_L.append(nn.ModuleList())
self.downsampling_layers.append(
pool_op(kernel_size=2)
)
depth = architecture[i]
for j in range(depth):
if i == 0 and j == 0:
in_channels = self.input_channels
else:
in_channels = self.base_filters * (2**i)
if j == depth-1:
out_channels = self.base_filters * (2**(i+1))
else:
out_channels = self.base_filters * (2**i)
self.module_L[i].append(
self.block_type(self.dim, in_channels, out_channels, zero_init, *args, **kwargs)
)
# Right side of the U-Net
for i in range(self.n_levels-1):
self.module_R.append(nn.ModuleList())
depth = architecture[i]
for j in range(depth):
if j == 0:
in_channels = 3*self.base_filters * (2**(i+1))
else:
in_channels = self.base_filters * (2**(i+1))
out_channels = self.base_filters * (2**(i+1))
self.module_R[i].append(
self.block_type(self.dim, in_channels, out_channels, zero_init, *args, **kwargs)
)
self.upsampling_layers.append(
upsampling_op(self.base_filters * (2**(i+2)),
self.base_filters * (2**(i+2)),
kernel_size=2,
stride=2)
)
if self.skip_connection:
# We have to convert back to the original number of channels if
# we want a skip connection. We do this with an appropriate
# convolution.
conv_ops = [nn.Conv1d,
nn.Conv2d,
nn.Conv3d]
conv_op = conv_ops[self.dim-1]
self.output_layer = conv_op(self.base_filters*2,
self.input_channels,
3,
padding=1)
def forward(self, input, *args, **kwargs):
# FORWARD
skip_inputs = []
x = input
# Left side
for i in range(self.n_levels):
depth = self.architecture[i]
# Left side
for j in range(depth):
x = self.module_L[i][j](x)
# Downsampling L
if i < self.n_levels - 1:
skip_inputs.append(x)
x = self.downsampling_layers[i](x)
# Right side
for i in range(self.n_levels - 2, -1, -1):
depth = self.architecture[i]
# Upsampling R
x = self.upsampling_layers[i](x)
y = skip_inputs.pop()
x = torch.cat((x,y),dim=1)
for j in range(depth):
x = self.module_R[i][j](x)
if self.skip_connection:
x = self.output_layer(x) + input
return x
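# Usage sketch with illustrative sizes; this assumes get_num_channels() accepts
# a plain channel count. With skip_connection=False the output keeps
# 2 * base_filters channels rather than the input channel count.
#     net = StandardUNet(1, dim=2, architecture=[2, 2, 2], base_filters=16)
#     y = net(torch.randn(1, 1, 64, 64))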
| 33.578571
| 100
| 0.478196
|
c8338493827b6156d8a392083666d8439c7104bf
| 562
|
py
|
Python
|
var/spack/repos/builtin/packages/libinih/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/libinih/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/libinih/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libinih(MesonPackage):
"""
inih (INI Not Invented Here) is a simple .INI file parser written in C.
"""
homepage = "https://github.com/benhoyt/inih"
url = "https://github.com/benhoyt/inih/archive/refs/tags/r53.tar.gz"
git = "https://github.com/benhoyt/inih.git"
version('master', branch="master")
| 29.578947
| 77
| 0.688612
|
300031c71c91ad83d18767a77b2cc0ee03e6ed16
| 6,428
|
py
|
Python
|
invtorch/nn/modules/module.py
|
xmodar/invtorch
|
74b80be3b4126925e583282b6f78171b99788b37
|
[
"Apache-2.0"
] | 14
|
2021-11-18T11:26:11.000Z
|
2022-01-20T13:29:52.000Z
|
invtorch/nn/modules/module.py
|
xmodar/invtorch
|
74b80be3b4126925e583282b6f78171b99788b37
|
[
"Apache-2.0"
] | null | null | null |
invtorch/nn/modules/module.py
|
xmodar/invtorch
|
74b80be3b4126925e583282b6f78171b99788b37
|
[
"Apache-2.0"
] | null | null | null |
"""Base Invertible Modules"""
import itertools
from contextlib import contextmanager
import torch
from torch import nn
from ...autograd.grad_mode import backward_mode, dry_mode
from ...utils.checkpoint import checkpoint
from ...utils.tools import pack
__all__ = ['Module']
class Module(nn.Module):
"""Base invertible module"""
def __init__(self):
super().__init__()
self.seed = False # preserve RNG state in backward
self.checkpoint = True # enables or disables checkpointing
self.invertible = True # use inverse if checkpointing is enabled
self._reversed = False # switch function and inverse
def forward(self, *args, **kwargs):
"""Perform the forward pass"""
private = {
'seed': self.seed,
'enabled': self.checkpoint,
'inverse': self.call_inverse if self.invertible else None,
}
assert all(k not in kwargs for k in private), 'got an illegal argument'
kwargs.update(private)
return self.process(checkpoint(self.call_function, *args, **kwargs))
def function(self, *args):
"""Compute the outputs of the function given the inputs"""
raise NotImplementedError
def inverse(self, *args):
"""Compute the inputs of the function given the outputs"""
raise NotImplementedError
@property
def call_function(self):
"""Current function (according to `self.reversed`)"""
return self.inverse if self.reversed else self.function
@property
def call_inverse(self):
"""Current inverse (according to `self.reversed`)"""
return self.function if self.reversed else self.inverse
@property
def reversible(self):
"""Whether function and inverse can be switched"""
return False
def reverse(self, mode=None):
"""Switch function and inverse"""
if not self.reversed if mode is None else mode:
assert self.reversible, 'module is not reversible'
self._reversed = True
else:
self._reversed = False
return self
@property
def checkpoint(self):
"""Whether the module is in checkpoint or pass_through mode"""
return self._checkpoint
@checkpoint.setter
def checkpoint(self, value):
if value:
self._checkpoint = True
else:
self._checkpoint = self._invertible = False
@property
def invertible(self):
"""Whether the module is in invertible or simple checkpoint mode"""
return self._checkpoint and self._invertible
@invertible.setter
def invertible(self, value):
if value:
self._invertible = self._checkpoint = True
else:
self._invertible = False
@property
def reversed(self):
"""Whether function and inverse should be switched"""
return self._reversed
@reversed.setter
def reversed(self, value):
self.reverse(value)
@property
def num_outputs(self):
"""End index to slice `call_function()`'s outputs in `forward()`"""
return None
@property
def num_inputs(self):
"""End index to slice `call_inverse()`'s outputs in `forward()`"""
return None
def process(self, args, inverse=False):
"""Process the outputs of `call_function()` or `call_inverse()`"""
args = pack(args)
assert isinstance(args, tuple), 'should only output a `tuple`'
num_args = self.num_inputs if inverse else self.num_outputs
if num_args is None:
num_args = len(args)
elif num_args < 0:
num_args += len(args)
assert 0 < num_args <= len(args), f'needs {num_args} args'
return args[0] if num_args == 1 else args[:num_args]
def check(self, *args, rtol=1e-3, atol=1e-5):
"""Check invertability and second forward pass consistency"""
def check(args1, args2, message):
for arg1, arg2 in itertools.zip_longest(args1, args2):
is_tensor = torch.is_tensor(arg1)
assert is_tensor == torch.is_tensor(arg2), message
same = not is_tensor or torch.allclose(arg1, arg2, rtol, atol)
assert same, message
with dry_mode():
outputs = pack(self.call_function(*args))
with torch.inference_mode():
inputs = pack(self.call_inverse(*outputs))
check(args, inputs, 'inverted tensors mismatch (try double precision)')
with backward_mode():
second = pack(self.call_function(*args))
if self.seed:
message = 'second forward pass mismatched despite `self.seed=True`'
else:
message = 'second forward pass mismatched (try `self.seed=True`)'
check(outputs, second, message)
return True
def get_extra_state(self):
return {
'seed': self.seed,
'checkpoint': self.checkpoint,
'invertible': self.invertible,
'reversed': self.reversed,
}
def set_extra_state(self, state):
self.seed = state['seed']
self.checkpoint = state['checkpoint']
self.invertible = state['invertible']
self.reversed = state['reversed']
@contextmanager
def temp_mode(self, **kwargs):
"""Set, temporarily, the mode of the model"""
state = {}
for key in ('seed', 'checkpoint', 'invertible', 'reversed'):
state[key] = getattr(self, key)
if key in kwargs and state[key] == bool(kwargs[key]):
kwargs.pop(key)
assert all(k in state for k in kwargs), 'got an illegal argument'
if 'checkpoint' in kwargs and 'invertible' in kwargs:
assert kwargs['checkpoint'] or not kwargs['invertible'], (
'set either `checkpoint` or `invertible` or avoid conflict')
try:
for key, value in kwargs.items():
setattr(self, key, value)
yield self
finally:
for key, value in state.items():
setattr(self, key, value)
def extra_repr(self):
extra = f'reversed={self.reversed}, checkpoint={self.checkpoint}'
if self.checkpoint:
extra += f', invertible={self.invertible}, seed={self.seed}'
return extra
def __repr__(self):
return 'Inv' + super().__repr__()
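# Sketch of a subclass (illustrative only): implement `function` and `inverse`
# so forward() can checkpoint the call and recover inputs from outputs.
#     class Scale(Module):
#         def function(self, x):
#             return 2 * x
#         def inverse(self, y):
#             return y / 2
#     y = Scale()(torch.ones(3, requires_grad=True))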
| 34.374332
| 79
| 0.60781
|
f3f3b0001b880c6b44df6ecf5d674e70a6eb0283
| 1,420
|
py
|
Python
|
custom_components/samsungtv_encrypted/PySmartCrypto/command_encryption.py
|
MizterB/ha-samsungtv-encrypted
|
6b81a311b5d40b4a7f3311917ba7eade91bb6cd5
|
[
"Apache-2.0"
] | 41
|
2020-03-08T23:49:29.000Z
|
2022-01-25T01:33:57.000Z
|
custom_components/samsungtv_encrypted/PySmartCrypto/command_encryption.py
|
MizterB/ha-samsungtv-encrypted
|
6b81a311b5d40b4a7f3311917ba7eade91bb6cd5
|
[
"Apache-2.0"
] | 88
|
2020-03-08T23:11:36.000Z
|
2022-03-15T01:32:21.000Z
|
custom_components/samsungtv_encrypted/PySmartCrypto/command_encryption.py
|
MizterB/ha-samsungtv-encrypted
|
6b81a311b5d40b4a7f3311917ba7eade91bb6cd5
|
[
"Apache-2.0"
] | 29
|
2020-03-16T09:24:41.000Z
|
2022-03-14T06:44:46.000Z
|
from hashlib import md5
from base64 import b64decode
from base64 import b64encode
from Crypto.Cipher import AES
import binascii
# Padding for the input string --not
# related to encryption itself.
BLOCK_SIZE = 16 # Bytes
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * \
chr(BLOCK_SIZE - len(s) % BLOCK_SIZE)
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
class AESCipher:
"""
Usage:
c = AESCipher('password').encrypt('message')
m = AESCipher('password').decrypt(c)
Tested under Python 3 and PyCrypto 2.6.1.
"""
def __init__(self, key, session_id):
self.key = binascii.unhexlify(key)
self.session_id = session_id
def decrypt(self, enc):
cipher = AES.new(self.key, AES.MODE_ECB)
return unpad(cipher.decrypt(binascii.unhexlify(enc)))
def encrypt(self, raw):
cipher = AES.new(self.key, AES.MODE_ECB)
return cipher.encrypt(bytes(pad(raw), encoding = "utf8"))
def generate_command(self,key_press):
command_bytes = self.encrypt(self.generate_json(key_press))
int_array = ','.join((list(map(str, command_bytes))))
return '5::/com.samsung.companion:{"name":"callCommon","args":[{"Session_Id":' + str(self.session_id) + ',"body":"[' + int_array + ']"}]}'
def generate_json(self,key_press):
return '{"method":"POST","body":{"plugin":"RemoteControl","param1":"uuid:12345","param2":"Click","param3":"' + key_press + '","param4":false,"api":"SendRemoteKey","version":"1.000"}}'
| 32.272727
| 185
| 0.689437
|
b69420463a7e079e9c7dc894a50509a0273a42c2
| 548
|
py
|
Python
|
tutorial/matplotlib-tutorial/image_clip_path.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | 4
|
2018-06-11T03:02:49.000Z
|
2018-07-11T07:18:52.000Z
|
tutorial/matplotlib-tutorial/image_clip_path.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | null | null | null |
tutorial/matplotlib-tutorial/image_clip_path.py
|
zixia/python-facenet
|
d86e0c49a9ce413bef6e58a19a9f723aadcef968
|
[
"MIT"
] | 2
|
2017-08-31T05:35:36.000Z
|
2018-10-11T16:42:15.000Z
|
"""
http://matplotlib.org/examples/images_contours_and_fields/image_demo_clip_path.html
Demo of image that's been clipped by a circular patch.
"""
# %%
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.cbook as cbook
image_file = cbook.get_sample_data('grace_hopper.png')
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
patch = patches.Circle(
(260, 200),
radius=200,
transform=ax.transData
)
im.set_clip_path(patch)
# ax.add_patch(patch)
ax.axis('off')
plt.show()
| 21.076923
| 83
| 0.748175
|
5bfc9e528b1e39f02302fc0e428e17c864c94289
| 5,612
|
py
|
Python
|
mosdef_code/spectra/check_line_broadness.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | null | null | null |
mosdef_code/spectra/check_line_broadness.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | null | null | null |
mosdef_code/spectra/check_line_broadness.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | 1
|
2021-12-08T01:20:12.000Z
|
2021-12-08T01:20:12.000Z
|
import initialize_mosdef_dirs as imd
from astropy.io import ascii
import pandas as pd
import numpy as np
from scipy import interpolate
import scipy.integrate as integrate
from scipy.optimize import curve_fit
import os
import matplotlib.pyplot as plt
def check_broadness(groupID, run_name, rest_wave, width=15):
"""Makes a plot of how broad a line is by convolving it with all of the filters
Parameters:
groupID (int): ID of the group to convolve
run_name (str): Name of the prospector run that you are looking at to convolve
rest_wave (int): Wavelength closest to the line
width (int): Angstroms on either side of the line to consider in the convolution
"""
# Read in the spectrum
spec_df = ascii.read(imd.prospector_fit_csvs_dir + f'/{run_name}_csvs/{groupID}_spec.csv').to_pandas()
spec_df_cut = spec_df[np.logical_and(spec_df['rest_wavelength']>rest_wave-width, spec_df['rest_wavelength']<rest_wave+1+width)]
spec_interp = interpolate.interp1d(spec_df_cut['rest_wavelength'], spec_df_cut['spec50_flambda'], bounds_error=False, fill_value=0)
# Test plot, looks good, it grabs the line
# wave_plot = np.arange(6553, 6573, 0.2)
# plt.plot(wave_plot, spec_interp(wave_plot))
# plt.show()
# Find the filters
filt_folder = imd.composite_filter_csvs_dir + f'/{groupID}_filter_csvs/'
filt_files = [file for file in os.listdir(filt_folder) if '.csv' in file]
# loop over each point, storing both the point and the integrated flux value at that point
points = []
fluxes = []
for i in range(len(filt_files)):
filt_file = filt_files[i]
point = filt_file.split('.')[0].split('_')[1]
print(f'Reading in filter for point {point}...')
filt = ascii.read(filt_folder + filt_file).to_pandas()
filt_interp = interpolate.interp1d(filt['rest_wavelength'], filt['transmission'], bounds_error=False)
# Test plot, looks good, it grabs the filter
# wave_plot = np.arange(20000, 33000, 0.73)
# plt.plot(wave_plot, filt_interp(wave_plot))
# plt.show()
def flux_func_numerator(wave):
"""Function that you need to integrate to get the flux"""
return spec_interp(wave)*filt_interp(wave)*wave*10**18
def flux_func_denominator(wave):
"""Function that you need to integrate to get the flux"""
return filt_interp(wave)*wave
# numerator = integrate.quad(flux_func_numerator, 801, 25000)[0]
# denominator = integrate.quad(flux_func_denominator, 801, 25000)[0]
# Testing trapz integration
wave_array = np.arange(801, 39999, 0.1)
numerator = integrate.trapz(flux_func_numerator(wave_array))
denominator = integrate.trapz(flux_func_denominator(wave_array))
flux = numerator / denominator
print(f'Num: {numerator}')
print(f'Dem: {denominator}')
print(f'-----------------')
points.append(int(point))
fluxes.append(flux / 10**18)
line_width_df = pd.DataFrame(zip(points, fluxes), columns=['rest_wavelength', 'flux'])
line_width_df.to_csv(imd.line_widths_dir + f'/group{groupID}_{rest_wave}_broadness.csv', index=False)
def plot_broadness(groupID, rest_waves):
'''Plots the broadness of all of the provided lines on one axis
Parameters:
groupID (int): ID of the composite
    rest_waves (list): List of peak wavelengths in angstroms, rounded
'''
colors = ['black','blue']
run_count = 0
min_bounds = []
max_bounds = []
fig, ax = plt.subplots(figsize = (8,8))
for rest_wave in rest_waves:
line_width_df = ascii.read(imd.line_width_csvs_dir + f'/group{groupID}_{rest_wave}_broadness.csv').to_pandas()
line_width_df['flux'] = line_width_df['flux']*10**18
guess = [6563, 3000, 50]
def gaussian(x, mu, sig, amp):
return amp * np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
popt, pcov = curve_fit(gaussian, line_width_df['rest_wavelength'], line_width_df['flux'], p0=guess)
mean = popt[0]
sigma = popt[1]
amp = popt[2]
gauss_waves = np.arange(rest_wave-4000, rest_wave+4000, 1)
gauss_ys = gaussian(gauss_waves, mean, sigma, amp)
ax.plot(line_width_df['rest_wavelength'], line_width_df['flux'], marker='o', ls='None', color=colors[run_count])
ax.plot(gauss_waves, gauss_ys, marker='None', ls='-', color='red')
if run_count==0:
ylim = ax.get_ylim()
min_bound = mean-2*np.abs(sigma)
max_bound = mean+2*np.abs(sigma)
min_bounds.append(min_bound)
max_bounds.append(max_bound)
ax.plot([min_bound, min_bound], [-1000, 1000], ls='--', marker='None', color=colors[run_count])
ax.plot([max_bound, max_bound], [-1000, 1000], ls='--', marker='None', color=colors[run_count])
run_count += 1
ax.set_xscale('log')
ax.set_ylabel('Flux (*10^18)')
ax.set_xlabel('Wavelength ($\AA$)')
ax.set_ylim(ylim)
fig.savefig(imd.line_width_images_dir + f'/{groupID}_widths.pdf')
# Save the bounds
bounds_df = pd.DataFrame(zip(rest_waves, min_bounds, max_bounds), columns=['rest_wavelength', 'min_bound', 'max_bound'])
bounds_df.to_csv(imd.line_widths_dir + f'/{groupID}_bounds.csv', index=False)
for groupID in range(0, 29):
try:
plot_broadness(groupID, [6563,5007])
except:
pass
# for groupID in range(0, 29):
# try:
# check_broadness(groupID, 'redshift_maggies', 5007)
# except:
# pass
| 37.413333
| 135
| 0.656272
|
a8a6be3d2b5cea8702763c75c0ac12ef819b87a2
| 10,868
|
py
|
Python
|
applications/camera_calibration/scripts/create_calibration_pattern.py
|
xiesc/camera_calibration
|
8bd0071a1175894101f6dd204345297010756c09
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T13:25:49.000Z
|
2020-11-03T13:25:49.000Z
|
applications/camera_calibration/scripts/create_calibration_pattern.py
|
xiesc/camera_calibration
|
8bd0071a1175894101f6dd204345297010756c09
|
[
"BSD-3-Clause"
] | null | null | null |
applications/camera_calibration/scripts/create_calibration_pattern.py
|
xiesc/camera_calibration
|
8bd0071a1175894101f6dd204345297010756c09
|
[
"BSD-3-Clause"
] | 1
|
2020-12-05T07:41:04.000Z
|
2020-12-05T07:41:04.000Z
|
# Copyright 2019 ETH Zürich, Thomas Schöps
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse
import os
import math
import sys
import numpy as np
from scipy.misc import imread
# This requires reportlab, installed like this:
# sudo pip3 install reportlab
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4
from reportlab.lib.units import inch, cm, mm
def GetStarCoord(square_length, i, num_star_segments, center_x, center_y):
angle = (2 * math.pi) * i / num_star_segments
x = math.sin(angle)
y = math.cos(angle)
max_abs_x = max(abs(x), abs(y))
x /= max_abs_x
y /= max_abs_x
return (center_x - 0.5 * square_length * x,
center_y + 0.5 * square_length * y)
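# Quick sanity check of GetStarCoord (illustrative values, not part of the original script):
# with 4 segments, segment 0 maps to the midpoint of the square's top edge.
assert GetStarCoord(2.0, 0, 4, 0.0, 0.0) == (0.0, 1.0)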
if __name__ == '__main__':
# Define arguments
parser = argparse.ArgumentParser(description="Create calibration patterns.")
parser.add_argument("--tag36h11_path", required=True,
help="Path to a folder containing the 36h11 AprilTag images. May be downloaded from: https://github.com/AprilRobotics/apriltag-imgs")
parser.add_argument("--output_base_path", required=True,
help="Base path to the PDF and YAML output files (excluding the file extensions).")
parser.add_argument("--paper_size", default="A4",
help="Paper size; supported values: A4, letter.")
parser.add_argument("--num_star_segments", default="16",
help="Number of segments of each star in the pattern. Refers to the sum of black and white segments. 4 would give a checkerboard.")
parser.add_argument("--apriltag_index", default="0",
help="Index of the AprilTag to use for the pattern.")
parser.add_argument("--margin_in_cm", default="0.4",
help="Page margin in centimeters.")
parser.add_argument("--approx_square_length_in_cm", default="1.2",
help="Approximate star square length in centimeters. May get slightly modified such that the squares exactly fit into the print area.")
parser.add_argument("--apriltag_length_in_squares", default="4",
help="Length of the AprilTag measured in star squares.")
# Parse and check arguments
args = parser.parse_args()
num_star_segments = int(args.num_star_segments)
apriltag_index = int(args.apriltag_index)
margin_in_cm = float(args.margin_in_cm)
approx_square_length_in_cm = float(args.approx_square_length_in_cm)
apriltag_length_in_squares = int(args.apriltag_length_in_squares)
pagesize = A4
if args.paper_size == "A4":
pagesize = A4
elif args.paper_size == "letter":
pagesize = letter
else:
print("Error: The given paper size (" + args.paper_size + ") must be either A4 or letter.")
sys.exit(1)
pdf_path = args.output_base_path + '.pdf'
metadata_path = args.output_base_path + '.yaml'
tag_path = os.path.join(args.tag36h11_path, 'tag36_11_{:0>5d}.png'.format(apriltag_index))
if num_star_segments < 4:
print('Error: The number of star segments must be larger or equal to four.')
sys.exit(1)
if num_star_segments % 4 != 0:
print('Warning: The number of star segments must be divisible by four for the symmetry-based detector.')
if not os.path.exists(tag_path):
print('Error: Required file does not exist: ' + tag_path)
sys.exit(1)
# Set up page. (0, 0) is at the bottom-left of the page.
c = canvas.Canvas(pdf_path, pagesize=pagesize)
c.setFillColorRGB(0, 0, 0)
width, height = pagesize
margin = margin_in_cm * cm
start_x = margin
end_x = width - margin
start_y = height - margin
end_y = margin
print_area_width = abs(end_x - start_x)
print_area_height = abs(end_y - start_y)
# Determine the checkerboard resolution
approx_square_length = approx_square_length_in_cm * cm
squares_length_1 = print_area_width / round(print_area_width / approx_square_length)
squares_length_2 = print_area_height / round(print_area_height / approx_square_length)
square_length = min(squares_length_1, squares_length_2)
squares_x = math.floor(print_area_width / square_length)
squares_y = math.floor(print_area_height / square_length)
unused_x = print_area_width - squares_x * square_length
pattern_start_x = start_x + 0.5 * unused_x
unused_y = print_area_height - squares_y * square_length
pattern_start_y = start_y - 0.5 * unused_y
# Draw AprilTag in the middle
clip_path = c.beginPath()
im = imread(tag_path).astype(np.uint8)
tag_width = im.shape[0]
tag_height = im.shape[1]
if tag_width != tag_height:
print('Non-square tags are not supported')
sys.exit(1)
tag_x = squares_x // 2 - apriltag_length_in_squares // 2
tag_start_x = pattern_start_x + tag_x * square_length
tag_y = squares_y // 2 - apriltag_length_in_squares // 2
tag_start_y = pattern_start_y - tag_y * square_length
tag_square_length = apriltag_length_in_squares * square_length / tag_width
for x in range(0, tag_width):
for y in range(0, tag_height):
if im[y][x][0] < 127:
c.rect(tag_start_x + x * tag_square_length,
tag_start_y - y * tag_square_length - tag_square_length,
tag_square_length,
tag_square_length,
stroke=0,
fill=1)
clip_path.moveTo(tag_start_x, tag_start_y)
clip_path.lineTo(tag_start_x + tag_width * tag_square_length, tag_start_y)
clip_path.lineTo(tag_start_x + tag_width * tag_square_length, tag_start_y - tag_height * tag_square_length)
clip_path.lineTo(tag_start_x, tag_start_y - tag_height * tag_square_length)
clip_path.lineTo(tag_start_x, tag_start_y)
pattern_end_x = end_x - 0.5 * unused_x
pattern_end_y = end_y + 0.5 * unused_y
clip_path.moveTo(pattern_start_x, pattern_start_y)
clip_path.lineTo(pattern_end_x, pattern_start_y)
clip_path.lineTo(pattern_end_x, pattern_end_y)
clip_path.lineTo(pattern_start_x, pattern_end_y)
clip_path.lineTo(pattern_start_x, pattern_start_y)
# Draw checkerboard
c.clipPath(clip_path, stroke=0, fill=0)
for x in range(-1, squares_x):
for y in range(0, squares_y + 1):
center_x = pattern_start_x + (x + 1) * square_length
center_y = pattern_start_y - y * square_length
path = c.beginPath()
# Draw all black segments
for segment in range(0, num_star_segments, 2):
path.moveTo(center_x, center_y)
sc1 = GetStarCoord(square_length, segment, num_star_segments, center_x, center_y)
path.lineTo(sc1[0], sc1[1])
# Add point at the square corner?
angle1 = (2 * math.pi) * (segment) / num_star_segments
angle2 = (2 * math.pi) * (segment + 1) / num_star_segments
if math.floor((angle1 - math.pi / 4) / (math.pi / 2)) != math.floor((angle2 - math.pi / 4) / (math.pi / 2)):
corner_angle = (math.pi / 4) + (math.pi / 2) * math.floor((angle2 - math.pi / 4) / (math.pi / 2))
corner_x = math.sin(corner_angle)
corner_y = math.cos(corner_angle)
normalizer = abs(corner_x)
corner_x /= normalizer
corner_y /= normalizer
corner_coord = (center_x - 0.5 * square_length * corner_x,
center_y + 0.5 * square_length * corner_y)
path.lineTo(corner_coord[0], corner_coord[1])
sc2 = GetStarCoord(square_length, segment + 1, num_star_segments, center_x, center_y)
path.lineTo(sc2[0], sc2[1])
path.lineTo(center_x, center_y)
c.drawPath(path, stroke=0, fill=1)
# Write metadata
with open(metadata_path, 'wb') as metadata_file:
metadata_file.write(bytes('num_star_segments: ' + str(num_star_segments) + '\n', 'UTF-8'))
metadata_file.write(bytes('squares_x: ' + str(squares_x) + '\n', 'UTF-8'))
metadata_file.write(bytes('squares_y: ' + str(squares_y) + '\n', 'UTF-8'))
metadata_file.write(bytes('square_length_in_meters: ' + str(0.01 * square_length / cm) + '\n', 'UTF-8'))
metadata_file.write(bytes('page:\n', 'UTF-8'))
metadata_file.write(bytes(' width_mm: ' + str(width / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes(' height_mm: ' + str(height / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes(' pattern_start_x_mm: ' + str(pattern_start_x / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes(' pattern_start_y_mm: ' + str((height - pattern_start_y) / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes(' pattern_end_x_mm: ' + str(pattern_end_x / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes(' pattern_end_y_mm: ' + str((height - pattern_end_y) / mm) + '\n', 'UTF-8'))
metadata_file.write(bytes('apriltags:\n', 'UTF-8'))
metadata_file.write(bytes(' - tag_x: ' + str(tag_x) + '\n', 'UTF-8'))
metadata_file.write(bytes(' tag_y: ' + str(tag_y) + '\n', 'UTF-8'))
metadata_file.write(bytes(' width: ' + str(apriltag_length_in_squares) + '\n', 'UTF-8'))
metadata_file.write(bytes(' height: ' + str(apriltag_length_in_squares) + '\n', 'UTF-8'))
metadata_file.write(bytes(' index: ' + str(apriltag_index) + '\n', 'UTF-8'))
# Save the page
c.setTitle('Calibration pattern #' + str(apriltag_index))
c.setAuthor('Calibration pattern generation script')
c.showPage()
c.save()
print('Successfully generated pattern:\n' + pdf_path + '\nwith metadata:\n' + metadata_path)
| 43.822581
| 157
| 0.688075
|
5957f574864cfc36583d783bd5ac791643dbe615
| 605
|
py
|
Python
|
haokan.py
|
squ33ker/Dlink_Parse
|
b8ea35e64e480720fff5f466c3959e631b379abf
|
[
"MIT"
] | 142
|
2021-04-10T01:54:06.000Z
|
2022-03-29T11:22:43.000Z
|
haokan.py
|
squ33ker/Dlink_Parse
|
b8ea35e64e480720fff5f466c3959e631b379abf
|
[
"MIT"
] | 4
|
2021-04-11T00:50:30.000Z
|
2021-09-14T13:00:56.000Z
|
haokan.py
|
squ33ker/Dlink_Parse
|
b8ea35e64e480720fff5f466c3959e631b379abf
|
[
"MIT"
] | 57
|
2021-05-21T09:58:12.000Z
|
2022-03-31T06:49:01.000Z
|
import requests
import re
import json
class haokan:
def __init__(self, url):
self.url = url
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36¬",
}
def getJson(self, text):
res = re.findall("PRELOADED_STATE__\s=\s(.*?);", text)[0]
res = json.loads(res)
return res
def start(self):
res = requests.get(self.url, headers=self.headers).text
return self.getJson(res)
if __name__ == '__main__':
    # The constructor requires a haokan video page URL; replace the placeholder below with a real one.
    haokan('https://haokan.baidu.com/v?vid=...').start()
| 24.2
| 149
| 0.601653
|
a7a404ccde43b5c0cef09258b3ff7c189c0ea9a1
| 2,234
|
py
|
Python
|
euca2ools/commands/iam/listmfadevices.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | null | null | null |
euca2ools/commands/iam/listmfadevices.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | null | null | null |
euca2ools/commands/iam/listmfadevices.py
|
sjones4/euca2ools
|
03b0e421eeebd8f402422a0ad6994bd6ee4e4127
|
[
"BSD-2-Clause"
] | 2
|
2016-06-24T20:19:40.000Z
|
2020-02-05T10:50:19.000Z
|
# Copyright 2009-2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT
from requestbuilder import Arg
from requestbuilder.response import PaginatedResponse
class ListMFADevices(IAMRequest):
DESCRIPTION = "List a user's MFA devices"
ARGS = [Arg('-u', '--user-name', dest='UserName', metavar='USER',
help='user to list MFA devices for (default: current user)'),
AS_ACCOUNT]
LIST_TAGS = ['MFADevices']
def main(self):
return PaginatedResponse(self, (None,), ('MFADevices',))
def prepare_for_page(self, page):
# Pages are defined by markers
self.params['Marker'] = page
def get_next_page(self, response):
if response.get('IsTruncated') == 'true':
return response['Marker']
def print_result(self, result):
for device in result.get('MFADevices', []):
print device['SerialNumber']
| 42.961538
| 77
| 0.732319
|
ccc97c5de0ae0a6490739b53580e96de914abfa2
| 355
|
py
|
Python
|
python/148-SortList.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
python/148-SortList.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
python/148-SortList.py
|
vermouth1992/Leetcode
|
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
|
[
"MIT"
] | null | null | null |
"""
Sort a linked list in O(n log n) time using constant space complexity.
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def sortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
| 20.882353
| 70
| 0.580282
|
c9fb10966cd84268c6d017f0795fb65c86504e1d
| 3,033
|
py
|
Python
|
pyOCD/flash/flash_stm32f031.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | null | null | null |
pyOCD/flash/flash_stm32f031.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | null | null | null |
pyOCD/flash/flash_stm32f031.py
|
mesheven/mesh-pyocd-old
|
99ecfeeac95820dacab52a1280b0fba6d4f51fc9
|
[
"Apache-2.0"
] | null | null | null |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flash import Flash
flash_algo = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x49544853, 0x48546048, 0x20006048, 0xb5104770, 0x20344603, 0x60e04c4f, 0xbd102000, 0x20004601,
0xb5004770, 0x23002200, 0x6902484a, 0x40102080, 0xd1012880, 0xffe4f7ff, 0x4846bf00, 0x07d868c3,
0xd1fa0fc0, 0x69024843, 0x43022004, 0x61024841, 0x20406902, 0x483f4302, 0xbf006102, 0x68c3483d,
0x0fc007d8, 0x483bd1fa, 0x21046902, 0x43884610, 0x48384602, 0x20006102, 0xb510bd00, 0x22004603,
0x48342400, 0x20806902, 0x28804010, 0xf7ffd101, 0xbf00ffb7, 0x68c4482f, 0x0fc007e0, 0x482dd1fa,
0x20026902, 0x482b4302, 0x61436102, 0x20406902, 0x48284302, 0xbf006102, 0x68c44826, 0x0fc007e0,
0x4824d1fa, 0x21026902, 0x43884610, 0x48214602, 0x20006102, 0xb5f7bd10, 0x22004615, 0x27002600,
0x462c9b00, 0x6902481b, 0x40102080, 0xd1012880, 0xff86f7ff, 0x4817bf00, 0x07f068c6, 0xd1fa0fc0,
0x4814e01b, 0x20016902, 0x48124302, 0x88206102, 0xbf008018, 0x68c6480f, 0x0fc007f0, 0x8820d1fa,
0x42888819, 0x480bd006, 0x08526902, 0x61020052, 0xbdfe2001, 0x1ca41c9b, 0x98011c7f, 0x42b80840,
0x4804d8df, 0x08526902, 0x61020052, 0xe7f02000, 0x45670123, 0x40022000, 0xcdef89ab, 0x00000000,
],
'pc_init' : 0x2000002F,
'pc_eraseAll' : 0x20000043,
'pc_erase_sector' : 0x2000009B,
'pc_program_page' : 0x200000F7,
'static_base' : 0x200001A0,
'begin_data' : 0x20000600, # Analyzer uses a max of 256 B data (32 pages * 4 bytes / page)
'begin_stack' : 0x20000600,
'min_program_length' : 2,
'analyzer_supported' : True,
'analyzer_address' : 0x20000A00 # Analyzer, 0x20000A00--0x20001000
};
class Flash_stm32f031(Flash):
def __init__(self, target):
super(Flash_stm32f031, self).__init__(target, flash_algo)
| 54.160714
| 130
| 0.616551
|
ebe7869b55db4736175b00562c3b025ff0c9dd11
| 3,136
|
py
|
Python
|
rstem/projects/space_invaders/space_invaders_2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1
|
2018-02-23T20:20:45.000Z
|
2018-02-23T20:20:45.000Z
|
rstem/projects/space_invaders/space_invaders_2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1
|
2016-10-25T18:00:15.000Z
|
2016-10-25T18:00:15.000Z
|
rstem/projects/space_invaders/space_invaders_2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2014, Scott Silver Labs, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is the second project in the "Space invaders" series.
#This project adds missles to the game when the player
#presses the button.
#Imports, need sys for exit function
from rstem import led_matrix
from rstem import accel
import RPi.GPIO as GPIO
import time
#Initialize matrix, accelerometer, and GPIO; the matrix layout and accelerometer channel may change from user to user
led_matrix.init_grid(2)
accel.init(1)
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN, pull_up_down = GPIO.PUD_UP)
#Game entity data
player_pos = [7, 0]
missles = []
#Game timing data, missles get updated and therefore move roughly sixty times faster than enemies initially
game_tick = 0
game_tick_max = 64
enemy_tick = 60
start_time = time.time()
#Function to add missle at the players position to set of current missles
def fire_missle(channel):
missles.append(Missle([int(round(player_pos[0])), int(round(player_pos[1]))],[0, 1]))
#Call fire_missle when fire button is pressed
GPIO.add_event_detect(4, GPIO.FALLING, callback=fire_missle, bouncetime = 300)
#Useful clamp function to make sure the data passed to point is on the matrix
def clamp(x):
return max(0, min(x, 15))
#Missle keeps track of its current position and current direction
class Missle:
def __init__(self, position, direction):
self.pos = position
self.dir = direction
# Move missle on missle update tick
def update(self):
self.pos[0] = self.pos[0] + self.dir[0]
self.pos[1] = self.pos[1] + self.dir[1]
        if self.pos[1] > 15 or self.pos[1] < 0 or self.pos[0] < 0 or self.pos[0] > 15:
missles.remove(self)
try:
# Start game
while True:
# Clear previous framebuffer
led_matrix.fill(0)
# Update and redraw missles
for m in missles:
m.update()
led_matrix.point(m.pos[0], m.pos[1])
# Get angles from accelerometer
data = accel.angles()
# Generate smooth movement data using IIR filter, and make a 1/4 turn move
# the player to the edge of the screen
player_pos[0] = player_pos[0] + (clamp(data[0]*8*4/90 + 7) - player_pos[0])*0.1
# Draw player
led_matrix.point(int(round(player_pos[0])), int(round(player_pos[1])))
# Show framebuffer
led_matrix.show()
# Delay one game tick, in this case 1ms
time.sleep(0.001)
#Stop if player hits Ctrl-C
except KeyboardInterrupt:
pass
#Clean everything up
finally:
GPIO.cleanup()
led_matrix.cleanup()
| 30.745098
| 118
| 0.691008
|
5d505065709ebdb10e4155b2df0e09bd1687992b
| 2,545
|
py
|
Python
|
user.py
|
Brian-M-code/password-locker
|
12ae594f310baa72dd7df6b7f91749418f0b1925
|
[
"MIT"
] | null | null | null |
user.py
|
Brian-M-code/password-locker
|
12ae594f310baa72dd7df6b7f91749418f0b1925
|
[
"MIT"
] | null | null | null |
user.py
|
Brian-M-code/password-locker
|
12ae594f310baa72dd7df6b7f91749418f0b1925
|
[
"MIT"
] | null | null | null |
import string
import secrets
import pyperclip
import random
class User:
# Class Variables
# global users_list
users_list = []
def __init__(self,first_name,last_name,password):
'''
        Method to define the properties that each user object will hold.
'''
# instance variables
self.first_name = first_name
self.last_name = last_name
self.password = password
def save_user(self):
'''
Function to save a newly created user instance
'''
User.users_list.append(self)
class Credential:
'''
Class to create account credentials, generate passwords and save their information
'''
# Class Variables
credentials_list =[]
user_credentials_list = []
@classmethod
def check_user(cls,first_name,password):
'''
Method that checks if the name and password entered match entries in the users_list
'''
current_user = ''
for user in User.users_list:
if (user.first_name == first_name and user.password == password):
current_user = user.first_name
return current_user
def __init__(self,user_name,site_name,account_name,password):
'''
        Method to define the properties that each credential object will hold.
'''
# instance variables
self.user_name = user_name
self.site_name = site_name
self.account_name = account_name
self.password = password
def save_credentials(self):
'''
Function to save a newly created user instance
'''
# global users_list
Credential.credentials_list.append(self)
def generate_password(self, char=string.ascii_uppercase+string.ascii_lowercase+string.digits):
'''
Function to generate an 8 character password for a credential
'''
        gen_pass = ''.join(random.choice(char) for _ in range(8))  # 8 characters, per the docstring
return gen_pass
@classmethod
def display_credentials(cls,user_name):
'''
Class method to display the list of credentials saved
'''
user_credentials_list = []
for credential in cls.credentials_list:
if credential.user_name == user_name:
user_credentials_list.append(credential)
return user_credentials_list
@classmethod
def find_by_site_name(cls, site_name):
'''
Method that takes in a site_name and returns a credential that matches that site_name.
'''
for credential in cls.credentials_list:
if credential.site_name == site_name:
return credential
@classmethod
def copy_credential(cls,site_name):
'''
Class method that copies a credential's info after the credential's site name is entered
'''
find_credential = Credential.find_by_site_name(site_name)
return pyperclip.copy(find_credential.password)
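# Illustrative usage sketch (hypothetical names, not part of the original project):
if __name__ == '__main__':
    demo_user = User('Jane', 'Doe', 'pass123')
    demo_user.save_user()
    demo_cred = Credential('Jane', 'Twitter', 'janedoe', 'temp-password')
    demo_cred.password = demo_cred.generate_password()  # swap in a generated 8-character password
    demo_cred.save_credentials()
    print(Credential.check_user('Jane', 'pass123'))    # -> 'Jane'
    print(Credential.display_credentials('Jane'))      # -> list with the credential saved above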
| 24.95098
| 95
| 0.743811
|
ece5985dc043465f4b258f6d25cdc513fb0bf48d
| 16,355
|
py
|
Python
|
tools/test_files/test_vault/less_than_10/rd53_133.py
|
Astlaan/OpenQL
|
404b3edf4406071992e9ad190303b12e143689a0
|
[
"Apache-2.0"
] | null | null | null |
tools/test_files/test_vault/less_than_10/rd53_133.py
|
Astlaan/OpenQL
|
404b3edf4406071992e9ad190303b12e143689a0
|
[
"Apache-2.0"
] | null | null | null |
tools/test_files/test_vault/less_than_10/rd53_133.py
|
Astlaan/OpenQL
|
404b3edf4406071992e9ad190303b12e143689a0
|
[
"Apache-2.0"
] | null | null | null |
from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, output_dir_name)
ql.set_option('output_dir', output_dir)
ql.set_option('optimize', optimize)
ql.set_option('scheduler', scheduler)
ql.set_option('scheduler_uniform', uniform_sched)
ql.set_option('mapper', mapper)
ql.set_option('initialplace', initial_placement)
ql.set_option('log_level', log_level)
ql.set_option('scheduler_post179', new_scheduler)
ql.set_option('scheduler_commute', sched_commute)
ql.set_option('mapusemoves', moves)
ql.set_option('maptiebreak', maptiebreak)
config_fn = os.path.join(curdir, config_file)
# platform = ql.Platform('platform_none', config_fn)
platform = ql.Platform('starmon', config_fn)
num_circuits = 1
num_qubits = 7
p = ql.Program('rd53_133', platform, num_qubits)
k = ql.Kernel('rd53_133', platform, num_qubits)
k.gate('h',[6])
k.gate('t',[3])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,3])
k.gate('cnot',[6,5])
k.gate('cnot',[3,6])
k.gate('tdag',[5])
k.gate('cnot',[3,5])
k.gate('tdag',[3])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[3,6])
k.gate('cnot',[5,3])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[4])
k.gate('t',[5])
k.gate('cnot',[4,2])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('tdag',[4])
k.gate('cnot',[2,4])
k.gate('tdag',[2])
k.gate('tdag',[4])
k.gate('t',[5])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('cnot',[4,2])
k.gate('h',[5])
k.gate('h',[4])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[4])
k.gate('cnot',[1,0])
k.gate('cnot',[4,1])
k.gate('cnot',[0,4])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[4])
k.gate('cnot',[4,1])
k.gate('cnot',[0,4])
k.gate('cnot',[1,0])
k.gate('h',[4])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[4])
k.gate('t',[5])
k.gate('cnot',[4,2])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('tdag',[4])
k.gate('cnot',[2,4])
k.gate('tdag',[2])
k.gate('tdag',[4])
k.gate('t',[5])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('cnot',[4,2])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[3])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,3])
k.gate('cnot',[6,5])
k.gate('cnot',[3,6])
k.gate('tdag',[5])
k.gate('cnot',[3,5])
k.gate('tdag',[3])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[3,6])
k.gate('cnot',[5,3])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[4])
k.gate('t',[5])
k.gate('cnot',[4,2])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('tdag',[4])
k.gate('cnot',[2,4])
k.gate('tdag',[2])
k.gate('tdag',[4])
k.gate('t',[5])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('cnot',[4,2])
k.gate('h',[5])
k.gate('h',[4])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[4])
k.gate('cnot',[1,0])
k.gate('cnot',[4,1])
k.gate('cnot',[0,4])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[4])
k.gate('cnot',[4,1])
k.gate('cnot',[0,4])
k.gate('cnot',[1,0])
k.gate('h',[4])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[4])
k.gate('t',[5])
k.gate('cnot',[4,2])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('tdag',[4])
k.gate('cnot',[2,4])
k.gate('tdag',[2])
k.gate('tdag',[4])
k.gate('t',[5])
k.gate('cnot',[5,4])
k.gate('cnot',[2,5])
k.gate('cnot',[4,2])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[3])
k.gate('t',[5])
k.gate('cnot',[3,2])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('tdag',[3])
k.gate('cnot',[2,3])
k.gate('tdag',[2])
k.gate('tdag',[3])
k.gate('t',[5])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('cnot',[3,2])
k.gate('h',[5])
k.gate('h',[3])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[3])
k.gate('cnot',[1,0])
k.gate('cnot',[3,1])
k.gate('cnot',[0,3])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[3])
k.gate('cnot',[3,1])
k.gate('cnot',[0,3])
k.gate('cnot',[1,0])
k.gate('h',[3])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[3])
k.gate('t',[5])
k.gate('cnot',[3,2])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('tdag',[3])
k.gate('cnot',[2,3])
k.gate('tdag',[2])
k.gate('tdag',[3])
k.gate('t',[5])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('cnot',[3,2])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[3])
k.gate('t',[5])
k.gate('cnot',[3,2])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('tdag',[3])
k.gate('cnot',[2,3])
k.gate('tdag',[2])
k.gate('tdag',[3])
k.gate('t',[5])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('cnot',[3,2])
k.gate('h',[5])
k.gate('h',[3])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[3])
k.gate('cnot',[1,0])
k.gate('cnot',[3,1])
k.gate('cnot',[0,3])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[3])
k.gate('cnot',[3,1])
k.gate('cnot',[0,3])
k.gate('cnot',[1,0])
k.gate('h',[3])
k.gate('h',[5])
k.gate('t',[2])
k.gate('t',[3])
k.gate('t',[5])
k.gate('cnot',[3,2])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('tdag',[3])
k.gate('cnot',[2,3])
k.gate('tdag',[2])
k.gate('tdag',[3])
k.gate('t',[5])
k.gate('cnot',[5,3])
k.gate('cnot',[2,5])
k.gate('cnot',[3,2])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[0])
k.gate('t',[5])
k.gate('cnot',[0,3])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('tdag',[0])
k.gate('cnot',[3,0])
k.gate('tdag',[3])
k.gate('tdag',[0])
k.gate('t',[5])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('cnot',[0,3])
k.gate('h',[5])
k.gate('h',[0])
k.gate('t',[1])
k.gate('t',[2])
k.gate('t',[0])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('tdag',[2])
k.gate('cnot',[1,2])
k.gate('tdag',[1])
k.gate('tdag',[2])
k.gate('t',[0])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('cnot',[2,1])
k.gate('h',[0])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[0])
k.gate('t',[5])
k.gate('cnot',[0,3])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('tdag',[0])
k.gate('cnot',[3,0])
k.gate('tdag',[3])
k.gate('tdag',[0])
k.gate('t',[5])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('cnot',[0,3])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[0])
k.gate('t',[5])
k.gate('cnot',[0,3])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('tdag',[0])
k.gate('cnot',[3,0])
k.gate('tdag',[3])
k.gate('tdag',[0])
k.gate('t',[5])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('cnot',[0,3])
k.gate('h',[5])
k.gate('h',[0])
k.gate('t',[1])
k.gate('t',[2])
k.gate('t',[0])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('tdag',[2])
k.gate('cnot',[1,2])
k.gate('tdag',[1])
k.gate('tdag',[2])
k.gate('t',[0])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('cnot',[2,1])
k.gate('h',[0])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[0])
k.gate('t',[5])
k.gate('cnot',[0,3])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('tdag',[0])
k.gate('cnot',[3,0])
k.gate('tdag',[3])
k.gate('tdag',[0])
k.gate('t',[5])
k.gate('cnot',[5,0])
k.gate('cnot',[3,5])
k.gate('cnot',[0,3])
k.gate('h',[5])
k.gate('h',[5])
k.gate('t',[1])
k.gate('t',[2])
k.gate('t',[5])
k.gate('cnot',[2,1])
k.gate('cnot',[5,2])
k.gate('cnot',[1,5])
k.gate('tdag',[2])
k.gate('cnot',[1,2])
k.gate('tdag',[1])
k.gate('tdag',[2])
k.gate('t',[5])
k.gate('cnot',[5,2])
k.gate('cnot',[1,5])
k.gate('cnot',[2,1])
k.gate('h',[5])
k.gate('cnot',[2,1])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[2])
k.gate('t',[5])
k.gate('cnot',[2,3])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('tdag',[2])
k.gate('cnot',[3,2])
k.gate('tdag',[3])
k.gate('tdag',[2])
k.gate('t',[5])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('cnot',[2,3])
k.gate('h',[5])
k.gate('h',[2])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[2])
k.gate('cnot',[1,0])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[2])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('h',[2])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[2])
k.gate('t',[5])
k.gate('cnot',[2,3])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('tdag',[2])
k.gate('cnot',[3,2])
k.gate('tdag',[3])
k.gate('tdag',[2])
k.gate('t',[5])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('cnot',[2,3])
k.gate('h',[5])
k.gate('h',[6])
k.gate('t',[4])
k.gate('t',[5])
k.gate('t',[6])
k.gate('cnot',[5,4])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('tdag',[5])
k.gate('cnot',[4,5])
k.gate('tdag',[4])
k.gate('tdag',[5])
k.gate('t',[6])
k.gate('cnot',[6,5])
k.gate('cnot',[4,6])
k.gate('cnot',[5,4])
k.gate('h',[6])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[2])
k.gate('t',[5])
k.gate('cnot',[2,3])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('tdag',[2])
k.gate('cnot',[3,2])
k.gate('tdag',[3])
k.gate('tdag',[2])
k.gate('t',[5])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('cnot',[2,3])
k.gate('h',[5])
k.gate('h',[2])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[2])
k.gate('cnot',[1,0])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[2])
k.gate('cnot',[2,1])
k.gate('cnot',[0,2])
k.gate('cnot',[1,0])
k.gate('h',[2])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[2])
k.gate('t',[5])
k.gate('cnot',[2,3])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('tdag',[2])
k.gate('cnot',[3,2])
k.gate('tdag',[3])
k.gate('tdag',[2])
k.gate('t',[5])
k.gate('cnot',[5,2])
k.gate('cnot',[3,5])
k.gate('cnot',[2,3])
k.gate('h',[5])
k.gate('h',[5])
k.gate('t',[0])
k.gate('t',[1])
k.gate('t',[5])
k.gate('cnot',[1,0])
k.gate('cnot',[5,1])
k.gate('cnot',[0,5])
k.gate('tdag',[1])
k.gate('cnot',[0,1])
k.gate('tdag',[0])
k.gate('tdag',[1])
k.gate('t',[5])
k.gate('cnot',[5,1])
k.gate('cnot',[0,5])
k.gate('cnot',[1,0])
k.gate('h',[5])
k.gate('cnot',[1,0])
k.gate('h',[5])
k.gate('t',[0])
k.gate('t',[3])
k.gate('t',[5])
k.gate('cnot',[3,0])
k.gate('cnot',[5,3])
k.gate('cnot',[0,5])
k.gate('tdag',[3])
k.gate('cnot',[0,3])
k.gate('tdag',[0])
k.gate('tdag',[3])
k.gate('t',[5])
k.gate('cnot',[5,3])
k.gate('cnot',[0,5])
k.gate('cnot',[3,0])
k.gate('h',[5])
k.gate('cnot',[0,3])
k.gate('h',[5])
k.gate('t',[3])
k.gate('t',[4])
k.gate('t',[5])
k.gate('cnot',[4,3])
k.gate('cnot',[5,4])
k.gate('cnot',[3,5])
k.gate('tdag',[4])
k.gate('cnot',[3,4])
k.gate('tdag',[3])
k.gate('tdag',[4])
k.gate('t',[5])
k.gate('cnot',[5,4])
k.gate('cnot',[3,5])
k.gate('cnot',[4,3])
k.gate('h',[5])
k.gate('cnot',[3,4])
if measurement:
for q in range(num_qubits):
k.gate('measure', [q])
p.add_kernel(k)
p.compile()
ql.set_option('mapper', 'no')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform scheduler activation (yes or no)')
parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits at the end of the algorithm')
args = parser.parse_args()
try:
circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
except TypeError:
print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked like it is.')
raise
| 25.755906
| 270
| 0.467441
|
d02c5af1eb177db138aeed094c68cd69db10c397
| 5,860
|
py
|
Python
|
nbdev/sync.py
|
theccalderon/nbdev
|
59a49fbc587894d7ef73970e762cca8c92cf5a13
|
[
"Apache-2.0"
] | 1
|
2021-02-15T05:48:35.000Z
|
2021-02-15T05:48:35.000Z
|
nbdev/sync.py
|
bhoov/nbdev
|
0e071dc35c7cafebd7945367badb5894cab21c2e
|
[
"Apache-2.0"
] | 2
|
2021-09-28T01:11:23.000Z
|
2022-02-26T06:50:19.000Z
|
nbdev/sync.py
|
bhoov/nbdev
|
0e071dc35c7cafebd7945367badb5894cab21c2e
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_sync.ipynb (unless otherwise specified).
__all__ = ['get_name', 'qual_name', 'source_nb', 'relimport2name', 'script2notebook', 'diff_nb_script']
# Cell
from .imports import *
from .export import *
# Cell
def _get_property_name(p):
"Get the name of property `p`"
if hasattr(p, 'fget'):
return p.fget.func.__qualname__ if hasattr(p.fget, 'func') else p.fget.__qualname__
else: return next(iter(re.findall(r'\'(.*)\'', str(p)))).split('.')[-1]
def get_name(obj):
"Get the name of `obj`"
if hasattr(obj, '__name__'): return obj.__name__
elif getattr(obj, '_name', False): return obj._name
elif hasattr(obj,'__origin__'): return str(obj.__origin__).split('.')[-1] #for types
elif type(obj)==property: return _get_property_name(obj)
else: return str(obj).split('.')[-1]
# Cell
def qual_name(obj):
"Get the qualified name of `obj`"
if hasattr(obj,'__qualname__'): return obj.__qualname__
if inspect.ismethod(obj): return f"{get_name(obj.__self__)}.{get_name(fn)}"
return get_name(obj)
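# Illustrative check of the two helpers above (hypothetical demo class, not generated from the notebook):
if __name__ == '__main__':
    class _Demo:
        def method(self): pass
    assert get_name(_Demo) == '_Demo'
    assert qual_name(_Demo.method) == '_Demo.method'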
# Cell
def source_nb(func, is_name=None, return_all=False, mod=None):
"Return the name of the notebook where `func` was defined"
is_name = is_name or isinstance(func, str)
if mod is None: mod = get_nbdev_module()
index = mod.index
name = func if is_name else qual_name(func)
while len(name) > 0:
if name in index: return (name,index[name]) if return_all else index[name]
name = '.'.join(name.split('.')[:-1])
# Cell
_re_cell = re.compile(r'^# Cell|^# Comes from\s+(\S+), cell')
# Cell
def _split(code):
lines = code.split('\n')
nbs_path = Config().nbs_path.relative_to(Config().config_file.parent)
prefix = '' if nbs_path == Path('.') else f'{nbs_path}/'
default_nb = re.search(f'File to edit: {prefix}(\\S+)\\s+', lines[0]).groups()[0]
s,res = 1,[]
while _re_cell.search(lines[s]) is None: s += 1
e = s+1
while e < len(lines):
while e < len(lines) and _re_cell.search(lines[e]) is None: e += 1
grps = _re_cell.search(lines[s]).groups()
nb = grps[0] or default_nb
content = lines[s+1:e]
while len(content) > 1 and content[-1] == '': content = content[:-1]
res.append((nb, '\n'.join(content)))
s,e = e,e+1
return res
# Cell
def relimport2name(name, mod_name):
"Unwarps a relative import in `name` according to `mod_name`"
if mod_name.endswith('.py'): mod_name = mod_name[:-3]
mods = mod_name.split(os.path.sep)
i = last_index(Config().lib_name, mods)
mods = mods[i:]
if name=='.': return '.'.join(mods[:-1])
i = 0
while name[i] == '.': i += 1
return '.'.join(mods[:-i] + [name[i:]])
# Cell
#Catches any from .bla import something and catches .bla in group 1, the imported thing(s) in group 2.
_re_loc_import = re.compile(r'(^\s*)from (\.\S*) import (.*)$')
_re_loc_import1 = re.compile(r'(^\s*)import (\.\S*)(.*)$')
# Cell
def _deal_loc_import(code, fname):
def _replace(m):
sp,mod,obj = m.groups()
return f"{sp}from {relimport2name(mod, fname)} import {obj}"
def _replace1(m):
sp,mod,end = m.groups()
return f"{sp}import {relimport2name(mod, fname)}{end}"
return '\n'.join([_re_loc_import1.sub(_replace1, _re_loc_import.sub(_replace,line)) for line in code.split('\n')])
# Cell
def _script2notebook(fname, dic, silent=False):
"Put the content of `fname` back in the notebooks it came from."
if os.environ.get('IN_TEST',0): return # don't export if running tests
fname = Path(fname)
with open(fname, encoding='utf8') as f: code = f.read()
splits = _split(code)
rel_name = fname.absolute().resolve().relative_to(Config().lib_path)
key = str(rel_name.with_suffix(''))
    assert len(splits)==len(dic[key]), f"Exported file from notebooks should have {len(dic[key])} cells but has {len(splits)}."
assert all([c1[0]==c2[1]] for c1,c2 in zip(splits, dic[key]))
splits = [(c2[0],c1[0],c1[1]) for c1,c2 in zip(splits, dic[key])]
nb_fnames = {Config().nbs_path/s[1] for s in splits}
for nb_fname in nb_fnames:
nb = read_nb(nb_fname)
for i,f,c in splits:
c = _deal_loc_import(c, str(fname))
if f == nb_fname.name:
l = nb['cells'][i]['source'].split('\n')[0]
nb['cells'][i]['source'] = l + '\n' + c
NotebookNotary().sign(nb)
nbformat.write(nb, str(nb_fname), version=4)
if not silent: print(f"Converted {rel_name}.")
# Cell
def script2notebook(fname=None, silent=False):
"Update the notebooks from any changes made in the modules corresponding to `fname`"
if os.environ.get('IN_TEST',0): return
dic = notebook2script(silent=True, to_dict=True)
exported = get_nbdev_module().modules
if fname is None:
files = [f for f in Config().lib_path.glob('**/*.py') if str(f.relative_to(Config().lib_path)) in exported]
else: files = glob.glob(fname)
[ _script2notebook(f, dic, silent=silent) for f in files]
# Cell
import subprocess
from distutils.dir_util import copy_tree
# Cell
def diff_nb_script():
"Print the diff between the notebooks and the library in lib_path"
lib_folder = Config().lib_path
with tempfile.TemporaryDirectory() as d1, tempfile.TemporaryDirectory() as d2:
copy_tree(Config().lib_path, d1)
notebook2script(silent=True)
copy_tree(Config().lib_path, d2)
shutil.rmtree(Config().lib_path)
shutil.copytree(d1, str(Config().lib_path))
for d in [d1, d2]:
if (Path(d)/'__pycache__').exists(): shutil.rmtree(Path(d)/'__pycache__')
res = subprocess.run(['diff', '-ru', d1, d2], stdout=subprocess.PIPE)
print(res.stdout.decode('utf-8'))
| 40.413793
| 129
| 0.630546
|
e56185c5f39178ae4229787ab6a81a31bde8ef49
| 1,167
|
py
|
Python
|
week1/ex6.py
|
kingpeen/My_Pynet
|
328b5b3441ace4e3bbd524c726833a077c2e2dd6
|
[
"Apache-2.0"
] | null | null | null |
week1/ex6.py
|
kingpeen/My_Pynet
|
328b5b3441ace4e3bbd524c726833a077c2e2dd6
|
[
"Apache-2.0"
] | null | null | null |
week1/ex6.py
|
kingpeen/My_Pynet
|
328b5b3441ace4e3bbd524c726833a077c2e2dd6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''
Write a Python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded form.
'''
import yaml
import json
def main():
'''
Write a Python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded
form.
'''
yaml_file = 'my_test.yml'
json_file = 'my_test.json'
my_dict = {
'ip_addr': '172.31.200.1',
'platform': 'cisco_ios',
'vendor': 'cisco',
'model': '1921'
}
my_list = [
'some string',
99,
18,
my_dict,
'another string',
'final string'
]
with open(yaml_file, "w") as f:
f.write(yaml.dump(my_list, default_flow_style=False))
f.write("############ \n ")
yaml.dump(my_list, f)
with open(json_file, "w") as f:
json.dump(my_list, f)
if __name__ == "__main__":
main()
| 23.34
| 80
| 0.60497
|
c831f8ff1cdc1e72723d6a5ac532a984bf6fd3aa
| 6,498
|
py
|
Python
|
lib/services/vloadbalancer/ncloud_vloadbalancer/model/set_target_group_description_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 12
|
2018-11-20T04:30:49.000Z
|
2021-11-09T12:34:26.000Z
|
lib/services/vloadbalancer/ncloud_vloadbalancer/model/set_target_group_description_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 1
|
2019-01-24T15:56:15.000Z
|
2019-05-31T07:56:55.000Z
|
lib/services/vloadbalancer/ncloud_vloadbalancer/model/set_target_group_description_response.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 6
|
2018-06-29T03:45:50.000Z
|
2022-03-18T01:51:45.000Z
|
# coding: utf-8
"""
vloadbalancer
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ncloud_vloadbalancer.model.target_group import TargetGroup # noqa: F401,E501
class SetTargetGroupDescriptionResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'request_id': 'str',
'return_code': 'str',
'return_message': 'str',
'total_rows': 'int',
'target_group_list': 'list[TargetGroup]'
}
attribute_map = {
'request_id': 'requestId',
'return_code': 'returnCode',
'return_message': 'returnMessage',
'total_rows': 'totalRows',
'target_group_list': 'targetGroupList'
}
def __init__(self, request_id=None, return_code=None, return_message=None, total_rows=None, target_group_list=None): # noqa: E501
"""SetTargetGroupDescriptionResponse - a model defined in Swagger""" # noqa: E501
self._request_id = None
self._return_code = None
self._return_message = None
self._total_rows = None
self._target_group_list = None
self.discriminator = None
if request_id is not None:
self.request_id = request_id
if return_code is not None:
self.return_code = return_code
if return_message is not None:
self.return_message = return_message
if total_rows is not None:
self.total_rows = total_rows
if target_group_list is not None:
self.target_group_list = target_group_list
@property
def request_id(self):
"""Gets the request_id of this SetTargetGroupDescriptionResponse. # noqa: E501
:return: The request_id of this SetTargetGroupDescriptionResponse. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this SetTargetGroupDescriptionResponse.
:param request_id: The request_id of this SetTargetGroupDescriptionResponse. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def return_code(self):
"""Gets the return_code of this SetTargetGroupDescriptionResponse. # noqa: E501
:return: The return_code of this SetTargetGroupDescriptionResponse. # noqa: E501
:rtype: str
"""
return self._return_code
@return_code.setter
def return_code(self, return_code):
"""Sets the return_code of this SetTargetGroupDescriptionResponse.
:param return_code: The return_code of this SetTargetGroupDescriptionResponse. # noqa: E501
:type: str
"""
self._return_code = return_code
@property
def return_message(self):
"""Gets the return_message of this SetTargetGroupDescriptionResponse. # noqa: E501
:return: The return_message of this SetTargetGroupDescriptionResponse. # noqa: E501
:rtype: str
"""
return self._return_message
@return_message.setter
def return_message(self, return_message):
"""Sets the return_message of this SetTargetGroupDescriptionResponse.
:param return_message: The return_message of this SetTargetGroupDescriptionResponse. # noqa: E501
:type: str
"""
self._return_message = return_message
@property
def total_rows(self):
"""Gets the total_rows of this SetTargetGroupDescriptionResponse. # noqa: E501
:return: The total_rows of this SetTargetGroupDescriptionResponse. # noqa: E501
:rtype: int
"""
return self._total_rows
@total_rows.setter
def total_rows(self, total_rows):
"""Sets the total_rows of this SetTargetGroupDescriptionResponse.
:param total_rows: The total_rows of this SetTargetGroupDescriptionResponse. # noqa: E501
:type: int
"""
self._total_rows = total_rows
@property
def target_group_list(self):
"""Gets the target_group_list of this SetTargetGroupDescriptionResponse. # noqa: E501
:return: The target_group_list of this SetTargetGroupDescriptionResponse. # noqa: E501
:rtype: list[TargetGroup]
"""
return self._target_group_list
@target_group_list.setter
def target_group_list(self, target_group_list):
"""Sets the target_group_list of this SetTargetGroupDescriptionResponse.
:param target_group_list: The target_group_list of this SetTargetGroupDescriptionResponse. # noqa: E501
:type: list[TargetGroup]
"""
self._target_group_list = target_group_list
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SetTargetGroupDescriptionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 30.083333
| 134
| 0.628347
|
4395c2d4e74cdfd6ac4fe48c550ffe4ae4f0b5be
| 5,174
|
py
|
Python
|
dead_hosts/launcher/updater/pyfunceble_config.py
|
dead-hosts/infrastructure-launcher
|
23287ef68007532958a3385703e01fbab651a0dc
|
[
"MIT"
] | 4
|
2020-04-20T01:15:44.000Z
|
2021-06-17T07:55:11.000Z
|
dead_hosts/launcher/updater/pyfunceble_config.py
|
dead-hosts/infrastructure-launcher
|
23287ef68007532958a3385703e01fbab651a0dc
|
[
"MIT"
] | 2
|
2020-05-21T02:49:24.000Z
|
2020-06-06T13:06:44.000Z
|
dead_hosts/launcher/updater/pyfunceble_config.py
|
dead-hosts/infrastructure-launcher
|
23287ef68007532958a3385703e01fbab651a0dc
|
[
"MIT"
] | 2
|
2020-05-21T05:48:35.000Z
|
2021-07-05T06:47:20.000Z
|
"""
Dead Hosts's launcher - The launcher of the Dead-Hosts infrastructure.
Provides the updater of the PyFunceble configuration.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Project link:
https://github.com/dead-hosts/infrastructure-launcher
License:
::
MIT License
Copyright (c) 2019, 2020, 2021 Dead Hosts
Copyright (c) 2019, 2020, 2021 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import importlib.resources
import logging
import os
from typing import Optional
from PyFunceble.helpers.dict import DictHelper
from PyFunceble.helpers.file import FileHelper
from PyFunceble.helpers.merge import Merge
import dead_hosts.launcher.defaults.links
import dead_hosts.launcher.defaults.paths
import dead_hosts.launcher.defaults.pyfunceble
from dead_hosts.launcher.info_manager import InfoManager
from dead_hosts.launcher.updater.base import UpdaterBase
class PyFuncebleConfigUpdater(UpdaterBase):
"""
Provides the updater of the PyFunceble configuration.
"""
def __init__(self, info_manager: InfoManager) -> None:
self.pyfunceble_config_file_instance = FileHelper(
os.path.join(info_manager.PYFUNCEBLE_CONFIG_DIR, ".PyFunceble.yaml")
)
super().__init__(info_manager)
@property
def authorized(self) -> bool:
return not self.info_manager.own_management
@staticmethod
def get_commit_message(message: str, ping: Optional[str] = None) -> str:
"""
Provides the commit message to use.
"""
if ping:
return f"{message} | cc {ping} | "
return message
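# Editor's note (illustrative sketch, not part of the original launcher): the
# static helper above only decorates the commit message with the ping handles,
# e.g.
#   PyFuncebleConfigUpdater.get_commit_message("Update", ping="@funilrys")
#   # -> "Update | cc @funilrys | "
#   PyFuncebleConfigUpdater.get_commit_message("Update")
#   # -> "Update"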
def pre(self) -> "PyFuncebleConfigUpdater":
logging.info(
"Started to update %r.",
self.pyfunceble_config_file_instance.path,
)
return self
def post(self) -> "PyFuncebleConfigUpdater":
logging.info(
"Finished to update %r",
self.pyfunceble_config_file_instance.path,
)
return self
def start(self) -> "PyFuncebleConfigUpdater":
with importlib.resources.path(
"PyFunceble.data.infrastructure", ".PyFunceble_production.yaml"
) as file_path:
local_version = DictHelper(
DictHelper().from_yaml_file(str(file_path))
).flatten()
local_version = Merge(
dead_hosts.launcher.defaults.pyfunceble.CONFIGURATION
).into(local_version, strict=True)
if self.info_manager.custom_pyfunceble_config and isinstance(
self.info_manager.custom_pyfunceble_config, dict
):
logging.info(
"Custom PyFunceble configuration given, "
"appending them to the local configuration file."
)
local_version = Merge(self.info_manager.custom_pyfunceble_config).into(
local_version, strict=True
)
if self.info_manager.ping:
logging.info("Ping names given, appending them to the commit message.")
local_version[
"cli_testing.ci.end_commit_message"
] = self.get_commit_message(
local_version["cli_testing.ci.end_commit_message"],
ping=self.info_manager.get_ping_for_commit(),
)
local_version = Merge(
dead_hosts.launcher.defaults.pyfunceble.PERSISTENT_CONFIG
).into(local_version, strict=True)
if FileHelper(
os.path.join(
self.info_manager.WORKSPACE_DIR,
dead_hosts.launcher.defaults.paths.EXAMPLE_INFO_FILENAME,
)
).exists():
local_version["cli_testing.ci.active"] = False
# Default behavior of PyFunceble since 4.0.0b12.
local_version["cli_testing.autocontinue"] = False
local_version = DictHelper(local_version).unflatten()
DictHelper(local_version).to_yaml_file(
self.pyfunceble_config_file_instance.path
)
logging.debug("Configuration:\n%s", self.pyfunceble_config_file_instance.read())
return self
| 33.597403
| 88
| 0.676846
|
5750afa4293cdacab8dd00966128e781c96f2a36
| 19,271
|
py
|
Python
|
robot/transform.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 7
|
2015-08-25T00:28:49.000Z
|
2020-04-14T22:58:51.000Z
|
robot/transform.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 89
|
2020-08-03T16:54:08.000Z
|
2022-03-09T19:56:19.000Z
|
robot/transform.py
|
sgowda/brain-python-interface
|
708e2a5229d0496a8ce9de32bda66f0925d366d9
|
[
"Apache-2.0"
] | 4
|
2016-10-05T17:54:26.000Z
|
2020-08-06T15:37:09.000Z
|
"""
Primitive operations for 3x3 orthonormal and 4x4 homogeneous matrices.
@author: Peter Corke
@copyright: Peter Corke
"""
from numpy import *
from robot.utility import *
from numpy.linalg import norm
from .Quaternion import *
def rotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about X-axis
@see: L{roty}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[1, 0, 0],
[0, ct, -st],
[0, st, ct]])
def roty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Y-axis
@see: L{rotx}, L{rotz}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, 0, st],
[0, 1, 0],
[-st, 0, ct]])
def rotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation about Z-axis
@see: L{rotx}, L{roty}, L{rotvec}
"""
ct = cos(theta)
st = sin(theta)
return mat([[ct, -st, 0],
[st, ct, 0],
[ 0, 0, 1]])
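# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# The elementary rotations above compose by matrix multiplication, e.g. a
# Z-Y-X sequence of angles (0.3, 0.2, 0.1) radians:
#   R = rotz(0.3) * roty(0.2) * rotx(0.1)   # 3x3 orthonormal matrix
# ------------------------------------------------------------------------------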
def trotx(theta):
"""
Rotation about X-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about X-axis
@see: L{troty}, L{trotz}, L{rotx}
"""
return r2t(rotx(theta))
def troty(theta):
"""
Rotation about Y-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Y-axis
@see: L{trotx}, L{trotz}, L{roty}
"""
return r2t(roty(theta))
def trotz(theta):
"""
Rotation about Z-axis
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation about Z-axis
@see: L{trotx}, L{troty}, L{rotz}
"""
return r2t(rotz(theta))
##################### Euler angles
def tr2eul(m):
"""
Extract Euler angles.
Returns a vector of Euler angles corresponding to the rotational part of
the homogeneous transform. The 3 angles correspond to rotations about
the Z, Y and Z axes respectively.
@type m: 3x3 or 4x4 matrix
@param m: the rotation matrix
@rtype: 1x3 matrix
@return: Euler angles [S{theta} S{phi} S{psi}]
@see: L{eul2tr}, L{tr2rpy}
"""
try:
m = mat(m)
if ishomog(m):
euler = mat(zeros((1,3)))
if norm(m[0,2])<finfo(float).eps and norm(m[1,2])<finfo(float).eps:
# singularity
euler[0,0] = 0
sp = 0
cp = 1
euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2])
euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1])
return euler
else:
euler[0,0] = arctan2(m[1,2],m[0,2])
sp = sin(euler[0,0])
cp = cos(euler[0,0])
euler[0,1] = arctan2(cp*m[0,2] + sp*m[1,2], m[2,2])
euler[0,2] = arctan2(-sp*m[0,0] + cp*m[1,0], -sp*m[0,1] + cp*m[1,1])
return euler
except ValueError:
euler = []
for i in range(0,len(m)):
euler.append(tr2eul(m[i]))
return euler
def eul2r(phi, theta=None, psi=None):
"""
Rotation from Euler angles.
Two call forms:
- R = eul2r(S{theta}, S{phi}, S{psi})
- R = eul2r([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, Z axes respectively.
@type phi: number or list/array/matrix of angles
@param phi: the first Euler angle, or a list/array/matrix of angles
@type theta: number
@param theta: the second Euler angle
@type psi: number
@param psi: the third Euler angle
@rtype: 3x3 orthonormal matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2eul}, L{eul2tr}, L{tr2rpy}
"""
n = 1
if theta == None and psi==None:
# list/array/matrix argument
phi = mat(phi)
if numcols(phi) != 3:
error('bad arguments')
else:
n = numrows(phi)
psi = phi[:,2]
theta = phi[:,1]
phi = phi[:,0]
elif (theta!=None and psi==None) or (theta==None and psi!=None):
error('bad arguments')
elif not isinstance(phi,(int,int32,float,float64)):
# all args are vectors
phi = mat(phi)
n = numrows(phi)
theta = mat(theta)
psi = mat(psi)
if n>1:
R = []
for i in range(0,n):
r = rotz(phi[i,0]) * roty(theta[i,0]) * rotz(psi[i,0])
R.append(r)
return R
try:
r = rotz(phi[0,0]) * roty(theta[0,0]) * rotz(psi[0,0])
return r
except:
r = rotz(phi) * roty(theta) * rotz(psi)
return r
def eul2tr(phi,theta=None,psi=None):
"""
Rotation from Euler angles.
Two call forms:
- R = eul2tr(S{theta}, S{phi}, S{psi})
- R = eul2tr([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, Z axes respectively.
@type phi: number or list/array/matrix of angles
@param phi: the first Euler angle, or a list/array/matrix of angles
@type theta: number
@param theta: the second Euler angle
@type psi: number
@param psi: the third Euler angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2eul}, L{eul2r}, L{tr2rpy}
"""
return r2t( eul2r(phi, theta, psi) )
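# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# Away from the theta = 0 singularity, eul2tr and tr2eul act as approximate
# inverses (angles in radians):
#   T = eul2tr(0.1, 0.2, 0.3)   # 4x4 homogeneous transform
#   tr2eul(T)                   # approximately matrix([[0.1, 0.2, 0.3]])
# ------------------------------------------------------------------------------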
################################## RPY angles
def tr2rpy(m,zyx=False):
"""
Extract RPY angles.
Returns a vector of RPY angles corresponding to the rotational part of
the homogeneous transform. The 3 angles correspond to rotations about
the Z, Y and X axes respectively.
@type m: 3x3 or 4x4 matrix
@param m: the rotation matrix
@rtype: 1x3 matrix
@return: RPY angles [S{theta} S{phi} S{psi}]
@see: L{rpy2tr}, L{tr2eul}
"""
try:
if not zyx:
m = mat(m)
if ishomog(m):
rpy = mat(zeros((1,3)))
if norm(m[2,2])<finfo(float).eps and norm(m[1,2])<finfo(float).eps:
# singularity
rpy[0,0] = 0
rpy[0,1] = arctan2(m[0,2], m[2,2]) # pitch
rpy[0,2] = arctan2(m[1,0], m[1,1]) # yaw is sum of roll+yaw
return rpy
else:
rpy[0,0] = arctan2(-m[1,2],m[2,2])
sp = sin(rpy[0,0])
cp = cos(rpy[0,0])
rpy[0,1] = arctan2(m[0,2], cp*m[2,2] - sp*m[1,2])# pitch
rpy[0,2] = arctan2(-m[0,1], m[0,0])# yaw
return rpy
else:
m = mat(m)
if ishomog(m):
rpy = mat(zeros((1,3)))
if norm(m[0,0])<finfo(float).eps and norm(m[1,0])<finfo(float).eps:
# singularity
rpy[0,0] = 0
rpy[0,1] = arctan2(-m[2,0], m[0,0])
rpy[0,2] = arctan2(-m[1,2], m[1,1])
return rpy
else:
rpy[0,0] = arctan2(m[1,0],m[0,0])
sp = sin(rpy[0,0])
cp = cos(rpy[0,0])
rpy[0,1] = arctan2(-m[2,0], cp*m[0,0] + sp*m[1,0])
rpy[0,2] = arctan2(sp*m[0,2] - cp*m[1,2], cp*m[1,1] - sp*m[0,1])
return rpy
except ValueError:
rpy = []
for i in range(0,len(m)):
rpy.append(tr2rpy(m[i]))
return rpy
def rpy2r(roll, pitch=None,yaw=None,zyx=False,deg=False):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2r(S{theta}, S{phi}, S{psi})
- R = rpy2r([S{theta}, S{phi}, S{psi}])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
n=1
if pitch==None and yaw==None:
roll= mat(roll)
if numcols(roll) != 3:
error('bad arguments')
n = numrows(roll)
pitch = roll[:,1]
yaw = roll[:,2]
roll = roll[:,0]
if deg:
#convert to degrees
d2r = pi/180.0
roll = roll * d2r;
pitch = pitch * d2r;
yaw = yaw * d2r;
if not zyx:
# XYZ order
if n>1:
R = []
for i in range(0,n):
r = rotz(roll[i,0]) * roty(pitch[i,0]) * rotx(yaw[i,0])
R.append(r)
return R
try:
r = rotz(roll[0,0]) * roty(pitch[0,0]) * rotx(yaw[0,0])
return r
except:
r = rotx(roll) * roty(pitch) * rotz(yaw)
return r
else:
# ZYX order
if n>1:
R = []
for i in range(0,n):
r = rotz(roll[i,0]) * roty(pitch[i,0]) * rotx(yaw[i,0])
R.append(r)
return R
try:
r = rotz(roll[0,0]) * roty(pitch[0,0]) * rotx(yaw[0,0])
return r
except:
r = rotz(roll) * roty(pitch) * rotx(yaw)
return r
def rpy2tr(roll, pitch=None, yaw=None, zyx=False, deg=False):
"""
Rotation from RPY angles.
Two call forms:
- R = rpy2tr(r, p, y)
- R = rpy2tr([r, p, y])
These correspond to rotations about the Z, Y, X axes respectively.
@type roll: number or list/array/matrix of angles
@param roll: roll angle, or a list/array/matrix of angles
@type pitch: number
@param pitch: pitch angle
@type yaw: number
@param yaw: yaw angle
@rtype: 4x4 homogenous matrix
@return: R([S{theta} S{phi} S{psi}])
@see: L{tr2rpy}, L{rpy2r}, L{tr2eul}
"""
return r2t( rpy2r(roll, pitch, yaw, zyx, deg) )
###################################### OA vector form
def oa2r(o,a):
"""Rotation from 2 vectors.
The matrix is formed from 3 vectors such that::
R = [N O A] and N = O x A.
In robotics A is the approach vector, along the direction of the robot's
gripper, and O is the orientation vector in the direction between the
fingertips.
The submatrix is guaranteed to be orthonormal so long as O and A are
not parallel.
@type o: 3-vector
@param o: The orientation vector.
@type a: 3-vector
@param a: The approach vector
@rtype: 3x3 orthonormal rotation matrix
@return: Rotatation matrix
@see: L{rpy2r}, L{eul2r}
"""
n = crossp(o, a)
n = unit(n)
o = crossp(a, n);
o = unit(o).reshape(3,1)
a = unit(a).reshape(3,1)
return bmat('n o a')
def oa2tr(o,a):
"""otation from 2 vectors.
The rotation submatrix is formed from 3 vectors such that::
R = [N O A] and N = O x A.
In robotics A is the approach vector, along the direction of the robot's
gripper, and O is the orientation vector in the direction between the
fingertips.
The submatrix is guaranteed to be orthonormal so long as O and A are
not parallel.
@type o: 3-vector
@param o: The orientation vector.
@type a: 3-vector
@param a: The approach vector
@rtype: 4x4 homogeneous transformation matrix
@return: Transformation matrix
@see: L{rpy2tr}, L{eul2tr}
"""
return r2t(oa2r(o,a))
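# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# Only the orientation and approach directions are supplied; the normal is
# recovered as N = O x A. With O along +Y and A along +Z the columns of
#   oa2r([0, 1, 0], [0, 0, 1])
# line up with the identity rotation (N = +X, O = +Y, A = +Z).
# ------------------------------------------------------------------------------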
###################################### angle/vector form
def rotvec2r(theta, v):
"""
Rotation about arbitrary axis. Compute a rotation matrix representing
a rotation of C{theta} about the vector C{v}.
@type v: 3-vector
@param v: rotation vector
@type theta: number
@param theta: the rotation angle
@rtype: 3x3 orthonormal matrix
@return: rotation
@see: L{rotx}, L{roty}, L{rotz}
"""
v = arg2array(v);
ct = cos(theta)
st = sin(theta)
vt = 1-ct
r = mat([[ct, -v[2]*st, v[1]*st],\
[v[2]*st, ct, -v[0]*st],\
[-v[1]*st, v[0]*st, ct]])
return v*v.T*vt+r
def rotvec2tr(theta, v):
"""
Rotation about arbitrary axis. Compute a rotation matrix representing
a rotation of C{theta} about the vector C{v}.
@type v: 3-vector
@param v: rotation vector
@type theta: number
@param theta: the rotation angle
@rtype: 4x4 homogeneous matrix
@return: rotation
@see: L{trotx}, L{troty}, L{trotz}
"""
return r2t(rotvec2r(theta, v))
###################################### translational transform
def transl(x, y=None, z=None):
"""
Create or decompose translational homogeneous transformations.
Create a homogeneous transformation
===================================
- T = transl(v)
- T = transl(vx, vy, vz)
The transformation is created with a unit rotation submatrix.
The translational elements are set from elements of v which is
a list, array or matrix, or from separate passed elements.
Decompose a homogeneous transformation
======================================
- v = transl(T)
Return the translation vector
"""
if y==None and z==None:
x=mat(x)
try:
if ishomog(x):
return x[0:3,3].reshape(3,1)
else:
return concatenate((concatenate((eye(3),x.reshape(3,1)),1),mat([0,0,0,1])))
except AttributeError:
n=len(x)
r = [[],[],[]]
for i in range(n):
r = concatenate((r,x[i][0:3,3]),1)
return r
elif y!=None and z!=None:
return concatenate((concatenate((eye(3),mat([x,y,z]).T),1),mat([0,0,0,1])))
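# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# transl() both builds and decomposes pure translations:
#   T = transl(1, 2, 3)   # 4x4 transform with unit rotation submatrix
#   transl(T)             # column vector matrix([[1], [2], [3]])
# ------------------------------------------------------------------------------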
###################################### Skew symmetric transform
def skew(*args):
"""
Convert to/from skew-symmetric form. A skew symmetric matrix is a matrix
such that M = -M'
Two call forms
-ss = skew(v)
-v = skew(ss)
The first form builds a 3x3 skew-symmetric from a 3-element vector v.
The second form takes a 3x3 skew-symmetric matrix and returns the 3 unique
elements that it contains.
"""
def ss(b):
return matrix([
[0, -b[2], b[1]],
[b[2], 0, -b[0]],
[-b[1], b[0], 0]]);
if len(args) == 1:
# convert matrix to skew vector
b = args[0];
if isrot(b):
return 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] );
elif ishomog(b):
return vstack( (b[0:3,3], 0.5*matrix( [b[2,1]-b[1,2], b[0,2]-b[2,0], b[1,0]-b[0,1]] ).T) );
# build skew-symmetric matrix
b = arg2array(b);
if len(b) == 3:
return ss(b);
elif len(b) == 6:
r = hstack( (ss(b[3:6]), mat(b[0:3]).T) );
r = vstack( (r, mat([0, 0, 0, 1])) );
return r;
elif len(args) == 3:
return ss(args);
elif len(args) == 6:
r = hstack( (ss(args[3:6]), mat(args[0:3]).T) );
r = vstack( (r, mat([0, 0, 0, 1])) );
return r;
else:
raise ValueError;
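# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# skew() maps a 3-element vector to the 3x3 skew-symmetric matrix used for
# cross products, e.g.:
#   S = skew([1, 2, 3])   # satisfies S == -S.T
# and passing a 3x3 or 4x4 matrix back in extracts the unique elements again.
# ------------------------------------------------------------------------------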
def tr2diff(t1, t2):
"""
Convert a transform difference to differential representation.
Returns the 6-element differential motion required to move
from T1 to T2 in base coordinates.
@type t1: 4x4 homogeneous transform
@param t1: Initial value
@type t2: 4x4 homogeneous transform
@param t2: Final value
@rtype: 6-vector
@return: Differential motion [dx dy dz drx dry drz]
@see: L{skew}
"""
t1 = mat(t1)
t2 = mat(t2)
d = concatenate(
(t2[0:3,3]-t1[0:3,3],
0.5*( crossp(t1[0:3,0], t2[0:3,0]) +
crossp(t1[0:3,1], t2[0:3,1]) +
crossp(t1[0:3,2], t2[0:3,2]) )
))
return d
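# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# For two pure translations the rotational part cancels and only the
# translational difference remains:
#   tr2diff(transl(0, 0, 0), transl(1, 2, 3))   # approximately [1 2 3 0 0 0].T
# ------------------------------------------------------------------------------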
################################## Utility
def trinterp(T0, T1, r):
"""
Interpolate homogeneous transformations.
Compute a homogeneous transform interpolation between C{T0} and C{T1} as
C{r} varies from 0 to 1 such that::
trinterp(T0, T1, 0) = T0
trinterp(T0, T1, 1) = T1
Rotation is interpolated using quaternion spherical linear interpolation.
@type T0: 4x4 homogeneous transform
@param T0: Initial value
@type T1: 4x4 homogeneous transform
@param T1: Final value
@type r: number
@param r: Interpolation index, in the range 0 to 1 inclusive
@rtype: 4x4 homogeneous transform
@return: Interpolated value
@see: L{quaternion}, L{ctraj}
"""
q0 = Quaternion(T0)
q1 = Quaternion(T1)
p0 = transl(T0)
p1 = transl(T1)
qr = q0.interp(q1, r)
pr = p0*(1-r) + r*p1
return vstack( (concatenate((qr.R(),pr),1), mat([0,0,0,1])) )
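# --- Editor's note (illustrative sketch, not part of the original toolbox) ---
# Translation is blended linearly and rotation by quaternion slerp, so the
# endpoints are reproduced exactly:
#   T0, T1 = transl(0, 0, 0), transl(1, 0, 0)
#   trinterp(T0, T1, 0.5)   # approximately transl(0.5, 0, 0)
# ------------------------------------------------------------------------------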
def trnorm(t):
"""
Normalize a homogeneous transformation.
Finite word length arithmetic can cause transforms to become `unnormalized',
that is the rotation submatrix is no longer orthonormal (det(R) != 1).
The rotation submatrix is re-orthogonalized such that the approach vector
(third column) is unchanged in direction::
N = O x A
O = A x N
@type t: 4x4 homogeneous transformation
@param t: the transform matrix to convert
@rtype: 4x4 homogeneous transformation
@return: the transform with a re-orthonormalized rotation submatrix
@see: L{oa2tr}
@bug: Should work for 3x3 matrix as well.
"""
t = mat(t) # N O A
n = crossp(t[0:3,1],t[0:3,2]) # N = O X A
o = crossp(t[0:3,2],t[0:3,0]) # O = A x N
return concatenate(( concatenate((unit(n),unit(t[0:3,1]),unit(t[0:3,2]),t[0:3,3]),1),
mat([0,0,0,1])))
def t2r(T):
"""
Return rotational submatrix of a homogeneous transformation.
@type T: 4x4 homogeneous transformation
@param T: the transform matrix to convert
@rtype: 3x3 orthonormal rotation matrix
@return: rotation submatrix
"""
if ishomog(T)==False:
error( 'input must be a homogeneous transform')
return T[0:3,0:3]
def r2t(R):
"""
Convert a 3x3 orthonormal rotation matrix to a 4x4 homogeneous transformation::
T = | R 0 |
| 0 1 |
@type R: 3x3 orthonormal rotation matrix
@param R: the rotation matrix to convert
@rtype: 4x4 homogeneous matrix
@return: homogeneous equivalent
"""
return concatenate( (concatenate( (R, zeros((3,1))),1), mat([0,0,0,1])) )
| 27.490728
| 103
| 0.518032
|
5c3adfda9d1f2b50a8ab55b3a95b306407ca1588
| 24,286
|
py
|
Python
|
python_modules/dagster/dagster/core/workspace/context.py
|
dehume/dagster
|
3b55c4e864775b7a70ed8ff539629317a1202505
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/workspace/context.py
|
dehume/dagster
|
3b55c4e864775b7a70ed8ff539629317a1202505
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster/core/workspace/context.py
|
dehume/dagster
|
3b55c4e864775b7a70ed8ff539629317a1202505
|
[
"Apache-2.0"
] | null | null | null |
import sys
import threading
import time
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from contextlib import ExitStack
from typing import TYPE_CHECKING, Dict, List, Optional, Union, cast
from dagster import check
from dagster.core.errors import DagsterInvariantViolationError, DagsterRepositoryLocationLoadError
from dagster.core.execution.plan.state import KnownExecutionState
from dagster.core.host_representation import (
ExternalExecutionPlan,
ExternalPipeline,
GrpcServerRepositoryLocation,
PipelineSelector,
RepositoryHandle,
RepositoryLocation,
RepositoryLocationOrigin,
)
from dagster.core.host_representation.grpc_server_registry import (
GrpcServerRegistry,
ProcessGrpcServerRegistry,
)
from dagster.core.host_representation.grpc_server_state_subscriber import (
LocationStateChangeEvent,
LocationStateChangeEventType,
LocationStateSubscriber,
)
from dagster.core.host_representation.origin import GrpcServerRepositoryLocationOrigin
from dagster.core.instance import DagsterInstance
from dagster.grpc.server_watcher import create_grpc_watch_thread
from dagster.utils.error import SerializableErrorInfo, serializable_error_info_from_exc_info
from .load_target import WorkspaceLoadTarget
from .permissions import get_user_permissions
from .workspace import IWorkspace, WorkspaceLocationEntry, WorkspaceLocationLoadStatus
if TYPE_CHECKING:
from rx.subjects import Subject
from dagster.core.host_representation import (
ExternalPartitionConfigData,
ExternalPartitionExecutionErrorData,
ExternalPartitionNamesData,
ExternalPartitionSetExecutionParamData,
ExternalPartitionTagsData,
)
DAGIT_GRPC_SERVER_HEARTBEAT_TTL = 45
class BaseWorkspaceRequestContext(IWorkspace):
"""
This class is a request-scoped object that stores (1) a reference to all repository locations
that exist on the `IWorkspaceProcessContext` at the start of the request and (2) a snapshot of the
workspace at the start of the request.
This object is needed because a process context and the repository locations on that context can
be updated (for example, from a thread on the process context). If a request is accessing a
repository location at the same time the repository location was being cleaned up, we would run
into errors.
"""
@property
@abstractmethod
def instance(self) -> DagsterInstance:
pass
@abstractmethod
def get_workspace_snapshot(self) -> Dict[str, WorkspaceLocationEntry]:
pass
@abstractmethod
def get_location_entry(self, name: str) -> Optional[WorkspaceLocationEntry]:
pass
@property
@abstractmethod
def process_context(self) -> "IWorkspaceProcessContext":
pass
@property
@abstractmethod
def version(self) -> Optional[str]:
pass
@property
@abstractmethod
def permissions(self) -> Dict[str, bool]:
pass
@abstractmethod
def has_permission(self, permission: str) -> bool:
pass
@property
def show_instance_config(self) -> bool:
return True
def get_location(self, location_name: str):
location_entry = self.get_location_entry(location_name)
if not location_entry:
raise DagsterInvariantViolationError(
f"Location {location_name} does not exist in workspace"
)
if location_entry.repository_location:
return location_entry.repository_location
error_info = cast(SerializableErrorInfo, location_entry.load_error)
raise DagsterRepositoryLocationLoadError(
f"Failure loading {location_name}: {error_info.to_string()}",
load_error_infos=[error_info],
)
@property
def repository_locations(self) -> List[RepositoryLocation]:
return [
entry.repository_location
for entry in self.get_workspace_snapshot().values()
if entry.repository_location
]
@property
def repository_location_names(self) -> List[str]:
return list(self.get_workspace_snapshot())
def repository_location_errors(self) -> List[SerializableErrorInfo]:
return [
entry.load_error for entry in self.get_workspace_snapshot().values() if entry.load_error
]
def get_repository_location(self, name: str) -> RepositoryLocation:
location_entry = self.get_location_entry(name)
if not location_entry:
raise Exception(f"Location {name} not in workspace")
if location_entry.load_error:
raise Exception(f"Error loading location {name}: {location_entry.load_error}")
return cast(RepositoryLocation, location_entry.repository_location)
def has_repository_location_error(self, name: str) -> bool:
return self.get_repository_location_error(name) != None
def get_repository_location_error(self, name: str) -> Optional[SerializableErrorInfo]:
entry = self.get_location_entry(name)
return entry.load_error if entry else None
def has_repository_location_name(self, name: str) -> bool:
return bool(self.get_location_entry(name))
def has_repository_location(self, name: str) -> bool:
location_entry = self.get_location_entry(name)
return bool(location_entry and location_entry.repository_location != None)
def is_reload_supported(self, name: str) -> bool:
entry = self.get_location_entry(name)
return entry.origin.is_reload_supported if entry else False
def is_shutdown_supported(self, name: str) -> bool:
entry = self.get_location_entry(name)
return entry.origin.is_shutdown_supported if entry else False
def reload_repository_location(self, name: str) -> "BaseWorkspaceRequestContext":
# This method reloads the location on the process context, and returns a new
# request context created from the updated process context
self.process_context.reload_repository_location(name)
return self.process_context.create_request_context()
def shutdown_repository_location(self, name: str):
self.process_context.shutdown_repository_location(name)
def reload_workspace(self) -> "BaseWorkspaceRequestContext":
self.process_context.reload_workspace()
return self.process_context.create_request_context()
def has_external_pipeline(self, selector: PipelineSelector) -> bool:
check.inst_param(selector, "selector", PipelineSelector)
loc = self.get_repository_location(selector.location_name)
return (
loc is not None
and loc.has_repository(selector.repository_name)
and loc.get_repository(selector.repository_name).has_external_pipeline(
selector.pipeline_name
)
)
def get_full_external_pipeline(self, selector: PipelineSelector) -> ExternalPipeline:
return (
self.get_repository_location(selector.location_name)
.get_repository(selector.repository_name)
.get_full_external_pipeline(selector.pipeline_name)
)
def get_external_execution_plan(
self,
external_pipeline: ExternalPipeline,
run_config: dict,
mode: str,
step_keys_to_execute: List[str],
known_state: KnownExecutionState,
) -> ExternalExecutionPlan:
return self.get_repository_location(
external_pipeline.handle.location_name
).get_external_execution_plan(
external_pipeline=external_pipeline,
run_config=run_config,
mode=mode,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=self.instance,
)
def get_external_partition_config(
self, repository_handle: RepositoryHandle, partition_set_name: str, partition_name: str
) -> Union["ExternalPartitionConfigData", "ExternalPartitionExecutionErrorData"]:
return self.get_repository_location(
repository_handle.location_name
).get_external_partition_config(
repository_handle=repository_handle,
partition_set_name=partition_set_name,
partition_name=partition_name,
)
def get_external_partition_tags(
self, repository_handle: RepositoryHandle, partition_set_name: str, partition_name: str
) -> Union["ExternalPartitionTagsData", "ExternalPartitionExecutionErrorData"]:
return self.get_repository_location(
repository_handle.location_name
).get_external_partition_tags(
repository_handle=repository_handle,
partition_set_name=partition_set_name,
partition_name=partition_name,
)
def get_external_partition_names(
self, repository_handle: RepositoryHandle, partition_set_name: str
) -> Union["ExternalPartitionNamesData", "ExternalPartitionExecutionErrorData"]:
return self.get_repository_location(
repository_handle.location_name
).get_external_partition_names(repository_handle, partition_set_name)
def get_external_partition_set_execution_param_data(
self,
repository_handle: RepositoryHandle,
partition_set_name: str,
partition_names: List[str],
) -> Union["ExternalPartitionSetExecutionParamData", "ExternalPartitionExecutionErrorData"]:
return self.get_repository_location(
repository_handle.location_name
).get_external_partition_set_execution_param_data(
repository_handle=repository_handle,
partition_set_name=partition_set_name,
partition_names=partition_names,
)
def get_external_notebook_data(self, repository_location_name, notebook_path: str):
check.str_param(repository_location_name, "repository_location_name")
check.str_param(notebook_path, "notebook_path")
repository_location = self.get_repository_location(repository_location_name)
return repository_location.get_external_notebook_data(notebook_path=notebook_path)
class WorkspaceRequestContext(BaseWorkspaceRequestContext):
def __init__(
self,
instance: DagsterInstance,
workspace_snapshot: Dict[str, WorkspaceLocationEntry],
process_context: "WorkspaceProcessContext",
version: Optional[str],
source: Optional[object],
):
self._instance = instance
self._workspace_snapshot = workspace_snapshot
self._process_context = process_context
self._version = version
self._source = source
@property
def instance(self) -> DagsterInstance:
return self._instance
def get_workspace_snapshot(self) -> Dict[str, WorkspaceLocationEntry]:
return self._workspace_snapshot
def get_location_entry(self, name) -> Optional[WorkspaceLocationEntry]:
return self._workspace_snapshot.get(name)
@property
def process_context(self) -> "IWorkspaceProcessContext":
return self._process_context
@property
def version(self) -> Optional[str]:
return self._version
@property
def read_only(self) -> bool:
return self._process_context.read_only
@property
def permissions(self) -> Dict[str, bool]:
return self._process_context.permissions
def has_permission(self, permission: str) -> bool:
permissions = self._process_context.permissions
check.invariant(
permission in permissions, f"Permission {permission} not listed in permissions map"
)
return permissions[permission]
@property
def source(self) -> Optional[object]:
"""
The source of the request this WorkspaceRequestContext originated from.
For example in Dagit this object represents the web request.
"""
return self._source
class IWorkspaceProcessContext(ABC):
"""
Class that stores process-scoped information about a dagit session.
In most cases, you will want to create a `BaseWorkspaceRequestContext` to obtain a request-scoped
object.
"""
@abstractmethod
def create_request_context(self, source=None) -> BaseWorkspaceRequestContext:
"""
Create a usable fixed context for the scope of a request.
Args:
source (Optional[Any]):
The source of the request, such as an object representing the web request
or http connection.
"""
@property
@abstractmethod
def version(self) -> str:
pass
@property
@abstractmethod
def location_state_events(self) -> "Subject":
pass
@abstractmethod
def reload_repository_location(self, name: str) -> None:
pass
def shutdown_repository_location(self, name: str) -> None:
raise NotImplementedError
@abstractmethod
def reload_workspace(self) -> None:
pass
@property
@abstractmethod
def instance(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
pass
class WorkspaceProcessContext(IWorkspaceProcessContext):
"""
This class is a process-scoped object that:
1. Maintains an up-to-date dictionary of repository locations
2. Creates a `WorkspaceRequestContext` to be the workspace for each request
3. Runs watch thread processes that monitor repository locations
To access a RepositoryLocation, you should create a `WorkspaceRequestContext`
using `create_request_context`.
"""
def __init__(
self,
instance: DagsterInstance,
workspace_load_target: Optional[WorkspaceLoadTarget],
version: str = "",
read_only: bool = False,
grpc_server_registry=None,
):
self._stack = ExitStack()
check.opt_str_param(version, "version")
check.bool_param(read_only, "read_only")
# lazy import for perf
from rx.subjects import Subject
self._instance = check.inst_param(instance, "instance", DagsterInstance)
self._workspace_load_target = check.opt_inst_param(
workspace_load_target, "workspace_load_target", WorkspaceLoadTarget
)
self._location_state_events = Subject()
self._location_state_subscriber = LocationStateSubscriber(
self._location_state_events_handler
)
self._read_only = read_only
self._version = version
# Guards changes to _location_dict, _location_error_dict, and _location_origin_dict
self._lock = threading.Lock()
# Only ever set up by main thread
self._watch_thread_shutdown_events: Dict[str, threading.Event] = {}
self._watch_threads: Dict[str, threading.Thread] = {}
self._state_subscribers: List[LocationStateSubscriber] = []
self.add_state_subscriber(self._location_state_subscriber)
if grpc_server_registry:
self._grpc_server_registry: GrpcServerRegistry = check.inst_param(
grpc_server_registry, "grpc_server_registry", GrpcServerRegistry
)
else:
self._grpc_server_registry = self._stack.enter_context(
ProcessGrpcServerRegistry(
reload_interval=0,
heartbeat_ttl=DAGIT_GRPC_SERVER_HEARTBEAT_TTL,
startup_timeout=instance.code_server_process_startup_timeout,
)
)
self._location_entry_dict: Dict[str, WorkspaceLocationEntry] = OrderedDict()
with self._lock:
self._load_workspace()
@property
def workspace_load_target(self):
return self._workspace_load_target
def add_state_subscriber(self, subscriber):
self._state_subscribers.append(subscriber)
def _load_workspace(self):
assert self._lock.locked()
repository_location_origins = (
self._workspace_load_target.create_origins() if self._workspace_load_target else []
)
check.list_param(
repository_location_origins,
"repository_location_origins",
of_type=RepositoryLocationOrigin,
)
self._location_entry_dict = OrderedDict()
for origin in repository_location_origins:
check.invariant(
self._location_entry_dict.get(origin.location_name) is None,
'Cannot have multiple locations with the same name, got multiple "{name}"'.format(
name=origin.location_name,
),
)
if origin.supports_server_watch:
self._start_watch_thread(origin)
self._location_entry_dict[origin.location_name] = self._load_location(origin)
def _create_location_from_origin(
self, origin: RepositoryLocationOrigin
) -> Optional[RepositoryLocation]:
if not self._grpc_server_registry.supports_origin(origin):
return origin.create_location()
else:
endpoint = (
self._grpc_server_registry.reload_grpc_endpoint(origin)
if self._grpc_server_registry.supports_reload
else self._grpc_server_registry.get_grpc_endpoint(origin)
)
return GrpcServerRepositoryLocation(
origin=origin,
server_id=endpoint.server_id,
port=endpoint.port,
socket=endpoint.socket,
host=endpoint.host,
heartbeat=True,
watch_server=False,
grpc_server_registry=self._grpc_server_registry,
)
@property
def instance(self):
return self._instance
@property
def read_only(self):
return self._read_only
@property
def permissions(self) -> Dict[str, bool]:
return get_user_permissions(self)
@property
def version(self) -> str:
return self._version
def _send_state_event_to_subscribers(self, event: LocationStateChangeEvent) -> None:
check.inst_param(event, "event", LocationStateChangeEvent)
for subscriber in self._state_subscribers:
subscriber.handle_event(event)
def _start_watch_thread(self, origin: GrpcServerRepositoryLocationOrigin) -> None:
location_name = origin.location_name
check.invariant(location_name not in self._watch_thread_shutdown_events)
client = origin.create_client()
shutdown_event, watch_thread = create_grpc_watch_thread(
location_name,
client,
on_updated=lambda location_name, new_server_id: self._send_state_event_to_subscribers(
LocationStateChangeEvent(
LocationStateChangeEventType.LOCATION_UPDATED,
location_name=location_name,
message="Server has been updated.",
server_id=new_server_id,
)
),
on_error=lambda location_name: self._send_state_event_to_subscribers(
LocationStateChangeEvent(
LocationStateChangeEventType.LOCATION_ERROR,
location_name=location_name,
message="Unable to reconnect to server. You can reload the server once it is "
"reachable again",
)
),
)
self._watch_thread_shutdown_events[location_name] = shutdown_event
self._watch_threads[location_name] = watch_thread
watch_thread.start()
def _load_location(self, origin):
assert self._lock.locked()
location_name = origin.location_name
location = None
error = None
try:
location = self._create_location_from_origin(origin)
except Exception:
error = serializable_error_info_from_exc_info(sys.exc_info())
warnings.warn(
"Error loading repository location {location_name}:{error_string}".format(
location_name=location_name, error_string=error.to_string()
)
)
return WorkspaceLocationEntry(
origin=origin,
repository_location=location,
load_error=error,
load_status=WorkspaceLocationLoadStatus.LOADED,
display_metadata=location.get_display_metadata()
if location
else origin.get_display_metadata(),
update_timestamp=time.time(),
)
def create_snapshot(self):
with self._lock:
return self._location_entry_dict.copy()
@property
def repository_locations_count(self):
with self._lock:
return len(self._location_entry_dict)
@property
def repository_location_names(self):
with self._lock:
return list(self._location_entry_dict)
def has_repository_location(self, location_name):
check.str_param(location_name, "location_name")
with self._lock:
return (
location_name in self._location_entry_dict
and self._location_entry_dict[location_name].repository_location
)
def has_repository_location_error(self, location_name):
check.str_param(location_name, "location_name")
with self._lock:
return (
location_name in self._location_entry_dict
and self._location_entry_dict[location_name].load_error
)
def reload_repository_location(self, name: str) -> None:
# Can be called from a background thread
with self._lock:
# Relying on GC to clean up the old location once nothing else
# is referencing it
self._location_entry_dict[name] = self._load_location(
self._location_entry_dict[name].origin
)
def shutdown_repository_location(self, name: str):
with self._lock:
self._location_entry_dict[name].origin.shutdown_server()
def reload_workspace(self):
# Can be called from a background thread
with self._lock:
self._cleanup_locations()
self._load_workspace()
def _cleanup_locations(self):
assert self._lock.locked()
for _, event in self._watch_thread_shutdown_events.items():
event.set()
for _, watch_thread in self._watch_threads.items():
watch_thread.join()
self._watch_thread_shutdown_events = {}
self._watch_threads = {}
for entry in self._location_entry_dict.values():
if entry.repository_location:
entry.repository_location.cleanup()
self._location_entry_dict = OrderedDict()
def create_request_context(self, source=None) -> WorkspaceRequestContext:
return WorkspaceRequestContext(
instance=self._instance,
workspace_snapshot=self.create_snapshot(),
process_context=self,
version=self.version,
source=source,
)
@property
def location_state_events(self) -> "Subject":
return self._location_state_events
def _location_state_events_handler(self, event: LocationStateChangeEvent) -> None:
# If the server was updated or we were not able to reconnect, we immediately reload the
# location handle
if event.event_type in (
LocationStateChangeEventType.LOCATION_UPDATED,
LocationStateChangeEventType.LOCATION_ERROR,
):
# In case of an updated location, reload the handle to get updated repository data and
# re-attach a subscriber
# In case of a location error, just reload the handle in order to update the workspace
# with the correct error messages
self.reload_repository_location(event.location_name)
self._location_state_events.on_next(event)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
with self._lock:
self._cleanup_locations()
self._stack.close()
| 35.767305
| 102
| 0.680474
|
a4fd95cdbbffd86f4c4e7ef02992638073ae43c5
| 1,109
|
py
|
Python
|
quickstart.py
|
erickmartinez/pydlcp
|
611eceeb0816af432e1c06ee171376af2bc13a0e
|
[
"BSD-3-Clause"
] | null | null | null |
quickstart.py
|
erickmartinez/pydlcp
|
611eceeb0816af432e1c06ee171376af2bc13a0e
|
[
"BSD-3-Clause"
] | null | null | null |
quickstart.py
|
erickmartinez/pydlcp
|
611eceeb0816af432e1c06ee171376af2bc13a0e
|
[
"BSD-3-Clause"
] | null | null | null |
import pydlcp.arduino_board as ard
import pydlcp.controller as controller
import configparser
import os
settings = r'G:\Shared drives\FenningLab2\LabData\ImpedanceAnalyzer\DLCP\20200922_training\D69_clean_low_frequency.ini'
arduino_com = 'COM8'
unit_name = 'HP1'
pin = 1
pinMappings = {
'keithley': 'A0', 'fan': 'A1', 'thermocouple': '10', 1: '2', 2: '3', 3: '4', 4: '5', 5: '6', 6: '7', 7: '8', 8: '9'
}
if __name__ == '__main__':
if not os.path.exists(settings):
raise FileExistsError('Settings file: \'{0}\' does not exist!'.format(settings))
config = configparser.ConfigParser()
config.read(settings)
a = ard.ArduinoBoard(address=arduino_com, name=unit_name, pin_mappings=pinMappings)
a.connect()
dlcp_controller = controller.Controller()
dlcp_controller.connect_devices()
dlcp_controller.load_test_config(config=config)
# a.connect_keithley()
a.pin_on(2)
try:
dlcp_controller.start_dlcp()
except Exception as e:
print(e)
finally:
a.pin_off(2)
dlcp_controller.disconnect_devices()
a.disconnect()
| 30.805556
| 120
| 0.679892
|
7ed9cae26cadfbf52b8679b7edb377dff7bd90d7
| 320
|
py
|
Python
|
2018/day-01/part2.py
|
amochtar/adventofcode
|
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
|
[
"MIT"
] | 1
|
2019-12-27T22:36:30.000Z
|
2019-12-27T22:36:30.000Z
|
2018/day-01/part2.py
|
amochtar/adventofcode
|
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
|
[
"MIT"
] | null | null | null |
2018/day-01/part2.py
|
amochtar/adventofcode
|
292e7f00a1e19d2149d00246b0a77fedfcd3bd08
|
[
"MIT"
] | null | null | null |
def solve(input):
f = 0
ff = set([f])
frs = list(map(int, input))
while True:
for fr in frs:
f += fr
if f in ff:
print(f)
return
ff.add(f)
with open('input.txt', 'r') as f:
input = f.read().splitlines()
solve(input)
| 16.842105
| 33
| 0.425
|
9212a5939104a181dcdd48fe7d37ab61603d3868
| 593
|
gyp
|
Python
|
deps/libgdal/gyp-formats/mrsid_lidar.gyp
|
seraph144/node-gdal
|
c6987705ced2b4eba8be123ececa40be80e56694
|
[
"Apache-2.0"
] | null | null | null |
deps/libgdal/gyp-formats/mrsid_lidar.gyp
|
seraph144/node-gdal
|
c6987705ced2b4eba8be123ececa40be80e56694
|
[
"Apache-2.0"
] | null | null | null |
deps/libgdal/gyp-formats/mrsid_lidar.gyp
|
seraph144/node-gdal
|
c6987705ced2b4eba8be123ececa40be80e56694
|
[
"Apache-2.0"
] | null | null | null |
{
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_mrsid_lidar_frmt",
"type": "static_library",
"sources": [
"../gdal/frmts/mrsid_lidar/gdal_MG4Lidar.cpp"
],
"include_dirs": [
"../gdal/frmts/mrsid_lidar",
"../gdal/frmts/gtiff/libgeotiff",
# The mrsid_include variable needs to be set to the full path of your local lizard tech libs
"<(mrsid_include)/Lidar_DSDK/include"
]
}
]
}
| 29.65
| 108
| 0.473862
|
c94b8eaa127adee64848c9105b93aad25d5c9b8b
| 2,573
|
py
|
Python
|
generate_flashcards.py
|
district10/shuangpin-heatmap
|
0a299d4f567673648e5ca08db7744b0be1d90951
|
[
"MIT"
] | 9
|
2020-03-09T14:27:10.000Z
|
2022-01-11T13:57:53.000Z
|
generate_flashcards.py
|
district10/shuangpin-heatmap
|
0a299d4f567673648e5ca08db7744b0be1d90951
|
[
"MIT"
] | null | null | null |
generate_flashcards.py
|
district10/shuangpin-heatmap
|
0a299d4f567673648e5ca08db7744b0be1d90951
|
[
"MIT"
] | 2
|
2021-01-13T11:27:36.000Z
|
2022-03-07T16:37:52.000Z
|
import pygal
import pypinyin
from typing import Union, Set, Dict, List, Any, Tuple, Optional
import os
import sys
import json
from collections import defaultdict
import numpy as np
import re
from pprint import pprint
from shuangpin_heatmap import pinyin2shuangpin, mkdir_p
import shutil
PWD = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
path = f'{PWD}/data/sample3.txt'
output_directory = '/home/tzx/git/blog/notes/cards_shuangpin'
mkdir_p(output_directory)
with open(path) as f:
lines = f.readlines()
cards = []
for line in lines:
line = line.strip()
if not line:
continue
pinyin = [k[0] for k in pypinyin.pinyin(line, style=pypinyin.Style.NORMAL, errors='ignore')]
cache = {}
trans = {}
shuangpin = [
pinyin2shuangpin(
py,
shuangpin_schema_name='ziranma',
cache=cache,
translated=trans,
) for py in pinyin
]
py2sp = [[py, sp] for py, sp in trans.items() if py != sp]
if not py2sp:
continue
cards.append(f'{output_directory}/card_{len(cards):08d}.md')
with open(cards[-1], 'w') as f:
if len(line) < 40:
prefix = f' '
line = line.replace('\n', ';')
f.write(f'- "{line}" -<\n\n : ')
else:
prefix = ''
f.write(f'{line}\n')
if py2sp:
f.write(f'| 拼音 | 双拼 |\n')
f.write(f'{prefix}| :--- | :--: |\n')
for (py, sp) in py2sp:
if py == sp:
continue
f.write(f'{prefix}| {py} | {sp} |\n')
f.write(f'\n{prefix}```')
f.write(f'\n{prefix}{"".join(shuangpin)}')
f.write(f'\n{prefix}{line}')
# ziranma = [
# pinyin2shuangpin(
# py,
# shuangpin_schema_name='ziranma',
# cache=cache,
# translated=trans,
# ) for py in pinyin
# ]
# f.write(f'\n\n\n\n{prefix}{"".join(ziranma)}')
f.write(f'\n{prefix}```\n')
with open(f'{output_directory}/index.md', 'w') as f:
f.write('# Cards\n')
for card in cards:
basename = os.path.basename(card)
f.write(f'\n- [{basename}]({basename})')
print(f'done, wrote #{len(cards)} cards to {output_directory}')
| 31.765432
| 100
| 0.487369
|
7ffe8debb982828d2a4bdc7cbc20ff9c992e3bbb
| 36,292
|
py
|
Python
|
research/object_detection/utils/config_util.py
|
Santhanalakshmimano/SpeedBump_detection_usingCV
|
7b68f260cf1351d757983a48c5a62e063df807c9
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/config_util.py
|
Santhanalakshmimano/SpeedBump_detection_usingCV
|
7b68f260cf1351d757983a48c5a62e063df807c9
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/config_util.py
|
Santhanalakshmimano/SpeedBump_detection_usingCV
|
7b68f260cf1351d757983a48c5a62e063df807c9
|
[
"Apache-2.0"
] | 1
|
2021-07-13T01:22:08.000Z
|
2021-07-13T01:22:08.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for reading and updating configuration files."""
import os
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from protos import eval_pb2
from protos import graph_rewriter_pb2
from protos import input_reader_pb2
from protos import model_pb2
from protos import pipeline_pb2
from protos import train_pb2
def get_image_resizer_config(model_config):
"""Returns the image resizer config from a model config.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
An image_resizer_pb2.ImageResizer.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.image_resizer
if meta_architecture == "ssd":
return model_config.ssd.image_resizer
raise ValueError("Unknown model type: {}".format(meta_architecture))
def get_spatial_image_size(image_resizer_config):
"""Returns expected spatial size of the output image from a given config.
Args:
image_resizer_config: An image_resizer_pb2.ImageResizer.
Returns:
A list of two integers of the form [height, width]. `height` and `width` are
set to -1 if they cannot be determined during graph construction.
Raises:
ValueError: If the model type is not recognized.
"""
if image_resizer_config.HasField("fixed_shape_resizer"):
return [
image_resizer_config.fixed_shape_resizer.height,
image_resizer_config.fixed_shape_resizer.width
]
if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
else:
return [-1, -1]
raise ValueError("Unknown image resizer type.")
def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
"""Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
proto.
config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
override pipeline_config_path.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Values are the
corresponding config objects.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile(pipeline_config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
if config_override:
text_format.Merge(config_override, pipeline_config)
return create_configs_from_pipeline_proto(pipeline_config)
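# Editor's note (illustrative, not part of the original utility; the path is
# hypothetical): typical usage reads one pipeline file and then pulls out the
# individual protos by key, e.g.
#   configs = get_configs_from_pipeline_file("samples/pipeline.config")
#   model_config = configs["model"]
#   num_classes = get_number_of_classes(model_config)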
def create_configs_from_pipeline_proto(pipeline_config):
"""Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.
Args:
pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_configs`. Values are
the corresponding config objects or list of config objects (only for
eval_input_configs).
"""
configs = {}
configs["model"] = pipeline_config.model
configs["train_config"] = pipeline_config.train_config
configs["train_input_config"] = pipeline_config.train_input_reader
configs["eval_config"] = pipeline_config.eval_config
configs["eval_input_configs"] = pipeline_config.eval_input_reader
# Keeps eval_input_config only for backwards compatibility. All clients should
# read eval_input_configs instead.
if configs["eval_input_configs"]:
configs["eval_input_config"] = configs["eval_input_configs"][0]
if pipeline_config.HasField("graph_rewriter"):
configs["graph_rewriter_config"] = pipeline_config.graph_rewriter
return configs
def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
"""Parses config for graph rewriter.
Args:
graph_rewriter_config_file: file path to the graph rewriter config.
Returns:
graph_rewriter_pb2.GraphRewriter proto
"""
graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
text_format.Merge(f.read(), graph_rewriter_config)
return graph_rewriter_config
def create_pipeline_proto_from_configs(configs):
"""Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.
This function performs the inverse operation of
create_configs_from_pipeline_proto().
Args:
configs: Dictionary of configs. See get_configs_from_pipeline_file().
Returns:
A fully populated pipeline_pb2.TrainEvalPipelineConfig.
"""
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.model.CopyFrom(configs["model"])
pipeline_config.train_config.CopyFrom(configs["train_config"])
pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
pipeline_config.eval_config.CopyFrom(configs["eval_config"])
pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
if "graph_rewriter_config" in configs:
pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
return pipeline_config
def save_pipeline_config(pipeline_config, directory):
"""Saves a pipeline config text file to disk.
Args:
pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
directory: The model directory into which the pipeline config file will be
saved.
"""
if not file_io.file_exists(directory):
file_io.recursive_create_dir(directory)
pipeline_config_path = os.path.join(directory, "pipeline.config")
config_text = text_format.MessageToString(pipeline_config)
with tf.gfile.Open(pipeline_config_path, "wb") as f:
tf.logging.info("Writing pipeline config file to %s",
pipeline_config_path)
f.write(config_text)
def get_configs_from_multiple_files(model_config_path="",
train_config_path="",
train_input_config_path="",
eval_config_path="",
eval_input_config_path="",
graph_rewriter_config_path=""):
"""Reads training configuration from multiple config files.
Args:
model_config_path: Path to model_pb2.DetectionModel.
train_config_path: Path to train_pb2.TrainConfig.
train_input_config_path: Path to input_reader_pb2.InputReader.
eval_config_path: Path to eval_pb2.EvalConfig.
eval_input_config_path: Path to input_reader_pb2.InputReader.
graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.
Returns:
Dictionary of configuration objects. Keys are `model`, `train_config`,
`train_input_config`, `eval_config`, `eval_input_config`. Key/Values are
returned only for valid (non-empty) strings.
"""
configs = {}
if model_config_path:
model_config = model_pb2.DetectionModel()
with tf.gfile.GFile(model_config_path, "r") as f:
text_format.Merge(f.read(), model_config)
configs["model"] = model_config
if train_config_path:
train_config = train_pb2.TrainConfig()
with tf.gfile.GFile(train_config_path, "r") as f:
text_format.Merge(f.read(), train_config)
configs["train_config"] = train_config
if train_input_config_path:
train_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(train_input_config_path, "r") as f:
text_format.Merge(f.read(), train_input_config)
configs["train_input_config"] = train_input_config
if eval_config_path:
eval_config = eval_pb2.EvalConfig()
with tf.gfile.GFile(eval_config_path, "r") as f:
text_format.Merge(f.read(), eval_config)
configs["eval_config"] = eval_config
if eval_input_config_path:
eval_input_config = input_reader_pb2.InputReader()
with tf.gfile.GFile(eval_input_config_path, "r") as f:
text_format.Merge(f.read(), eval_input_config)
configs["eval_input_configs"] = [eval_input_config]
if graph_rewriter_config_path:
configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
graph_rewriter_config_path)
return configs
def get_number_of_classes(model_config):
"""Returns the number of classes for a detection model.
Args:
model_config: A model_pb2.DetectionModel.
Returns:
Number of classes.
Raises:
ValueError: If the model type is not recognized.
"""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
return model_config.faster_rcnn.num_classes
if meta_architecture == "ssd":
return model_config.ssd.num_classes
raise ValueError("Expected the model to be one of 'faster_rcnn' or 'ssd'.")
def get_optimizer_type(train_config):
"""Returns the optimizer type for training.
Args:
train_config: A train_pb2.TrainConfig.
Returns:
The type of the optimizer
"""
return train_config.optimizer.WhichOneof("optimizer")
def get_learning_rate_type(optimizer_config):
"""Returns the learning rate type for training.
Args:
optimizer_config: An optimizer_pb2.Optimizer.
Returns:
The type of the learning rate.
"""
return optimizer_config.learning_rate.WhichOneof("learning_rate")
def _is_generic_key(key):
"""Determines whether the key starts with a generic config dictionary key."""
for prefix in [
"graph_rewriter_config",
"model",
"train_input_config",
"train_config",
"eval_config"]:
if key.startswith(prefix + "."):
return True
return False
def _check_and_convert_legacy_input_config_key(key):
"""Checks key and converts legacy input config update to specific update.
Args:
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: always returns None since legacy input config key never
specifies the target input config. Keeping this output only to match the
output form defined for input config update.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
"""
key_name = None
input_name = None
field_name = key
is_valid_input_config_key = True
if field_name == "train_shuffle":
key_name = "train_input_config"
field_name = "shuffle"
elif field_name == "eval_shuffle":
key_name = "eval_input_configs"
field_name = "shuffle"
elif field_name == "train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
elif field_name == "append_train_input_path":
key_name = "train_input_config"
field_name = "input_path"
elif field_name == "append_eval_input_path":
key_name = "eval_input_configs"
field_name = "input_path"
else:
is_valid_input_config_key = False
return is_valid_input_config_key, key_name, input_name, field_name
def check_and_parse_input_config_key(configs, key):
"""Checks key and returns specific fields if key is valid input config update.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: string indicates the target of update operation.
Returns:
is_valid_input_config_key: A boolean indicating whether the input key is to
update input config(s).
key_name: 'eval_input_configs' or 'train_input_config' string if
is_valid_input_config_key is true. None if is_valid_input_config_key is
false.
input_name: the name of the input config to be updated. None if
is_valid_input_config_key is false.
field_name: the field name in input config. `key` itself if
is_valid_input_config_key is false.
Raises:
ValueError: when the input key format doesn't match any known formats.
ValueError: if key_name doesn't match 'eval_input_configs' or
'train_input_config'.
ValueError: if input_name doesn't match any name in train or eval input
configs.
ValueError: if field_name doesn't match any supported fields.
"""
key_name = None
input_name = None
field_name = None
fields = key.split(":")
if len(fields) == 1:
field_name = key
return _check_and_convert_legacy_input_config_key(key)
elif len(fields) == 3:
key_name = fields[0]
input_name = fields[1]
field_name = fields[2]
else:
raise ValueError("Invalid key format when overriding configs.")
# Checks if key_name is valid for specific update.
if key_name not in ["eval_input_configs", "train_input_config"]:
raise ValueError("Invalid key_name when overriding input config.")
# Checks if input_name is valid for specific update. For train input config it
# should match configs[key_name].name, for eval input configs it should match
# the name field of one of the eval_input_configs.
if isinstance(configs[key_name], input_reader_pb2.InputReader):
is_valid_input_name = configs[key_name].name == input_name
else:
is_valid_input_name = input_name in [
eval_input_config.name for eval_input_config in configs[key_name]
]
if not is_valid_input_name:
raise ValueError("Invalid input_name when overriding input config.")
# Checks if field_name is valid for specific update.
if field_name not in [
"input_path", "label_map_path", "shuffle", "mask_type",
"sample_1_of_n_examples"
]:
raise ValueError("Invalid field_name when overriding input config.")
return True, key_name, input_name, field_name
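# --- Illustrative sketch (editor's addition, not part of the original module). ---
# The two accepted key formats, assuming `configs` came from
# get_configs_from_pipeline_file() and has an eval input config named "eval_coco":
#
#   # Legacy single-string key (targets the only eval input config):
#   check_and_parse_input_config_key(configs, "eval_input_path")
#   # -> (True, "eval_input_configs", None, "input_path")
#
#   # Specific colon-separated key (targets the eval input config named "eval_coco"):
#   check_and_parse_input_config_key(configs, "eval_input_configs:eval_coco:input_path")
#   # -> (True, "eval_input_configs", "eval_coco", "input_path")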
def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
"""Updates `configs` dictionary based on supplied parameters.
This utility is for modifying specific fields in the object detection configs.
Say that one would like to experiment with different learning rates, momentum
values, or batch sizes. Rather than creating a new config text file for each
experiment, one can use a single base config file, and update particular
values.
There are two types of field overrides:
1. Strategy-based overrides, which update multiple relevant configuration
options. For example, updating `learning_rate` will update both the warmup and
final learning rates.
In this case, the key can be in one of the following formats:
1. legacy update: single string that indicates the attribute to be
updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
eval_input_configs, the override will only be applied when
eval_input_configs has exactly 1 element.
2. specific update: colon separated string that indicates which field in
which input_config to update. It should have 3 fields:
- key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
- input_name: a 'name' that can be used to identify elements, especially
when configs[key_name] is a repeated field.
- field_name: name of the field that you want to override.
For example, given configs dict as below:
configs = {
'model': {...}
'train_config': {...}
'train_input_config': {...}
'eval_config': {...}
'eval_input_configs': [{ name:"eval_coco", ...},
{ name:"eval_voc", ... }]
}
Assume we want to update the input_path of the eval_input_config
whose name is 'eval_coco'. The `key` would then be:
'eval_input_configs:eval_coco:input_path'
2. Generic key/value, which update a specific parameter based on namespaced
configuration keys. For example,
`model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update the
hard example miner configuration for an SSD model config. Generic overrides
are automatically detected based on the namespaced keys.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
hparams: A `HParams`.
kwargs_dict: Extra keyword arguments that are treated the same way as
attribute/value pairs in `hparams`. Note that hyperparameters with the
same names will override keyword arguments.
Returns:
`configs` dictionary.
Raises:
ValueError: when the key string doesn't match any of its allowed formats.
"""
if kwargs_dict is None:
kwargs_dict = {}
if hparams:
kwargs_dict.update(hparams.values())
for key, value in kwargs_dict.items():
tf.logging.info("Maybe overwriting %s: %s", key, value)
# pylint: disable=g-explicit-bool-comparison
if value == "" or value is None:
continue
# pylint: enable=g-explicit-bool-comparison
elif _maybe_update_config_with_key_value(configs, key, value):
continue
elif _is_generic_key(key):
_update_generic(configs, key, value)
else:
tf.logging.info("Ignoring config override key: %s", key)
return configs
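# --- Illustrative sketch (editor's addition, not part of the original module). ---
# Hypothetical overrides applied to a base pipeline config; the path is a
# placeholder and the generic key is the one quoted in the docstring above:
#
#   configs = get_configs_from_pipeline_file("path/to/pipeline.config")
#   configs = merge_external_params_with_configs(
#       configs,
#       kwargs_dict={
#           "learning_rate": 0.01,   # strategy-based override
#           "batch_size": 64,        # strategy-based override
#           "model.ssd.loss.hard_example_miner.max_negatives_per_positive": 3,
#       })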
def _maybe_update_config_with_key_value(configs, key, value):
"""Checks key type and updates `configs` with the key value pair accordingly.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key: String indicates the field(s) to be updated.
value: Value used to override existing field value.
Returns:
A boolean value that indicates whether the override succeeds.
Raises:
ValueError: when the key string doesn't match any of the formats above.
"""
is_valid_input_config_key, key_name, input_name, field_name = (
check_and_parse_input_config_key(configs, key))
if is_valid_input_config_key:
update_input_reader_config(
configs,
key_name=key_name,
input_name=input_name,
field_name=field_name,
value=value)
elif field_name == "learning_rate":
_update_initial_learning_rate(configs, value)
elif field_name == "batch_size":
_update_batch_size(configs, value)
elif field_name == "momentum_optimizer_value":
_update_momentum_optimizer_value(configs, value)
elif field_name == "classification_localization_weight_ratio":
# Localization weight is fixed to 1.0.
_update_classification_localization_weight_ratio(configs, value)
elif field_name == "focal_loss_gamma":
_update_focal_loss_gamma(configs, value)
elif field_name == "focal_loss_alpha":
_update_focal_loss_alpha(configs, value)
elif field_name == "train_steps":
_update_train_steps(configs, value)
elif field_name == "label_map_path":
_update_label_map_path(configs, value)
elif field_name == "mask_type":
_update_mask_type(configs, value)
elif field_name == "sample_1_of_n_eval_examples":
_update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
elif field_name == "eval_num_epochs":
_update_all_eval_input_configs(configs, "num_epochs", value)
elif field_name == "eval_with_moving_averages":
_update_use_moving_averages(configs, value)
elif field_name == "retain_original_images_in_eval":
_update_retain_original_images(configs["eval_config"], value)
elif field_name == "use_bfloat16":
_update_use_bfloat16(configs, value)
else:
return False
return True
def _update_tf_record_input_path(input_config, input_path):
"""Updates input configuration to reflect a new input path.
The input_config object is updated in place, and hence not returned.
Args:
input_config: A input_reader_pb2.InputReader.
input_path: A path to data or list of paths.
Raises:
TypeError: if input reader type is not `tf_record_input_reader`.
"""
input_reader_type = input_config.WhichOneof("input_reader")
if input_reader_type == "tf_record_input_reader":
input_config.tf_record_input_reader.ClearField("input_path")
if isinstance(input_path, list):
input_config.tf_record_input_reader.input_path.extend(input_path)
else:
input_config.tf_record_input_reader.input_path.append(input_path)
else:
raise TypeError("Input reader type must be `tf_record_input_reader`.")
def update_input_reader_config(configs,
key_name=None,
input_name=None,
field_name=None,
value=None,
path_updater=_update_tf_record_input_path):
"""Updates specified input reader config field.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
key_name: Name of the input config we should update, either
'train_input_config' or 'eval_input_configs'
input_name: String name used to identify input config to update with. Should
be either None or value of the 'name' field in one of the input reader
configs.
field_name: Field name in input_reader_pb2.InputReader.
value: Value used to override existing field value.
path_updater: helper function used to update the input path. Only used when
field_name is "input_path".
Raises:
ValueError: when input field_name is None.
ValueError: when input_name is None and number of eval_input_readers does
not equal to 1.
"""
if isinstance(configs[key_name], input_reader_pb2.InputReader):
# Updates singular input_config object.
target_input_config = configs[key_name]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is None and len(configs[key_name]) == 1:
# Updates first (and the only) object of input_config list.
target_input_config = configs[key_name][0]
if field_name == "input_path":
path_updater(input_config=target_input_config, input_path=value)
else:
setattr(target_input_config, field_name, value)
elif input_name is not None and len(configs[key_name]):
# Updates input_config whose name matches input_name.
update_count = 0
for input_config in configs[key_name]:
if input_config.name == input_name:
setattr(input_config, field_name, value)
update_count = update_count + 1
if not update_count:
raise ValueError(
"Input name {} not found when overriding.".format(input_name))
elif update_count > 1:
raise ValueError("Duplicate input name found when overriding.")
else:
key_name = "None" if key_name is None else key_name
input_name = "None" if input_name is None else input_name
field_name = "None" if field_name is None else field_name
raise ValueError("Unknown input config overriding: "
"key_name:{}, input_name:{}, field_name:{}.".format(
key_name, input_name, field_name))
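# --- Illustrative sketch (editor's addition, not part of the original module). ---
# Pointing the (singular) train input reader at a new record file; the path is a
# placeholder and the reader is assumed to be a tf_record_input_reader. Because
# field_name is "input_path", the default path_updater
# (_update_tf_record_input_path) is used:
#
#   update_input_reader_config(
#       configs,
#       key_name="train_input_config",
#       input_name=None,
#       field_name="input_path",
#       value="path/to/train.record")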
def _update_initial_learning_rate(configs, learning_rate):
"""Updates `configs` to reflect the new initial learning rate.
This function updates the initial learning rate. For learning rate schedules,
all other defined learning rates in the pipeline config are scaled to maintain
their same ratio with the initial learning rate.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
learning_rate: Initial learning rate for optimizer.
Raises:
TypeError: if optimizer type is not supported, or if learning rate type is
not supported.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
elif optimizer_type == "adam_optimizer":
optimizer_config = configs["train_config"].optimizer.adam_optimizer
else:
raise TypeError("Optimizer %s is not supported." % optimizer_type)
learning_rate_type = get_learning_rate_type(optimizer_config)
if learning_rate_type == "constant_learning_rate":
constant_lr = optimizer_config.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate
elif learning_rate_type == "exponential_decay_learning_rate":
exponential_lr = (
optimizer_config.learning_rate.exponential_decay_learning_rate)
exponential_lr.initial_learning_rate = learning_rate
elif learning_rate_type == "manual_step_learning_rate":
manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
original_learning_rate = manual_lr.initial_learning_rate
learning_rate_scaling = float(learning_rate) / original_learning_rate
manual_lr.initial_learning_rate = learning_rate
for schedule in manual_lr.schedule:
schedule.learning_rate *= learning_rate_scaling
elif learning_rate_type == "cosine_decay_learning_rate":
cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
learning_rate_base = cosine_lr.learning_rate_base
warmup_learning_rate = cosine_lr.warmup_learning_rate
warmup_scale_factor = warmup_learning_rate / learning_rate_base
cosine_lr.learning_rate_base = learning_rate
cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
else:
raise TypeError("Learning rate %s is not supported." % learning_rate_type)
def _update_batch_size(configs, batch_size):
"""Updates `configs` to reflect the new training batch size.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
batch_size: Batch size to use for training (Ideally a power of 2). Inputs
are rounded, and capped to be 1 or greater.
"""
configs["train_config"].batch_size = max(1, int(round(batch_size)))
def _validate_message_has_field(message, field):
if not message.HasField(field):
raise ValueError("Expecting message to have field %s" % field)
def _update_generic(configs, key, value):
"""Update a pipeline configuration parameter based on a generic key/value.
Args:
configs: Dictionary of pipeline configuration protos.
key: A string key, dot-delimited to represent the argument key.
e.g. "model.ssd.train_config.batch_size"
value: A value to set the argument to. The type of the value must match the
type for the protocol buffer. Note that setting the wrong type will
result in a TypeError.
e.g. 42
Raises:
ValueError: if the message key does not match the existing proto fields.
TypeError: if the value type doesn't match the protobuf field type.
"""
fields = key.split(".")
first_field = fields.pop(0)
last_field = fields.pop()
message = configs[first_field]
for field in fields:
_validate_message_has_field(message, field)
message = getattr(message, field)
_validate_message_has_field(message, last_field)
setattr(message, last_field, value)
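# --- Illustrative sketch (editor's addition, not part of the original module). ---
# A generic dot-delimited override drills into the proto fields of one top-level
# config. Using the example key from the merge_external_params_with_configs
# docstring:
#
#   _update_generic(
#       configs, "model.ssd.loss.hard_example_miner.max_negatives_per_positive", 3)
#
# This starts from configs["model"], walks .ssd.loss.hard_example_miner (raising
# ValueError if an intermediate field is unset), and sets
# max_negatives_per_positive to 3.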
def _update_momentum_optimizer_value(configs, momentum):
"""Updates `configs` to reflect the new momentum value.
Momentum is only supported for RMSPropOptimizer and MomentumOptimizer. For any
other optimizer, no changes take place. The configs dictionary is updated in
place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
momentum: New momentum value. Values are clipped at 0.0 and 1.0.
Raises:
TypeError: If the optimizer type is not `rms_prop_optimizer` or
`momentum_optimizer`.
"""
optimizer_type = get_optimizer_type(configs["train_config"])
if optimizer_type == "rms_prop_optimizer":
optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
elif optimizer_type == "momentum_optimizer":
optimizer_config = configs["train_config"].optimizer.momentum_optimizer
else:
raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
"`momentum_optimizer`.")
optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)
def _update_classification_localization_weight_ratio(configs, ratio):
"""Updates the classification/localization weight loss ratio.
Detection models usually define a loss weight for both classification and
objectness. This function updates the weights such that the ratio between
classification weight to localization weight is the ratio provided.
Arbitrarily, localization weight is set to 1.0.
Note that in the case of Faster R-CNN, this same ratio is applied to the first
stage objectness loss weight relative to localization loss weight.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
ratio: Desired ratio of classification (and/or objectness) loss weight to
localization loss weight.
"""
meta_architecture = configs["model"].WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = configs["model"].faster_rcnn
model.first_stage_localization_loss_weight = 1.0
model.first_stage_objectness_loss_weight = ratio
model.second_stage_localization_loss_weight = 1.0
model.second_stage_classification_loss_weight = ratio
if meta_architecture == "ssd":
model = configs["model"].ssd
model.loss.localization_weight = 1.0
model.loss.classification_weight = ratio
def _get_classification_loss(model_config):
"""Returns the classification loss for a model."""
meta_architecture = model_config.WhichOneof("model")
if meta_architecture == "faster_rcnn":
model = model_config.faster_rcnn
classification_loss = model.second_stage_classification_loss
elif meta_architecture == "ssd":
model = model_config.ssd
classification_loss = model.loss.classification_loss
else:
raise TypeError("Did not recognize the model architecture.")
return classification_loss
def _update_focal_loss_gamma(configs, gamma):
"""Updates the gamma value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
gamma: Exponent term in focal loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.gamma = gamma
def _update_focal_loss_alpha(configs, alpha):
"""Updates the alpha value for a sigmoid focal loss.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
alpha: Class weight multiplier for sigmoid loss.
Raises:
TypeError: If the classification loss is not `weighted_sigmoid_focal`.
"""
classification_loss = _get_classification_loss(configs["model"])
classification_loss_type = classification_loss.WhichOneof(
"classification_loss")
if classification_loss_type != "weighted_sigmoid_focal":
raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
classification_loss.weighted_sigmoid_focal.alpha = alpha
def _update_train_steps(configs, train_steps):
"""Updates `configs` to reflect new number of training steps."""
configs["train_config"].num_steps = int(train_steps)
def _update_eval_steps(configs, eval_steps):
"""Updates `configs` to reflect new number of eval steps per evaluation."""
configs["eval_config"].num_examples = int(eval_steps)
def _update_all_eval_input_configs(configs, field, value):
"""Updates the content of `field` with `value` for all eval input configs."""
for eval_input_config in configs["eval_input_configs"]:
setattr(eval_input_config, field, value)
def _update_label_map_path(configs, label_map_path):
"""Updates the label map path for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
label_map_path: New path to `StringIntLabelMap` pbtxt file.
"""
configs["train_input_config"].label_map_path = label_map_path
_update_all_eval_input_configs(configs, "label_map_path", label_map_path)
def _update_mask_type(configs, mask_type):
"""Updates the mask type for both train and eval input readers.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
mask_type: A string name representing a value of
input_reader_pb2.InstanceMaskType
"""
configs["train_input_config"].mask_type = mask_type
_update_all_eval_input_configs(configs, "mask_type", mask_type)
def _update_use_moving_averages(configs, use_moving_averages):
"""Updates the eval config option to use or not use moving averages.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_moving_averages: Boolean indicating whether moving average variables
should be loaded during evaluation.
"""
configs["eval_config"].use_moving_averages = use_moving_averages
def _update_retain_original_images(eval_config, retain_original_images):
"""Updates eval config with option to retain original images.
The eval_config object is updated in place, and hence not returned.
Args:
eval_config: A eval_pb2.EvalConfig.
retain_original_images: Boolean indicating whether to retain original images
in eval mode.
"""
eval_config.retain_original_images = retain_original_images
def _update_use_bfloat16(configs, use_bfloat16):
"""Updates `configs` to reflect the new setup on whether to use bfloat16.
The configs dictionary is updated in place, and hence not returned.
Args:
configs: Dictionary of configuration objects. See outputs from
get_configs_from_pipeline_file() or get_configs_from_multiple_files().
use_bfloat16: A bool, indicating whether to use bfloat16 for training.
"""
configs["train_config"].use_bfloat16 = use_bfloat16
| 38.814973
| 80
| 0.7473
|
f66aa47124f47c307ae7f2ee24b939df77566559
| 2,648
|
py
|
Python
|
app/models.py
|
tinabayi/blogs
|
690db73b8f2b9976217e19ab432cc42dd0fd83fd
|
[
"MIT"
] | null | null | null |
app/models.py
|
tinabayi/blogs
|
690db73b8f2b9976217e19ab432cc42dd0fd83fd
|
[
"MIT"
] | null | null | null |
app/models.py
|
tinabayi/blogs
|
690db73b8f2b9976217e19ab432cc42dd0fd83fd
|
[
"MIT"
] | null | null | null |
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255),index = True)
blogs = db.relationship('Blog',backref = 'user',lazy="dynamic")
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(255))
profile_pic_path = db.Column(db.String())
pass_secure = db.Column(db.String(255))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Blog(db.Model):
__tablename__ = 'blogs'
id = db.Column(db.Integer,primary_key = True)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
description = db.Column(db.String(255))
comments = db.relationship('Comment',backref = 'blog',lazy="dynamic")
def save_blogs(self):
db.session.add(self)
db.session.commit()
@classmethod
def clear_blogs(cls):
Blog.all_blogs.clear()
@classmethod
def get_blogs(cls):
blogs=Blog.query.all()
return blogs
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
blog_id = db.Column(db.Integer,db.ForeignKey('blogs.id'))
comment = db.Column(db.String(255))
def save_comments(self):
db.session.add(self)
db.session.commit()
@classmethod
def clear_blogs(cls):
Blog.all_blogs.clear()
@classmethod
def get_comments(cls):
all_comments=Comment.query.all()
return all_comments
def delete_comment(self):
db.session.delete(self)
db.session.commit()
class Subscribe(db.Model):
__tablename__ = 'subsribes'
id = db.Column(db.Integer,primary_key = True)
email = db.Column(db.String(255))
def __repr__(self):
return f'Subscribe {self.email}'
class Quote:
'''
Quote class to define Quote Objects
'''
def __init__(self,author,id,quote):
self.author=author
self.id =id
self.quote=quote
| 24.072727
| 74
| 0.646903
|
eeda4fc09d510ab7c3a8844b50360f132d3934eb
| 1,374
|
py
|
Python
|
cred-append.py
|
BlackDiverX/CredCompilator
|
737cc3e7e7992cd7c49dd5f5222371d275f1d4a1
|
[
"Apache-2.0"
] | 6
|
2017-11-02T16:26:10.000Z
|
2021-06-07T10:01:29.000Z
|
cred-append.py
|
BlackDiverX/CredCompilator
|
737cc3e7e7992cd7c49dd5f5222371d275f1d4a1
|
[
"Apache-2.0"
] | null | null | null |
cred-append.py
|
BlackDiverX/CredCompilator
|
737cc3e7e7992cd7c49dd5f5222371d275f1d4a1
|
[
"Apache-2.0"
] | 1
|
2019-09-04T12:03:09.000Z
|
2019-09-04T12:03:09.000Z
|
#!/usr/bin/python
# cred-append.py
# Version: 1.0
# License: Apache License Version 2.0
# Author: Georgii Starostin
# E-mail: blackdiverx@gmail.com
# Site: http://BlackDiver.net
import sys
if len (sys.argv) != 6:
print ("Утилита для добавления текста в начало или конец строки.")
print ("Синтаксис:")
print ("python cred-append.py <InFile> <append-text> <position> <format> <OutFile>")
print ("<InFile> - входной файл;")
print ("<append-text> - текст для добавления в конец строки;")
print ("<position> - добавление в начало [start] или конец [end] строки;")
print ("<format> - формат вывода[unix|win|mac]. Устаналивает формат перевода строки;")
print ("<OutFile> - файл результатов.")
print ("")
print ("Пример:")
print ("python cred-append.py Logins.txt a end unix Result.txt")
exit();
def formattype(x):
return{
"unix":"\n",
"win":"\r\n",
"mac":"\r"
}.get(x)
with open(sys.argv[1] , "r") as ins:
Farray = []
for line in ins:
Farray.append((line))
f = open(sys.argv[5],'w')
if sys.argv[3] == 'end':
i = 0
while i<len(Farray):
f.write((Farray[i]).rstrip('\r').rstrip('\n')+sys.argv[2]+formattype(sys.argv[4]))
i=i+1
if sys.argv[3] == 'start':
i = 0
while i<len(Farray):
f.write(sys.argv[2]+(Farray[i]).rstrip('\r').rstrip('\n')+formattype(sys.argv[4]))
i=i+1
f.close()
| 28.040816
| 88
| 0.614993
|
32a46fe7ceb6cfa9bbefaad5a1bdfa42c1fccef5
| 7,253
|
py
|
Python
|
src/audio/aubio/python/tests/test_specdesc.py
|
vrushank-agrawal/video_editor_BX23
|
3a458114f499e0ba3d1c61afde2b9d30bc76459b
|
[
"Apache-2.0"
] | null | null | null |
src/audio/aubio/python/tests/test_specdesc.py
|
vrushank-agrawal/video_editor_BX23
|
3a458114f499e0ba3d1c61afde2b9d30bc76459b
|
[
"Apache-2.0"
] | null | null | null |
src/audio/aubio/python/tests/test_specdesc.py
|
vrushank-agrawal/video_editor_BX23
|
3a458114f499e0ba3d1c61afde2b9d30bc76459b
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
from numpy.testing import TestCase, assert_equal, assert_almost_equal
from numpy import random, arange, log, zeros
from aubio import specdesc, cvec, float_type
methods = ["default",
"energy",
"hfc",
"complex",
"phase",
"specdiff",
"kl",
"mkl",
"specflux",
"centroid",
"spread",
"skewness",
"kurtosis",
"slope",
"decrease",
"rolloff"]
buf_size = 2048
class aubio_specdesc(TestCase):
def test_members(self):
o = specdesc()
for method in methods:
o = specdesc(method, buf_size)
assert_equal ([o.buf_size, o.method], [buf_size, method])
spec = cvec(buf_size)
spec.norm[0] = 1
spec.norm[1] = 1./2.
#print "%20s" % method, str(o(spec))
o(spec)
spec.norm = random.random_sample((len(spec.norm),)).astype(float_type)
spec.phas = random.random_sample((len(spec.phas),)).astype(float_type)
#print "%20s" % method, str(o(spec))
assert (o(spec) != 0.)
def test_phase(self):
o = specdesc("phase", buf_size)
spec = cvec(buf_size)
# phase of zeros is zero
assert_equal (o(spec), 0.)
spec.phas = random.random_sample((len(spec.phas),)).astype(float_type)
# phase of random is not zero
spec.norm[:] = 1
assert (o(spec) != 0.)
def test_specdiff(self):
o = specdesc("phase", buf_size)
spec = cvec(buf_size)
# specdiff of zeros is zero
assert_equal (o(spec), 0.)
spec.phas = random.random_sample((len(spec.phas),)).astype(float_type)
# phase of random is not zero
spec.norm[:] = 1
assert (o(spec) != 0.)
def test_hfc(self):
o = specdesc("hfc")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
assert_equal (a, c.norm)
assert_equal ( sum(a*(a+1)), o(c))
def test_complex(self):
o = specdesc("complex")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
assert_equal (a, c.norm)
# the previous run was on zeros, so previous frames are still 0
# so we have sqrt ( abs ( r2 ^ 2) ) == r2
assert_equal ( sum(a), o(c))
# second time. c.norm = a, so r1 = r2, and the euclidean distance is 0
assert_equal ( 0, o(c))
def test_kl(self):
o = specdesc("kl")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
assert_almost_equal( sum(a * log(1.+ a/1.e-1 ) ) / o(c), 1., decimal=6)
def test_mkl(self):
o = specdesc("mkl")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
assert_almost_equal( sum(log(1.+ a/1.e-1 ) ) / o(c), 1, decimal=6)
def test_specflux(self):
o = specdesc("specflux")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
assert_equal( sum(a), o(c))
assert_equal( 0, o(c))
c.norm = zeros(c.length, dtype=float_type)
assert_equal( 0, o(c))
def test_centroid(self):
o = specdesc("centroid")
c = cvec()
# make sure centroid of zeros is zero
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
centroid = sum(a*a) / sum(a)
assert_almost_equal (centroid, o(c), decimal = 2)
c.norm = a * .5
assert_almost_equal (centroid, o(c), decimal = 2)
def test_spread(self):
o = specdesc("spread")
c = cvec(1024)
ramp = arange(c.length, dtype=float_type)
assert_equal( 0., o(c))
a = ramp
c.norm = a
centroid = sum(a*a) / sum(a)
spread = sum( a * pow(ramp - centroid, 2.) ) / sum(a)
assert_almost_equal (o(c), spread, decimal = 1)
def test_skewness(self):
o = specdesc("skewness")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
centroid = sum(a*a) / sum(a)
spread = sum( (a - centroid)**2 *a) / sum(a)
skewness = sum( (a - centroid)**3 *a) / sum(a) / spread **1.5
assert_almost_equal (skewness, o(c), decimal = 2)
c.norm = a * 3
assert_almost_equal (skewness, o(c), decimal = 2)
def test_kurtosis(self):
o = specdesc("kurtosis")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length, dtype=float_type)
c.norm = a
centroid = sum(a*a) / sum(a)
spread = sum( (a - centroid)**2 *a) / sum(a)
kurtosis = sum( (a - centroid)**4 *a) / sum(a) / spread **2
assert_almost_equal (kurtosis, o(c), decimal = 2)
def test_slope(self):
o = specdesc("slope")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length * 2, 0, -2, dtype=float_type)
k = arange(c.length, dtype=float_type)
c.norm = a
num = len(a) * sum(k*a) - sum(k)*sum(a)
den = (len(a) * sum(k**2) - sum(k)**2)
slope = num/den/sum(a)
assert_almost_equal (slope, o(c), decimal = 5)
a = arange(0, c.length * 2, +2, dtype=float_type)
c.norm = a
num = len(a) * sum(k*a) - sum(k)*sum(a)
den = (len(a) * sum(k**2) - sum(k)**2)
slope = num/den/sum(a)
assert_almost_equal (slope, o(c), decimal = 5)
a = arange(0, c.length * 2, +2, dtype=float_type)
c.norm = a * 2
assert_almost_equal (slope, o(c), decimal = 5)
def test_decrease(self):
o = specdesc("decrease")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length * 2, 0, -2, dtype=float_type)
k = arange(c.length, dtype=float_type)
c.norm = a
decrease = sum((a[1:] - a [0]) / k[1:]) / sum(a[1:])
assert_almost_equal (decrease, o(c), decimal = 5)
a = arange(0, c.length * 2, +2, dtype=float_type)
c.norm = a
decrease = sum((a[1:] - a [0]) / k[1:]) / sum(a[1:])
assert_almost_equal (decrease, o(c), decimal = 5)
a = arange(0, c.length * 2, +2, dtype=float_type)
c.norm = a * 2
decrease = sum((a[1:] - a [0]) / k[1:]) / sum(a[1:])
assert_almost_equal (decrease, o(c), decimal = 5)
def test_rolloff(self):
o = specdesc("rolloff")
c = cvec()
assert_equal( 0., o(c))
a = arange(c.length * 2, 0, -2, dtype=float_type)
c.norm = a
cumsum = .95*sum(a*a)
i = 0; rollsum = 0
while rollsum < cumsum:
rollsum += a[i]*a[i]
i+=1
rolloff = i
assert_equal (rolloff, o(c))
class aubio_specdesc_wrong(TestCase):
def test_negative(self):
with self.assertRaises(ValueError):
specdesc("default", -10)
def test_unknown(self):
with self.assertRaises(RuntimeError):
specdesc("unknown", 512)
if __name__ == '__main__':
from unittest import main
main()
| 31.128755
| 82
| 0.524886
|
ddc83e627025704c5e7fbc401781c975fad633b4
| 648
|
py
|
Python
|
e2e/scripts/st_info.py
|
kamito/streamlit
|
af68a915b3a1f37ddd411d081e430dad70869c45
|
[
"Apache-2.0"
] | 19,099
|
2019-08-25T14:00:15.000Z
|
2022-03-31T21:00:28.000Z
|
e2e/scripts/st_info.py
|
linzhou-zhong/streamlit
|
fde1b548e4bf2d2e5a97b5c3fcf655d43134b342
|
[
"Apache-2.0"
] | 3,078
|
2019-08-25T19:50:14.000Z
|
2022-03-31T23:26:14.000Z
|
e2e/scripts/st_info.py
|
linzhou-zhong/streamlit
|
fde1b548e4bf2d2e5a97b5c3fcf655d43134b342
|
[
"Apache-2.0"
] | 1,892
|
2019-08-26T04:44:24.000Z
|
2022-03-30T16:11:51.000Z
|
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
st.info("This info message is awesome!")
| 36
| 74
| 0.762346
|
20d9a4c2812c63b13e845105f6b7111076c05881
| 1,471
|
py
|
Python
|
machinelearn2.py
|
varnaugj/Python-Early-Codes
|
3b659529c65dc608eaf41ec5d5ffaa4c18704946
|
[
"MIT"
] | null | null | null |
machinelearn2.py
|
varnaugj/Python-Early-Codes
|
3b659529c65dc608eaf41ec5d5ffaa4c18704946
|
[
"MIT"
] | null | null | null |
machinelearn2.py
|
varnaugj/Python-Early-Codes
|
3b659529c65dc608eaf41ec5d5ffaa4c18704946
|
[
"MIT"
] | null | null | null |
#News Article specific reader
#import nltk
#from nltk import sent_tokenize
#from nltk import word_tokenize
import requests
import newspaper
from bs4 import BeautifulSoup as bs
from newspaper import Article
def news():
#target we want to open
url = 'https://www.cnn.com'
papertest = newspaper.build(url)
for article in papertest.articles:
print(article.url)
#open with GET method
resp = requests.get(url)
# HTTP response 200 means the link works
if resp.status_code==200:
print("Successfully opened the web page")
print("the news are as follow :-\n")
# article = Article(url)
# article.download()
# article.parse()
# article.nlp()
# text = article.text
# print (type(text))
# print("\n")
# print(text)
# print("\n")
# print(len(text))
# print(article.keywords)
else:
print("Error")
#news()
def news2():
#url = 'https://www.cnn.com/politics'
url = 'https://www.foxnews.com/politics'
#open with GET method
resp = requests.get(url)
soup = bs(resp.text, 'html.parser')
rawtext = soup.get_text()
# HTTP response 200 means the link works
if resp.status_code==200:
print("Successfully opened the web page")
print("the news are as follow :-\n")
for link in soup.find_all('a'):
print(link.get('href'))
else:
print("Error")
news2()
| 21.014286
| 49
| 0.600272
|
44609016798ad96c96c6d1b464e4a38f819977d8
| 11,761
|
py
|
Python
|
pandadoc_client/model/document_create_request_content_placeholders.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 27
|
2021-11-16T11:30:13.000Z
|
2022-03-17T08:56:18.000Z
|
pandadoc_client/model/document_create_request_content_placeholders.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | null | null | null |
pandadoc_client/model/document_create_request_content_placeholders.py
|
PandaDoc/pandadoc-api-python-client
|
a707c540e788eee485cc338f29ca363acca4973e
|
[
"MIT"
] | 2
|
2021-12-16T13:38:15.000Z
|
2022-01-09T00:38:00.000Z
|
"""
PandaDoc Public API
PandaDoc Public API documentation # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from pandadoc_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from pandadoc_client.exceptions import ApiAttributeError
def lazy_import():
from pandadoc_client.model.document_create_request_content_library_items import DocumentCreateRequestContentLibraryItems
globals()['DocumentCreateRequestContentLibraryItems'] = DocumentCreateRequestContentLibraryItems
class DocumentCreateRequestContentPlaceholders(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'block_id': (str,), # noqa: E501
'content_library_items': ([DocumentCreateRequestContentLibraryItems],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'block_id': 'block_id', # noqa: E501
'content_library_items': 'content_library_items', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""DocumentCreateRequestContentPlaceholders - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
block_id (str): Content placeholder block id. [optional] # noqa: E501
content_library_items ([DocumentCreateRequestContentLibraryItems]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DocumentCreateRequestContentPlaceholders - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
block_id (str): Content placeholder block id. [optional] # noqa: E501
content_library_items ([DocumentCreateRequestContentLibraryItems]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.381132
| 124
| 0.583539
|
13139a39a5b5ddcc410924d54dba2ad031ca1239
| 1,057
|
py
|
Python
|
data-handling-scripts/split_rid_newendo.py
|
bolero2/DeepLearning-dc
|
680266128d5a7aff590e2d6b9b71cb340b95c2ab
|
[
"Apache-2.0"
] | 2
|
2021-04-23T03:49:30.000Z
|
2021-04-23T03:49:33.000Z
|
data-handling-scripts/split_rid_newendo.py
|
bolero2/DeepLearning-dc
|
680266128d5a7aff590e2d6b9b71cb340b95c2ab
|
[
"Apache-2.0"
] | null | null | null |
data-handling-scripts/split_rid_newendo.py
|
bolero2/DeepLearning-dc
|
680266128d5a7aff590e2d6b9b71cb340b95c2ab
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import glob
import os
import random
import shutil as sh
df = pd.read_csv("RID_for_test_newENDO.csv")
rid = list(df.loc[df['GUBUN'] == "TRAIN"]['RID'])
print(f'RID= {rid}')
root = os.getcwd()
print(root)
os.chdir(f'{root}/yolo_dataset/')
for imagename in glob.glob('*.jpg'):
three = imagename[0:3]
if three[-1] == '내':
idnum = three[0]
elif three[-1] == '_':
idnum = three[0:2]
else:
idnum = three
if int(idnum) in rid:
print(f'Target= {idnum}')
sh.copy(f'{root}/yolo_dataset/{imagename}', f'{root}/rid/train/')
"""
if len(idx) == 1:
os.chdir(root + "/" + idx)
full_path = f'{root}/{idx}/'
# print(os.getcwd())
filelist = os.listdir()
for filename in filelist:
file_rid = filename[0:8]
if file_rid in rid8:
shutil.copy(f'{full_path}{filename}', f'{root}/test/{idx}/')
else:
shutil.copy(f'{full_path}{filename}', f'{root}/train/{idx}/')
"""
| 25.166667
| 77
| 0.535478
|
7d07851c3aa3f4962dd40b267f3fdf81c60126f3
| 3,492
|
py
|
Python
|
src/mykrobe/typing/models/variant.py
|
chamilaadikaram/mykrobe
|
2bcebf7b37f1c1416f397374da6ebfd02ce1aead
|
[
"MIT"
] | 1
|
2020-08-08T01:08:01.000Z
|
2020-08-08T01:08:01.000Z
|
src/mykrobe/typing/models/variant.py
|
chamilaadikaram/mykrobe
|
2bcebf7b37f1c1416f397374da6ebfd02ce1aead
|
[
"MIT"
] | null | null | null |
src/mykrobe/typing/models/variant.py
|
chamilaadikaram/mykrobe
|
2bcebf7b37f1c1416f397374da6ebfd02ce1aead
|
[
"MIT"
] | null | null | null |
import datetime
import json
import logging
logger = logging.getLogger(__name__)
class VariantProbeCoverage(object):
def __init__(self, reference_coverages,
alternate_coverages,
var_name=None,
params={}):
self.reference_coverages = reference_coverages
self.alternate_coverages = alternate_coverages
self.var_name = var_name
self.params = params
if self.reference_coverages and self.alternate_coverages:
self.best_alternate_coverage = self._choose_best_alternate_coverage()
self.best_reference_coverage = self._choose_best_reference_coverage()
def _choose_best_coverage(self, coverages):
coverages.sort(
key=lambda x: x.k_count,
reverse=True)
current_best = coverages[0]
for probe_coverage in coverages[1:]:
if probe_coverage.k_count < current_best.k_count:
current_best = current_best
else:
if probe_coverage.percent_coverage > current_best.percent_coverage:
current_best = probe_coverage
elif probe_coverage.min_depth > current_best.min_depth:
current_best = probe_coverage
elif probe_coverage.min_depth <= current_best.min_depth:
if probe_coverage.median_depth > current_best.median_depth:
current_best = probe_coverage
return current_best
def _choose_best_alternate_coverage(self):
return self._choose_best_coverage(self.alternate_coverages)
def _choose_best_reference_coverage(self):
best_reference_coverage = self._choose_best_coverage(
self.reference_coverages)
return best_reference_coverage
@property
def coverage_dict(self):
return {"reference": self.best_reference_coverage.coverage_dict,
"alternate": self.best_alternate_coverage.coverage_dict
}
def __str__(self):
d = self.coverage_dict
d['variant'] = self.var_name
return json.dumps(d)
def __repr__(self):
return self.__str__()
@property
def reference_coverage(self):
return self.best_reference_coverage
@property
def reference_percent_coverage(self):
return self.best_reference_coverage.percent_coverage
@property
def reference_kmer_count(self):
return self.best_reference_coverage.k_count
@property
def reference_median_depth(self):
return self.best_reference_coverage.median_depth
@property
def reference_min_depth(self):
return self.best_reference_coverage.min_depth
@property
def reference_klen(self):
return self.best_reference_coverage.klen
@property
def alternate_percent_coverage(self):
return self.best_alternate_coverage.percent_coverage
@alternate_percent_coverage.setter
def alternate_percent_coverage(self, value):
self.best_alternate_coverage.percent_coverage = value
@property
def alternate_median_depth(self):
return self.best_alternate_coverage.median_depth
@property
def alternate_kmer_count(self):
return self.best_alternate_coverage.k_count
@property
def alternate_min_depth(self):
return self.best_alternate_coverage.min_depth
@property
def alternate_klen(self):
return self.best_alternate_coverage.klen
| 32.333333
| 83
| 0.684994
|
0a61e3a9ac1bd1b8232eb8bb893b97a4327bc46c
| 8,166
|
py
|
Python
|
Lib/concurrent/futures/thread.py
|
MaxNoe/cpython
|
29d018aa63b72161cfc67602dc3dbd386272da64
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2021-03-26T10:54:41.000Z
|
2021-03-26T10:54:41.000Z
|
Lib/concurrent/futures/thread.py
|
MaxNoe/cpython
|
29d018aa63b72161cfc67602dc3dbd386272da64
|
[
"CNRI-Python-GPL-Compatible"
] | 4
|
2022-03-30T01:50:22.000Z
|
2022-03-30T01:50:28.000Z
|
Lib/concurrent/futures/thread.py
|
MaxNoe/cpython
|
29d018aa63b72161cfc67602dc3dbd386272da64
|
[
"CNRI-Python-GPL-Compatible"
] | 1
|
2021-02-01T20:44:21.000Z
|
2021-02-01T20:44:21.000Z
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import atexit
from concurrent.futures import _base
import itertools
import queue
import threading
import weakref
import os
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as exc:
self.future.set_exception(exc)
# Break a reference cycle with the exception 'exc'
self = None
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue, initializer, initargs):
if initializer is not None:
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
executor = executor_reference()
if executor is not None:
executor._initializer_failed()
return
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Flag the executor as shutting down as early as possible if it
# is not gc-ed yet.
if executor is not None:
executor._shutdown = True
                # Notify other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class BrokenThreadPool(_base.BrokenExecutor):
"""
    Raised when a worker thread in a ThreadPoolExecutor fails to initialize.
"""
class ThreadPoolExecutor(_base.Executor):
# Used to assign unique thread names when thread_name_prefix is not supplied.
_counter = itertools.count().__next__
def __init__(self, max_workers=None, thread_name_prefix='',
initializer=None, initargs=()):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
thread_name_prefix: An optional name prefix to give our threads.
            initializer: A callable used to initialize worker threads.
initargs: A tuple of arguments to pass to the initializer.
"""
if max_workers is None:
# Use this number because ThreadPoolExecutor is often
# used to overlap I/O instead of CPU work.
max_workers = (os.cpu_count() or 1) * 5
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
if initializer is not None and not callable(initializer):
raise TypeError("initializer must be a callable")
self._max_workers = max_workers
self._work_queue = queue.SimpleQueue()
self._threads = set()
self._broken = False
self._shutdown = False
self._shutdown_lock = threading.Lock()
self._thread_name_prefix = (thread_name_prefix or
("ThreadPoolExecutor-%d" % self._counter()))
self._initializer = initializer
self._initargs = initargs
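    # Illustrative note (not part of the original module): a sketch of a
    # typical construction, assuming a hypothetical open_connection() helper:
    #
    #   tls = threading.local()
    #   def _init_session(url):
    #       tls.conn = open_connection(url)   # runs once in every worker
    #   pool = ThreadPoolExecutor(max_workers=8,
    #                             thread_name_prefix="fetcher",
    #                             initializer=_init_session,
    #                             initargs=("https://example.org",))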
def submit(*args, **kwargs):
if len(args) >= 2:
self, fn, *args = args
elif not args:
raise TypeError("descriptor 'submit' of 'ThreadPoolExecutor' object "
"needs an argument")
elif 'fn' in kwargs:
fn = kwargs.pop('fn')
self, *args = args
import warnings
warnings.warn("Passing 'fn' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError('submit expected at least 1 positional argument, '
'got %d' % (len(args)-1))
with self._shutdown_lock:
if self._broken:
raise BrokenThreadPool(self._broken)
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
if _shutdown:
raise RuntimeError('cannot schedule new futures after '
'interpreter shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
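    # Illustrative note (not part of the original module): submit() returns a
    # Future at once and the callable runs on a worker thread, e.g.
    #
    #   pool = ThreadPoolExecutor(max_workers=4)
    #   fut = pool.submit(pow, 2, 10)
    #   fut.result()     # 1024, blocks until the worker has finished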
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
num_threads = len(self._threads)
if num_threads < self._max_workers:
thread_name = '%s_%d' % (self._thread_name_prefix or self,
num_threads)
t = threading.Thread(name=thread_name, target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue,
self._initializer,
self._initargs))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
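    # Illustrative note (not part of the original module): threads are created
    # lazily, at most one per submit() and never more than _max_workers; once
    # the cap is reached, queued items are served by the existing threads.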
def _initializer_failed(self):
with self._shutdown_lock:
self._broken = ('A thread initializer failed, the thread pool '
'is not usable anymore')
# Drain work queue and mark pending futures failed
while True:
try:
work_item = self._work_queue.get_nowait()
except queue.Empty:
break
if work_item is not None:
work_item.future.set_exception(BrokenThreadPool(self._broken))
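    # Illustrative note (not part of the original module): after an initializer
    # has raised, self._broken holds a message string, submit() raises
    # BrokenThreadPool, and every future still in the queue is failed with the
    # same exception -- the pool cannot be used again.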
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
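# Illustrative note (not part of the original module): shutdown(wait=True) is
# what Executor.__exit__ calls, so the usual idiom is the context manager,
# which joins all workers before the block is left:
#
#   with ThreadPoolExecutor() as pool:
#       fut = pool.submit(sum, [1, 2, 3])
#   fut.result()       # 6; the pool is already shut down here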
| 36.950226
| 82
| 0.59613
|