Dataset schema (one row per source file; ⌀ marks columns that may be null):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
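A minimal sketch of how rows with this schema could be read programmatically, using the Hugging Face `datasets` library. The dataset identifier below is a placeholder (this export does not name the dataset); the column names come from the schema above.

from datasets import load_dataset  # pip install datasets

ds = load_dataset("org/dataset-name", split="train", streaming=True)  # "org/dataset-name" is a placeholder
for row in ds:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:120])  # first characters of the stored source file
    break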

hexsha: f1a6184fa2d53d7b5baa96f74858af1bfa6bb07d | size: 707 | ext: py | lang: Python
max_stars: repo kaoslabsinc/django-building-blocks @ b5f74121b4002e2b96b5addd6f2593c1a2cead98 | path example/complex_factories_sample/migrations/0003_hasautoslugexample.py | licenses ["BSD-3-Clause"] | stars 1 (2021-08-03T12:42:37.000Z to 2021-08-03T12:42:37.000Z)
max_issues: repo kaoslabsinc/django-building-blocks @ b5f74121b4002e2b96b5addd6f2593c1a2cead98 | path example/complex_factories_sample/migrations/0003_hasautoslugexample.py | licenses ["BSD-3-Clause"] | issues 4 (2021-07-27T18:22:35.000Z to 2021-08-06T21:55:39.000Z)
max_forks: repo kaoslabsinc/django-building-blocks @ b5f74121b4002e2b96b5addd6f2593c1a2cead98 | path example/complex_factories_sample/migrations/0003_hasautoslugexample.py | licenses ["BSD-3-Clause"] | forks null (null to null)
content:
# Generated by Django 3.2.5 on 2021-07-27 00:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('complex_factories_sample', '0002_hasautocodegeneratefunctionexample'),
]
operations = [
migrations.CreateModel(
name='HasAutoSlugExample',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=255, unique=True)),
],
options={
'abstract': False,
},
),
]
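For orientation, the CreateModel operation above corresponds to a model roughly like the sketch below. This is an illustrative reconstruction only; the actual model in django-building-blocks presumably uses the library's auto-slug building block rather than declaring the fields by hand.

from django.db import models

class HasAutoSlugExample(models.Model):  # hypothetical reconstruction from the migration fields
    name = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)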
avg_line_length: 28.28 | max_line_length: 117 | alphanum_fraction: 0.577086

hexsha: ee4b91a0875fe093ad5e3948fb3edd18c322e9e8 | size: 19,293 | ext: py | lang: Python
max_stars: repo EnricoMagnago/F3 @ c863215c318d7d5f258eb9be38c6962cf6863b52 | path benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/18-sender_receiver_35.py | licenses ["MIT"] | stars 3 (2021-04-23T23:29:26.000Z to 2022-03-23T10:00:30.000Z)
max_issues: repo EnricoMagnago/F3 @ c863215c318d7d5f258eb9be38c6962cf6863b52 | path benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/18-sender_receiver_35.py | licenses ["MIT"] | issues null (null to null)
max_forks: repo EnricoMagnago/F3 @ c863215c318d7d5f258eb9be38c6962cf6863b52 | path benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/18-sender_receiver_35.py | licenses ["MIT"] | forks 1 (2021-11-17T22:02:56.000Z to 2021-11-17T22:02:56.000Z)
content:
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
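For reference, the five helpers above reduce subtraction, the missing comparison operators, and implication to MathSAT's plus/times/leq/not/or primitives; in formula form:

$$a - b = a + (-1)\cdot b,\qquad a \ge b \iff b \le a,\qquad a < b \iff \lnot(b \le a),\qquad a > b \iff \lnot(a \le b),\qquad (a \rightarrow b) \iff (\lnot a \lor b).$$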
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (c' = 0 & out_c' = out_c &
# (wait_ack' <-> (in_c != msg_id & c > timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i1))
hint = Hint("h_r2s1", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(1, mgr.Not(x_s_l))
loc1 = Location(env, mgr.Not(s_l))
loc1.set_progress(0, x_s_l)
hint = Hint("h_s_l1", env, frozenset([s_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, mgr.Plus(s_c, r1)))
hint = Hint("h_s_c1", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(1, mgr.Not(x_s_evt))
loc1 = Location(env, mgr.Not(s_evt))
loc1.set_progress(0, x_s_evt)
hint = Hint("h_s_evt1", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i1))
hint = Hint("h_s2r1", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.Equals(r2s, i0))
loc0.set_progress(0, mgr.Equals(x_r2s, i0))
hint = Hint("h_r2s0", env, frozenset([r2s]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r0))
hint = Hint("h_delta0", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_evt)
loc0.set_progress(0, x_s_evt)
hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_c, r0))
loc0.set_progress(0, mgr.Equals(x_s_c, r0))
hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, mgr.Plus(delta, r1)))
hint = Hint("h_delta2", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
avg_line_length: 38.053254 | max_line_length: 89 | alphanum_fraction: 0.579226

hexsha: ba62ea267175dae4e3191a0bd55fee8d9e39dea7 | size: 15,301 | ext: py | lang: Python
max_stars: repo phamlehuy53/mmcv @ 519b4ec028bdf2cc4ec32195fb78e6111774a004 | path setup.py | licenses ["Apache-2.0"] | stars 1 (2021-08-22T14:47:13.000Z to 2021-08-22T14:47:13.000Z)
max_issues: repo zsinba/mmcv @ f31f1cdb8ee6671a512d6756d2f0dfd68d04272f | path setup.py | licenses ["Apache-2.0"] | issues null (null to null)
max_forks: repo zsinba/mmcv @ f31f1cdb8ee6671a512d6756d2f0dfd68d04272f | path setup.py | licenses ["Apache-2.0"] | forks 1 (2020-12-10T08:35:35.000Z to 2020-12-10T08:35:35.000Z)
content:
import glob
import os
import platform
import re
from pkg_resources import DistributionNotFound, get_distribution
from setuptools import find_packages, setup
EXT_TYPE = ''
try:
import torch
if torch.__version__ == 'parrots':
from parrots.utils.build_extension import BuildExtension
EXT_TYPE = 'parrots'
else:
from torch.utils.cpp_extension import BuildExtension
EXT_TYPE = 'pytorch'
cmd_class = {'build_ext': BuildExtension}
except ModuleNotFoundError:
cmd_class = {}
print('Skip building ext ops due to the absence of torch.')
def choose_requirement(primary, secondary):
"""If some version of primary requirement installed, return primary, else
return secondary."""
try:
name = re.split(r'[!<>=]', primary)[0]
get_distribution(name)
except DistributionNotFound:
return secondary
return str(primary)
def get_version():
version_file = 'mmcv/version.py'
with open(version_file, 'r', encoding='utf-8') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def parse_requirements(fname='requirements/runtime.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
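A quick, editorial illustration of the parser above (not part of the original setup.py; the temporary file name and its contents are made up): parse_requirements keeps the version operator and re-attaches any platform marker given after a semicolon.

import os
import tempfile

tmp = tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False)
tmp.write('numpy>=1.20\n# comment lines are skipped\nopencv-python>=3; platform_system != "Windows"\n')
tmp.close()
print(parse_requirements(tmp.name))
# expected: ['numpy>=1.20', 'opencv-python>=3;platform_system != "Windows"']
os.unlink(tmp.name)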
install_requires = parse_requirements()
try:
# OpenCV installed via conda.
import cv2 # NOQA: F401
major, minor, *rest = cv2.__version__.split('.')
if int(major) < 3:
raise RuntimeError(
f'OpenCV >=3 is required but {cv2.__version__} is installed')
except ImportError:
# If first not installed install second package
CHOOSE_INSTALL_REQUIRES = [('opencv-python-headless>=3',
'opencv-python>=3')]
for main, secondary in CHOOSE_INSTALL_REQUIRES:
install_requires.append(choose_requirement(main, secondary))
def get_extensions():
extensions = []
if os.getenv('MMCV_WITH_TRT', '0') != '0':
ext_name = 'mmcv._ext_trt'
from torch.utils.cpp_extension import include_paths, library_paths
library_dirs = []
libraries = []
include_dirs = []
tensorrt_path = os.getenv('TENSORRT_DIR', '0')
tensorrt_lib_path = glob.glob(
os.path.join(tensorrt_path, 'targets', '*', 'lib'))[0]
library_dirs += [tensorrt_lib_path]
libraries += ['nvinfer', 'nvparsers', 'nvinfer_plugin']
libraries += ['cudart']
define_macros = []
extra_compile_args = {'cxx': []}
include_path = os.path.abspath('./mmcv/ops/csrc/common/cuda')
include_trt_path = os.path.abspath('./mmcv/ops/csrc/tensorrt')
include_dirs.append(include_path)
include_dirs.append(include_trt_path)
include_dirs.append(os.path.join(tensorrt_path, 'include'))
include_dirs += include_paths(cuda=True)
op_files = glob.glob('./mmcv/ops/csrc/tensorrt/plugins/*')
define_macros += [('MMCV_WITH_CUDA', None)]
define_macros += [('MMCV_WITH_TRT', None)]
cuda_args = os.getenv('MMCV_CUDA_ARGS')
extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
# prevent cub/thrust conflict with other python library
# More context See issues #1454
extra_compile_args['nvcc'] += ['-Xcompiler=-fno-gnu-unique']
library_dirs += library_paths(cuda=True)
from setuptools import Extension
ext_ops = Extension(
name=ext_name,
sources=op_files,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
language='c++',
library_dirs=library_dirs,
libraries=libraries)
extensions.append(ext_ops)
if os.getenv('MMCV_WITH_OPS', '0') == '0':
return extensions
if EXT_TYPE == 'parrots':
ext_name = 'mmcv._ext'
from parrots.utils.build_extension import Extension
# new parrots op impl do not use MMCV_USE_PARROTS
# define_macros = [('MMCV_USE_PARROTS', None)]
define_macros = []
include_dirs = []
op_files = glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') +\
glob.glob('./mmcv/ops/csrc/parrots/*.cpp')
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
cuda_args = os.getenv('MMCV_CUDA_ARGS')
extra_compile_args = {
'nvcc': [cuda_args] if cuda_args else [],
'cxx': [],
}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('MMCV_WITH_CUDA', None)]
extra_compile_args['nvcc'] += [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
ext_ops = Extension(
name=ext_name,
sources=op_files,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
cuda=True,
pytorch=True)
extensions.append(ext_ops)
elif EXT_TYPE == 'pytorch':
ext_name = 'mmcv._ext'
from torch.utils.cpp_extension import CppExtension, CUDAExtension
# prevent ninja from using too many resources
try:
import psutil
num_cpu = len(psutil.Process().cpu_affinity())
cpu_use = max(4, num_cpu - 1)
except (ModuleNotFoundError, AttributeError):
cpu_use = 4
os.environ.setdefault('MAX_JOBS', str(cpu_use))
define_macros = []
# Before PyTorch1.8.0, when compiling CUDA code, `cxx` is a
# required key passed to PyTorch. Even if there is no flag passed
# to cxx, users also need to pass an empty list to PyTorch.
# Since PyTorch1.8.0, it has a default value so users do not need
# to pass an empty list anymore.
# More details at https://github.com/pytorch/pytorch/pull/45956
extra_compile_args = {'cxx': []}
# Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
# c++14 features, the argument ['std=c++14'] must be added here.
# However, in the windows environment, some standard libraries
# will depend on c++17 or higher. In fact, for the windows
# environment, the compiler will choose the appropriate compiler
# to compile those cpp files, so there is no need to add the
# argument
if platform.system() != 'Windows':
extra_compile_args['cxx'] = ['-std=c++14']
include_dirs = []
is_rocm_pytorch = False
try:
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = True if ((torch.version.hip is not None) and
(ROCM_HOME is not None)) else False
except ImportError:
pass
project_dir = 'mmcv/ops/csrc/'
if is_rocm_pytorch:
from torch.utils.hipify import hipify_python
hipify_python.hipify(
project_directory=project_dir,
output_directory=project_dir,
includes='mmcv/ops/csrc/*',
show_detailed=True,
is_pytorch_extension=True,
)
define_macros += [('MMCV_WITH_CUDA', None)]
define_macros += [('HIP_DIFF', None)]
cuda_args = os.getenv('MMCV_CUDA_ARGS')
extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
op_files = glob.glob('./mmcv/ops/csrc/pytorch/hip/*') \
+ glob.glob('./mmcv/ops/csrc/pytorch/cpu/hip/*')
extension = CUDAExtension
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/hip'))
elif torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('MMCV_WITH_CUDA', None)]
cuda_args = os.getenv('MMCV_CUDA_ARGS')
extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp') + \
glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cu') + \
glob.glob('./mmcv/ops/csrc/pytorch/cuda/*.cpp')
extension = CUDAExtension
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common/cuda'))
else:
print(f'Compiling {ext_name} without CUDA')
op_files = glob.glob('./mmcv/ops/csrc/pytorch/*.cpp') + \
glob.glob('./mmcv/ops/csrc/pytorch/cpu/*.cpp')
extension = CppExtension
include_dirs.append(os.path.abspath('./mmcv/ops/csrc/common'))
# Since the PR (https://github.com/open-mmlab/mmcv/pull/1463) uses
# c++14 features, the argument ['std=c++14'] must be added here.
# However, in the windows environment, some standard libraries
# will depend on c++17 or higher. In fact, for the windows
# environment, the compiler will choose the appropriate compiler
# to compile those cpp files, so there is no need to add the
# argument
if 'nvcc' in extra_compile_args and platform.system() != 'Windows':
extra_compile_args['nvcc'] += ['-std=c++14']
ext_ops = extension(
name=ext_name,
sources=op_files,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args)
extensions.append(ext_ops)
if EXT_TYPE == 'pytorch' and os.getenv('MMCV_WITH_ORT', '0') != '0':
ext_name = 'mmcv._ext_ort'
from torch.utils.cpp_extension import library_paths, include_paths
import onnxruntime
library_dirs = []
libraries = []
include_dirs = []
ort_path = os.getenv('ONNXRUNTIME_DIR', '0')
library_dirs += [os.path.join(ort_path, 'lib')]
libraries.append('onnxruntime')
define_macros = []
extra_compile_args = {'cxx': []}
include_path = os.path.abspath('./mmcv/ops/csrc/onnxruntime')
include_dirs.append(include_path)
include_dirs.append(os.path.join(ort_path, 'include'))
op_files = glob.glob('./mmcv/ops/csrc/onnxruntime/cpu/*')
if onnxruntime.get_device() == 'GPU' or os.getenv('FORCE_CUDA',
'0') == '1':
define_macros += [('MMCV_WITH_CUDA', None)]
cuda_args = os.getenv('MMCV_CUDA_ARGS')
extra_compile_args['nvcc'] = [cuda_args] if cuda_args else []
op_files += glob.glob('./mmcv/ops/csrc/onnxruntime/gpu/*')
include_dirs += include_paths(cuda=True)
library_dirs += library_paths(cuda=True)
else:
include_dirs += include_paths(cuda=False)
library_dirs += library_paths(cuda=False)
from setuptools import Extension
ext_ops = Extension(
name=ext_name,
sources=op_files,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
language='c++',
library_dirs=library_dirs,
libraries=libraries)
extensions.append(ext_ops)
return extensions
setup(
name='mmcv' if os.getenv('MMCV_WITH_OPS', '0') == '0' else 'mmcv-full',
version=get_version(),
description='OpenMMLab Computer Vision Foundation',
keywords='computer vision',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Utilities',
],
url='https://github.com/open-mmlab/mmcv',
author='MMCV Contributors',
author_email='openmmlab@gmail.com',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
install_requires=install_requires,
ext_modules=get_extensions(),
cmdclass=cmd_class,
zip_safe=False)
avg_line_length: 39.639896 | max_line_length: 125 | alphanum_fraction: 0.588785

hexsha: ecf47c21389da8806ec1dc196d69d07887e5acf2 | size: 318 | ext: py | lang: Python
max_stars: repo EldiiarDzhunusov/Code @ 6b0708e4007233d3efdc74c09d09ee5bc377a45d | path Google/Google CodeJam Preparation/temp.py | licenses ["MIT"] | stars 2 (2020-10-12T06:50:03.000Z to 2021-06-08T17:19:43.000Z)
max_issues: repo EldiiarDzhunusov/Code @ 6b0708e4007233d3efdc74c09d09ee5bc377a45d | path Google/Google CodeJam Preparation/temp.py | licenses ["MIT"] | issues null (null to null)
max_forks: repo EldiiarDzhunusov/Code @ 6b0708e4007233d3efdc74c09d09ee5bc377a45d | path Google/Google CodeJam Preparation/temp.py | licenses ["MIT"] | forks 1 (2020-12-22T16:44:50.000Z to 2020-12-22T16:44:50.000Z)
content:
t = int(input(""))
c = 1  # case counter; initialize once so case numbers increase across test cases
while t != 0:
    size = int(input(""))    # path length (read but not otherwise used)
    lydia = list(input(""))  # Lydia's path, e.g. "SE"
    myroad = []
    for i in range(len(lydia)):
        # Take the opposite move at each step so the two paths never share an edge.
        if lydia[i] == "E":
            myroad.append("S")
        else:
            myroad.append("E")
    print("Case #{}:".format(c), "".join(myroad))  # e.g. "SE" -> "Case #1: ES"
    c += 1
    t -= 1
avg_line_length: 19.875 | max_line_length: 48 | alphanum_fraction: 0.433962

hexsha: 4bb73b7aaa5b7699ed617db6c7d9b0b3afb94442 | size: 4,988 | ext: py | lang: Python
max_stars: repo lvdongbing/bilean @ 592f5fb53e3bceee35a01d0171905b282bc9a3db | path bilean/common/utils.py | licenses ["Apache-2.0"] | stars 2 (2016-01-03T11:20:42.000Z to 2016-01-06T06:41:51.000Z)
max_issues: repo lvdongbing/bilean @ 592f5fb53e3bceee35a01d0171905b282bc9a3db | path bilean/common/utils.py | licenses ["Apache-2.0"] | issues null (null to null)
max_forks: repo lvdongbing/bilean @ 592f5fb53e3bceee35a01d0171905b282bc9a3db | path bilean/common/utils.py | licenses ["Apache-2.0"] | forks null (null to null)
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Utilities module.
'''
import random
import string
from cryptography.fernet import Fernet
import requests
from requests import exceptions
from six.moves import urllib
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import strutils
from bilean.common import exception
from bilean.common.i18n import _
from bilean.common.i18n import _LI
cfg.CONF.import_opt('max_response_size', 'bilean.common.config')
LOG = logging.getLogger(__name__)
class URLFetchError(exception.Error, IOError):
pass
def parse_int_param(name, value, allow_zero=True, allow_negative=False,
lower_limit=None, upper_limit=None):
if value is None:
return None
if value in ('0', 0):
if allow_zero:
return int(value)
raise exception.InvalidParameter(name=name, value=value)
try:
result = int(value)
except (TypeError, ValueError):
raise exception.InvalidParameter(name=name, value=value)
else:
if any([(allow_negative is False and result < 0),
(lower_limit and result < lower_limit),
(upper_limit and result > upper_limit)]):
raise exception.InvalidParameter(name=name, value=value)
return result
def parse_bool_param(name, value):
if str(value).lower() not in ('true', 'false'):
raise exception.InvalidParameter(name=name, value=str(value))
return strutils.bool_from_string(value, strict=True)
def url_fetch(url, allowed_schemes=('http', 'https')):
'''Get the data at the specified URL.
The URL must use the http: or https: schemes.
The file: scheme is also supported if you override
the allowed_schemes argument.
Raise an IOError if getting the data fails.
'''
LOG.info(_LI('Fetching data from %s'), url)
components = urllib.parse.urlparse(url)
if components.scheme not in allowed_schemes:
raise URLFetchError(_('Invalid URL scheme %s') % components.scheme)
if components.scheme == 'file':
try:
return urllib.request.urlopen(url).read()
except urllib.error.URLError as uex:
raise URLFetchError(_('Failed to retrieve data: %s') % uex)
try:
resp = requests.get(url, stream=True)
resp.raise_for_status()
# We cannot use resp.text here because it would download the entire
# file, and a large enough file would bring down the engine. The
# 'Content-Length' header could be faked, so it's necessary to
        # download the content in chunks until max_response_size is reached.
# The chunk_size we use needs to balance CPU-intensive string
# concatenation with accuracy (eg. it's possible to fetch 1000 bytes
# greater than max_response_size with a chunk_size of 1000).
reader = resp.iter_content(chunk_size=1000)
result = ""
for chunk in reader:
result += chunk
if len(result) > cfg.CONF.max_response_size:
raise URLFetchError("Data exceeds maximum allowed size (%s"
" bytes)" % cfg.CONF.max_response_size)
return result
except exceptions.RequestException as ex:
raise URLFetchError(_('Failed to retrieve data: %s') % ex)
def encrypt(msg):
'''Encrypt message with random key.
:param msg: message to be encrypted
:returns: encrypted msg and key to decrypt
'''
password = Fernet.generate_key()
f = Fernet(password)
key = f.encrypt(encodeutils.safe_encode(msg))
return encodeutils.safe_decode(password), encodeutils.safe_decode(key)
def decrypt(msg, key):
'''Decrypt message using provided key.
:param msg: encrypted message
:param key: key used to decrypt
:returns: decrypted message string
'''
f = Fernet(encodeutils.safe_encode(msg))
msg = f.decrypt(encodeutils.safe_encode(key))
return encodeutils.safe_decode(msg)
def random_name(length=8):
if length <= 0:
return ''
lead = random.choice(string.ascii_letters)
tail = ''.join(random.choice(string.ascii_letters + string.digits)
for i in range(length-1))
return lead + tail
def format_time(value):
"""Cut microsecond and format to isoformat string."""
if value:
value = value.replace(microsecond=0)
value = value.isoformat()
return value
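A hedged usage sketch for the helpers above (requires the cryptography package to be installed). Note the argument order: encrypt() returns the generated Fernet key first and the ciphertext second, and decrypt() expects them in that same order, even though its first parameter is named msg.

password, ciphertext = encrypt('my secret message')
assert decrypt(password, ciphertext) == 'my secret message'
print(random_name())  # e.g. an 8-character identifier that starts with a letter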
avg_line_length: 31.56962 | max_line_length: 79 | alphanum_fraction: 0.679631

hexsha: ad98409ffd63e1c06ba579a91fcfd251d351303a | size: 219 | ext: py | lang: Python
max_stars: repo mahenzon/aioalice @ f87b2e24c42444b5cb274c95eff20555314ec4f6 | path aioalice/utils/json.py | licenses ["MIT"] | stars 33 (2019-09-22T16:35:40.000Z to 2022-03-24T11:24:05.000Z)
max_issues: repo mahenzon/aioalice @ f87b2e24c42444b5cb274c95eff20555314ec4f6 | path aioalice/utils/json.py | licenses ["MIT"] | issues 7 (2019-09-26T17:43:01.000Z to 2021-02-24T21:08:48.000Z)
max_forks: repo mahenzon/aioalice @ f87b2e24c42444b5cb274c95eff20555314ec4f6 | path aioalice/utils/json.py | licenses ["MIT"] | forks 11 (2019-09-26T09:51:59.000Z to 2022-03-14T16:14:12.000Z)
content:
try:
import simplejson as json
except ImportError:
try:
import rapidjson as json
except ImportError:
try:
import ujson as json
except ImportError:
import json
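A brief usage note, assuming the shim is importable as aioalice.utils.json (inferred from the file path): callers transparently get the fastest backend available, since simplejson, rapidjson, ujson and the standard-library json all expose the dumps/loads interface used here.

from aioalice.utils import json  # import path inferred from aioalice/utils/json.py

payload = json.dumps({'response': {'text': 'hello'}})
assert json.loads(payload)['response']['text'] == 'hello'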
avg_line_length: 19.909091 | max_line_length: 32 | alphanum_fraction: 0.593607

hexsha: 2b88e21162f95e512abb40c2a44e9511c85c43db | size: 1,027 | ext: py | lang: Python
max_stars: repo uktrade/enav-alpha @ 8d38f05763367ca6b6747203241f267612fd6e44 | path apps/markets3/migrations/0003_auto_20160802_1306.py | licenses ["MIT"] | stars null (null to null)
max_issues: repo uktrade/enav-alpha @ 8d38f05763367ca6b6747203241f267612fd6e44 | path apps/markets3/migrations/0003_auto_20160802_1306.py | licenses ["MIT"] | issues 67 (2016-07-11T12:57:58.000Z to 2016-08-08T12:59:19.000Z)
max_forks: repo UKTradeInvestment/enav-alpha @ 8d38f05763367ca6b6747203241f267612fd6e44 | path apps/markets3/migrations/0003_auto_20160802_1306.py | licenses ["MIT"] | forks null (null to null)
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-02 13:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('markets3', '0002_market_platform_type'),
]
operations = [
migrations.AddField(
model_name='market',
name='local_customer_service',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='market',
name='product_type',
            field=models.CharField(blank=True, choices=[('0', 'Luxury'), ('1', 'Mid range'), ('2', 'Discount')],
max_length=1, null=True),
),
migrations.AlterField(
model_name='market',
name='logistics_structure',
field=models.CharField(blank=True, choices=[('0', 'Dropshipping'), ('1', 'Warehousing')],
max_length=1, null=True),
),
]
avg_line_length: 31.121212 | max_line_length: 111 | alphanum_fraction: 0.550146

hexsha: 642ca6d7b895cf09e78cd91aee64b6e8f60a9bdd | size: 1,262 | ext: py | lang: Python
max_stars: repo BTCPrivate/electrum-bitcoinprivate @ d18dbd83353d006136bc986e143e19dbb954c36a | path plugins/digitalbitbox/qt.py | licenses ["MIT"] | stars 1 (2021-04-02T20:35:15.000Z to 2021-04-02T20:35:15.000Z)
max_issues: repo ArdeshirV/electrum-bitcoinprivate @ d18dbd83353d006136bc986e143e19dbb954c36a | path plugins/digitalbitbox/qt.py | licenses ["MIT"] | issues null (null to null)
max_forks: repo ArdeshirV/electrum-bitcoinprivate @ d18dbd83353d006136bc986e143e19dbb954c36a | path plugins/digitalbitbox/qt.py | licenses ["MIT"] | forks 1 (2021-04-06T18:34:31.000Z to 2021-04-06T18:34:31.000Z)
content:
from functools import partial
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from .digitalbitbox import DigitalBitboxPlugin
from electrum_bitcoinprivate.i18n import _
from electrum_bitcoinprivate.plugins import hook
from electrum_bitcoinprivate.wallet import Standard_Wallet
class Plugin(DigitalBitboxPlugin, QtPluginBase):
icon_unpaired = ":icons/digitalbitbox_unpaired.png"
icon_paired = ":icons/digitalbitbox.png"
def create_handler(self, window):
return DigitalBitbox_Handler(window)
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) is not self.keystore_class:
return
if not self.is_mobile_paired():
return
if not keystore.is_p2pkh():
return
if len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, keystore, addrs[0]))
menu.addAction(_("Show on {}").format(self.device), show_address)
class DigitalBitbox_Handler(QtHandlerBase):
def __init__(self, win):
super(DigitalBitbox_Handler, self).__init__(win, 'Digital Bitbox')
avg_line_length: 28.681818 | max_line_length: 91 | alphanum_fraction: 0.693344

hexsha: ae821e00f9cbe2cf504b6b2f1485e72948385fa2 | size: 20,108 | ext: py | lang: Python
max_stars: repo sdolenc/aws-elastic-beanstalk-cli @ 4167a38bd599a4433c62c1d3516b8836248a4171 | path ebcli/operations/platform_version_ops.py | licenses ["Apache-2.0"] | stars 110 (2020-01-15T22:58:46.000Z to 2022-03-27T20:47:33.000Z)
max_issues: repo QPC-database/aws-elastic-beanstalk-cli @ 87ad9d8bbe5e4e7cb01b1bd4392eda33cb1943f7 | path ebcli/operations/platform_version_ops.py | licenses ["Apache-2.0"] | issues 89 (2020-01-15T23:18:34.000Z to 2022-03-31T21:56:05.000Z)
max_forks: repo QPC-database/aws-elastic-beanstalk-cli @ 87ad9d8bbe5e4e7cb01b1bd4392eda33cb1943f7 | path ebcli/operations/platform_version_ops.py | licenses ["Apache-2.0"] | forks 50 (2020-01-15T22:58:53.000Z to 2022-02-11T17:39:28.000Z)
content:
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from datetime import datetime
import os
import sys
import tempfile
from shutil import copyfile, move
import threading
import yaml
from semantic_version import Version
from termcolor import colored
from ebcli.core import io, fileoperations
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, heuristics, s3
from ebcli.objects import api_filters
from ebcli.objects.exceptions import (
InvalidPlatformVersionError,
NotFoundError,
PlatformWorkspaceEmptyError,
ValidationError,
)
from ebcli.objects.platform import PlatformBranch, PlatformVersion
from ebcli.objects.sourcecontrol import SourceControl
from ebcli.operations import commonops, logsops
from ebcli.operations.tagops import tagops
from ebcli.resources.statics import namespaces, option_names
from ebcli.resources.strings import alerts, strings, prompts
from ebcli.resources.regex import PackerRegExpressions, PlatformRegExpressions
class PackerStreamMessage(object):
def __init__(self, event):
self.event = event
def raw_message(self):
event = self.event
if isinstance(event, bytes):
event = event.decode('utf-8')
matches = PackerRegExpressions.LOG_MESSAGE_REGEX.search(event)
return matches.groups(0)[0] if matches else None
def message_severity(self):
matches = PackerRegExpressions.LOG_MESSAGE_SEVERITY_REGEX.search(self.event)
return matches.groups(0)[0] if matches else None
def format(self):
ui_message = self.ui_message()
if ui_message:
return ui_message
other_packer_message = self.other_packer_message()
if other_packer_message:
if sys.version_info < (3, 0):
other_packer_message = other_packer_message.encode('utf-8')
other_packer_message_target = self.other_packer_message_target()
formatted_other_message = '{}:{}'.format(
other_packer_message_target,
other_packer_message
)
if sys.version_info < (3, 0):
formatted_other_message = formatted_other_message.decode('utf-8')
return formatted_other_message
other_message = self.other_message()
if other_message:
return other_message
def ui_message(self):
return self.__return_match(PackerRegExpressions.PACKER_UI_MESSAGE_FORMAT_REGEX)
def other_packer_message(self):
return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_DATA_REGEX)
def other_packer_message_target(self):
return self.__return_match(PackerRegExpressions.PACKER_OTHER_MESSAGE_TARGET_REGEX)
def other_message(self):
return self.__return_match(PackerRegExpressions.OTHER_FORMAT_REGEX)
def __return_match(self, regex):
raw_message = self.raw_message()
if not raw_message:
return
if isinstance(raw_message, bytes):
raw_message = raw_message.decode('utf-8')
matches = regex.search(raw_message)
return matches.groups(0)[0].strip() if matches else None
class PackerStreamFormatter(object):
def format(self, message, stream_name=None):
packer_stream_message = PackerStreamMessage(message)
if packer_stream_message.raw_message():
formatted_message = packer_stream_message.format()
else:
formatted_message = '{0} {1}'.format(stream_name, message)
return formatted_message
def create_platform_version(
version,
major_increment,
minor_increment,
patch_increment,
instance_type,
vpc=None,
staged=False,
timeout=None,
tags=None,
):
_raise_if_directory_is_empty()
_raise_if_platform_definition_file_is_missing()
version and _raise_if_version_format_is_invalid(version)
platform_name = fileoperations.get_platform_name()
instance_profile = fileoperations.get_instance_profile(None)
key_name = commonops.get_default_keyname()
version = version or _resolve_version_number(
platform_name,
major_increment,
minor_increment,
patch_increment
)
tags = tagops.get_and_validate_tags(tags)
source_control = SourceControl.get_source_control()
io.log_warning(strings['sc.unstagedchanges']) if source_control.untracked_changes_exist() else None
version_label = _resolve_version_label(source_control, staged)
bucket, key, file_path = _resolve_s3_bucket_and_key(platform_name, version_label, source_control, staged)
_upload_platform_version_to_s3_if_necessary(bucket, key, file_path)
io.log_info('Creating Platform Version ' + version_label)
response = elasticbeanstalk.create_platform_version(
platform_name, version, bucket, key, instance_profile, key_name, instance_type, tags, vpc)
environment_name = 'eb-custom-platform-builder-packer'
io.echo(colored(
strings['platformbuildercreation.info'].format(environment_name), attrs=['reverse']))
fileoperations.update_platform_version(version)
commonops.set_environment_for_current_branch(environment_name)
stream_platform_logs(response, platform_name, version, timeout)
def delete_platform_version(platform_version, force=False):
arn = version_to_arn(platform_version)
if not force:
io.echo(prompts['platformdelete.confirm'].replace('{platform-arn}', arn))
io.validate_action(prompts['platformdelete.validate'], arn)
environments = []
try:
environments = [env for env in elasticbeanstalk.get_environments() if env.platform.version == arn]
except NotFoundError:
pass
if len(environments) > 0:
_, platform_name, platform_version = PlatformVersion.arn_to_platform(arn)
raise ValidationError(strings['platformdeletevalidation.error'].format(
platform_name,
platform_version,
'\n '.join([env.name for env in environments])
))
response = elasticbeanstalk.delete_platform(arn)
request_id = response['ResponseMetadata']['RequestId']
timeout = 10
commonops.wait_for_success_events(request_id, timeout_in_minutes=timeout, platform_arn=arn)
def describe_custom_platform_version(
owner=None,
platform_arn=None,
platform_name=None,
platform_version=None,
status=None
):
if not platform_arn:
platforms = list_custom_platform_versions(
platform_name=platform_name,
platform_version=platform_version,
status=status
)
platform_arn = platforms[0]
return elasticbeanstalk.describe_platform_version(platform_arn)
def find_custom_platform_version_from_string(solution_string):
available_custom_platforms = list_custom_platform_versions()
for custom_platform_matcher in [
PlatformVersion.match_with_complete_arn,
PlatformVersion.match_with_platform_name,
]:
matched_custom_platform = custom_platform_matcher(available_custom_platforms, solution_string)
if matched_custom_platform:
return matched_custom_platform
def get_latest_custom_platform_version(platform):
"""
:param platform: A custom platform ARN or a custom platform name
:return: A PlatformVersion object representing the latest version of `platform`
"""
account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)
if account_id:
matching_platforms = list_custom_platform_versions(
platform_name=platform_name,
status='Ready'
)
if matching_platforms:
return PlatformVersion(matching_platforms[0])
def get_latest_eb_managed_platform(platform_arn):
account_id, platform_name, platform_version = PlatformVersion.arn_to_platform(platform_arn)
if not account_id:
matching_platforms = list_eb_managed_platform_versions(
platform_name=platform_name,
status='Ready'
)
if matching_platforms:
return PlatformVersion(matching_platforms[0])
def get_latest_platform_version(platform_name=None, owner=None, ignored_states=None):
if ignored_states is None:
ignored_states = ['Deleting', 'Failed']
platforms = get_platforms(
platform_name=platform_name,
ignored_states=ignored_states,
owner=owner,
platform_version="latest"
)
try:
return platforms[platform_name]
except KeyError:
return None
def get_platforms(platform_name=None, ignored_states=None, owner=None, platform_version=None):
platform_list = list_custom_platform_versions(
platform_name=platform_name,
platform_version=platform_version
)
platforms = dict()
for platform in platform_list:
if ignored_states and platform['PlatformStatus'] in ignored_states:
continue
_, platform_name, platform_version = PlatformVersion.arn_to_platform(platform)
platforms[platform_name] = platform_version
return platforms
def get_platform_arn(platform_name, platform_version, owner=None):
platform = describe_custom_platform_version(
platform_name=platform_name,
platform_version=platform_version,
owner=owner
)
if platform:
return platform['PlatformArn']
def get_platform_versions_for_branch(branch_name, recommended_only=False):
filters = [
{
'Type': 'PlatformBranchName',
'Operator': '=',
'Values': [branch_name],
}
]
if recommended_only:
filters.append({
'Type': 'PlatformLifecycleState',
'Operator': '=',
'Values': ['Recommended'],
})
platform_version_summaries = elasticbeanstalk.list_platform_versions(
filters=filters)
return [
PlatformVersion.from_platform_version_summary(summary)
for summary in platform_version_summaries]
def get_preferred_platform_version_for_branch(branch_name):
"""
Gets the latest recommended platform version for a platform branch. If no
    platform versions are recommended it retrieves the latest.
"""
matched_versions = get_platform_versions_for_branch(branch_name)
matched_versions = list(sorted(
matched_versions,
key=lambda x: x.sortable_version,
reverse=True))
recommended_versions = [
version for version in matched_versions if version.is_recommended]
if len(recommended_versions) > 0:
return recommended_versions[0]
elif len(matched_versions) > 0:
return matched_versions[0]
else:
raise NotFoundError(alerts['platform.invalidstring'].format(
branch_name))
def list_custom_platform_versions(
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
filters = [api_filters.PlatformOwnerFilter(values=[Constants.OWNED_BY_SELF]).json()]
return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_eb_managed_platform_versions(
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
filters = [api_filters.PlatformOwnerFilter(values=['AWSElasticBeanstalk']).json()]
return list_platform_versions(filters, platform_name, platform_version, show_status, status)
def list_platform_versions(
filters,
platform_name=None,
platform_version=None,
show_status=False,
status=None
):
if platform_name:
filters.append(
api_filters.PlatformNameFilter(values=[platform_name]).json()
)
if platform_version:
filters.append(
api_filters.PlatformVersionFilter(values=[platform_version]).json()
)
if status:
filters.append(
api_filters.PlatformStatusFilter(values=[status]).json()
)
platforms_list = elasticbeanstalk.list_platform_versions(filters=filters)
return __formatted_platform_descriptions(platforms_list, show_status)
def stream_platform_logs(response, platform_name, version, timeout):
arn = response['PlatformSummary']['PlatformArn']
request_id = response['ResponseMetadata']['RequestId']
streamer = io.get_event_streamer()
builder_events = threading.Thread(
target=logsops.stream_platform_logs,
args=(platform_name, version, streamer, 5, None, PackerStreamFormatter()))
builder_events.daemon = True
builder_events.start()
commonops.wait_for_success_events(
request_id,
platform_arn=arn,
streamer=streamer,
timeout_in_minutes=timeout or 30
)
def version_to_arn(platform_version):
platform_name = fileoperations.get_platform_name()
arn = None
if PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(platform_version):
arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)
elif PlatformVersion.is_valid_arn(platform_version):
arn = platform_version
elif PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version):
match = PlatformRegExpressions.VALID_PLATFORM_SHORT_FORMAT.match(platform_version)
platform_name, platform_version = match.group(1, 2)
arn = get_platform_arn(platform_name, platform_version, owner=Constants.OWNED_BY_SELF)
if not arn:
raise InvalidPlatformVersionError(strings['exit.nosuchplatformversion'])
return arn
def _create_app_version_zip_if_not_present_on_s3(
platform_name,
version_label,
source_control,
staged
):
s3_bucket, s3_key = commonops.get_app_version_s3_location(platform_name, version_label)
file_name, file_path = None, None
if s3_bucket is None and s3_key is None:
file_name, file_path = commonops._zip_up_project(version_label, source_control, staged=staged)
s3_bucket = elasticbeanstalk.get_storage_location()
s3_key = platform_name + '/' + file_name
return s3_bucket, s3_key, file_path
def _datetime_now():
return datetime.now()
def _enable_healthd():
option_settings = []
option_settings.append({
'namespace': namespaces.HEALTH_SYSTEM,
'option_name': option_names.SYSTEM_TYPE,
'value': 'enhanced'
})
option_settings.append({
'namespace': namespaces.ENVIRONMENT,
'option_name': option_names.SERVICE_ROLE,
'value': 'aws-elasticbeanstalk-service-role'
})
fileoperations.ProjectRoot.traverse()
with open('platform.yaml', 'r') as stream:
platform_yaml = yaml.safe_load(stream)
try:
platform_options = platform_yaml['option_settings']
except KeyError:
platform_options = []
options_to_inject = []
for option in option_settings:
found_option = False
for platform_option in platform_options:
            if (option['namespace'] == platform_option['namespace']
                    and option['option_name'] == platform_option['option_name']):
found_option = True
break
if not found_option:
options_to_inject.append(option)
platform_options.extend(options_to_inject)
platform_yaml['option_settings'] = list(platform_options)
with open('platform.yaml', 'w') as stream:
stream.write(yaml.dump(platform_yaml, default_flow_style=False))
def _generate_platform_yaml_copy():
file_descriptor, original_platform_yaml = tempfile.mkstemp()
os.close(file_descriptor)
copyfile('platform.yaml', original_platform_yaml)
return original_platform_yaml
def _raise_if_directory_is_empty():
cwd = os.getcwd()
fileoperations.ProjectRoot.traverse()
try:
if heuristics.directory_is_empty():
raise PlatformWorkspaceEmptyError(strings['exit.platformworkspaceempty'])
finally:
os.chdir(cwd)
def _raise_if_platform_definition_file_is_missing():
if not heuristics.has_platform_definition_file():
raise PlatformWorkspaceEmptyError(strings['exit.no_pdf_file'])
def _raise_if_version_format_is_invalid(version):
if not PlatformRegExpressions.VALID_PLATFORM_VERSION_FORMAT.match(version):
raise InvalidPlatformVersionError(strings['exit.invalidversion'])
def _resolve_s3_bucket_and_key(
platform_name,
version_label,
source_control,
staged
):
platform_yaml_copy = _generate_platform_yaml_copy()
try:
_enable_healthd()
s3_bucket, s3_key, file_path = _create_app_version_zip_if_not_present_on_s3(
platform_name,
version_label,
source_control,
staged
)
finally:
move(platform_yaml_copy, 'platform.yaml')
return s3_bucket, s3_key, file_path
def _resolve_version_label(source_control, staged):
version_label = source_control.get_version_label()
if staged:
timestamp = _datetime_now().strftime("%y%m%d_%H%M%S")
version_label = version_label + '-stage-' + timestamp
return version_label
def _resolve_version_number(
platform_name,
major_increment,
minor_increment,
patch_increment
):
version = get_latest_platform_version(
platform_name=platform_name,
owner=Constants.OWNED_BY_SELF,
ignored_states=[]
)
if version is None:
version = '1.0.0'
else:
major, minor, patch = version.split('.', 3)
if major_increment:
major = str(int(major) + 1)
minor = '0'
patch = '0'
if minor_increment:
minor = str(int(minor) + 1)
patch = '0'
if patch_increment or not(major_increment or minor_increment):
patch = str(int(patch) + 1)
version = "%s.%s.%s" % (major, minor, patch)
return version
def __formatted_platform_descriptions(platforms_list, show_status):
platform_tuples = []
for platform in platforms_list:
platform_tuples.append(
{
'PlatformArn': platform['PlatformArn'],
'PlatformStatus': platform['PlatformStatus']
}
)
platform_tuples.sort(
key=lambda platform_tuple: (
PlatformVersion.get_platform_name(platform_tuple['PlatformArn']),
Version(PlatformVersion.get_platform_version(platform_tuple['PlatformArn']))
),
reverse=True
)
formatted_platform_descriptions = []
for index, platform_tuple in enumerate(platform_tuples):
if show_status:
formatted_platform_description = '{platform_arn} Status: {platform_status}'.format(
platform_arn=platform_tuple['PlatformArn'],
platform_status=platform_tuple['PlatformStatus']
)
else:
formatted_platform_description = platform_tuple['PlatformArn']
formatted_platform_descriptions.append(formatted_platform_description)
return formatted_platform_descriptions
def _upload_platform_version_to_s3_if_necessary(bucket, key, file_path):
try:
s3.get_object_info(bucket, key)
io.log_info('S3 Object already exists. Skipping upload.')
except NotFoundError:
io.log_info('Uploading archive to s3 location: ' + key)
s3.upload_platform_version(bucket, key, file_path)
fileoperations.delete_app_versions()
| 31.517241
| 109
| 0.701562
|
20a6c985d526f34522849a1ac0510dc9b1aa909f
| 1,595
|
py
|
Python
|
tools/vscode-extension/server/tests/test_imports.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | 1
|
2022-03-02T21:54:36.000Z
|
2022-03-02T21:54:36.000Z
|
tools/vscode-extension/server/tests/test_imports.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | null | null | null |
tools/vscode-extension/server/tests/test_imports.py
|
orlandoojr1/wave
|
e86d0c87c6c67e510fb4e1fa571982ca0a09f33c
|
[
"Apache-2.0"
] | null | null | null |
import os
from server.lsp_server import did_save
from server.parser import read_file
from server.tests.utils import BaseTestCase, FakeSaveParams, root_uri
class TestImportCompletions(BaseTestCase):
def test_import_deps(self):
self.assert_state('q.client.', doc_uri=os.path.join(root_uri, 'utils.py'))
def test_remove_import_deps(self):
self.assert_state('q.client.', doc_uri=os.path.join(root_uri, 'utils.py'))
file_path = os.path.join(root_uri, 'main.py')
removed_imports = read_file(file_path).replace('import utils, utils2', '')
# Mock save action.
did_save(self.server, FakeSaveParams(removed_imports, file_path))
completions = self.get_completions('q.client.', doc_uri=os.path.join(root_uri, 'utils.py'))
self.assertEqual(len(completions), 1)
self.assertTrue('regular_import' in completions)
def test_add_import_deps(self):
file_path = os.path.join(root_uri, 'main.py')
removed_imports = read_file(file_path).replace('import utils, utils2', '')
# Mock save action.
did_save(self.server, FakeSaveParams(removed_imports, file_path))
completions = self.get_completions('q.client.', doc_uri=os.path.join(root_uri, 'utils.py'))
self.assertEqual(len(completions), 1)
self.assertTrue('regular_import' in completions)
# Mock save action.
did_save(self.server, FakeSaveParams('import utils, utils2\n' + removed_imports, file_path))
self.assert_state('q.client.', doc_uri=os.path.join(root_uri, 'utils.py'))
| 43.108108
| 100
| 0.692163
|
3d60411b9169a34fe013df41882bae4d5b49762b
| 56,886
|
py
|
Python
|
drgpom/methods/biomarkers/neuron_biomarkers.py
|
oliverbritton/drg-pom
|
8a1455db7791b35501071070b4c74c5b83283dbf
|
[
"MIT"
] | 1
|
2021-05-11T20:10:42.000Z
|
2021-05-11T20:10:42.000Z
|
drgpom/methods/biomarkers/neuron_biomarkers.py
|
oliverbritton/drg-pom
|
8a1455db7791b35501071070b4c74c5b83283dbf
|
[
"MIT"
] | null | null | null |
drgpom/methods/biomarkers/neuron_biomarkers.py
|
oliverbritton/drg-pom
|
8a1455db7791b35501071070b4c74c5b83283dbf
|
[
"MIT"
] | null | null | null |
# neuron_biomarkers.py
# calculation of AP biomarkers from neuronal voltage traces
import sys
import numpy as np
import pandas as pd
from scipy import optimize
from matplotlib import pyplot as plt
from . import davidson_biomarkers as db
from .. import simulation_helpers as sh
from .. import analysis as an
# Biomarkers to manage and analyse neuronal simulation data and potentially experimental
# data too
RHEO_FAIL = np.nan # Code to return if rheobase calculation fails.
# np.nan == np.nan returns False so use is not instead. pom code
# relies on this value being nan to interface with pandas correctly.
def calculate_biomarkers(traces, model):
" Calculate every biomarker and output to dict "
" TODO: Use the rheobase to work out what simulation to run to calculate biomarkers "
" off of (at rheobase) "
    # biomarker_names = ['APFullWidth', 'APPeak', 'APRiseTime', 'APSlopeMin', 'APSlopeMax', 'AHPAmp', 'AHPTau', 'ISI', 'RMP', 'Rheobase']
biomarkers = calculate_simple_biomarkers(traces, model)
biomarkers['RMP'] = np.mean(calculate_rmp(traces))
# Need to do rheobase separately
biomarkers['Rheobase'] = calculate_rheobase(model, amp_step=0.1, amp_max=5, make_plot=False,)
return biomarkers
def average_biomarker_values(biomarkers, how_to_handle_nans='return'):
" Average biomarker values for multiple APs while handling biomarkers that "
if how_to_handle_nans == 'return': # Advantage is we return nan if there are any nans - good for calibration and trouble shooting - shows up weird models easily.
pass
elif how_to_handle_nans == 'remove': # Risky option. Advantage is we still get a number back in mixed cases of nan and non-nan biomarkers, which is potentially risky as it hides a problem in one or more APs.
biomarkers = np.array(biomarkers)
if biomarkers[~np.isnan(biomarkers)].size == 0:
return np.nan
else:
biomarkers = biomarkers[~np.isnan(biomarkers)]
else:
raise ValueError("{} is not an accepted nan handling method.".format(how_to_handle_nans))
mean_result = np.mean(biomarkers)
return mean_result
def calculate_simple_biomarkers(traces, model="Not needed", how_to_handle_nans='return'):
""" Calculate every biomarker that can be calculated from a normal simulation trace and output to dict - rheobase and RMP need to be calculated separately."""
biomarkers = {}
    # biomarker_names = ['APFullWidth', 'APPeak', 'APRiseTime', 'APSlopeMin', 'APSlopeMax', 'AHPAmp', 'AHPTau', 'ISI', 'RMP', 'Rheobase']
def error_handle(filename, traces): # Error handler for finding out why biomarkers are throwing errors.
import pickle
print(sys.exc_info())
print(traces['numAPs'])
plt.figure()
for t,v in zip(traces['t'],traces['v']):
plt.plot(t,v)
with open(filename, 'wb') as handle:
pickle.dump(traces, handle)
print("Error, traces dumped to {}.".format(filename))
try:
biomarkers['APFullWidth'] = average_biomarker_values(calculate_ap_full_width(traces,threshold=5.,method='gradient'), how_to_handle_nans)
except:
error_handle('fullwidth.pickle',traces)
try:
biomarkers['APHalfWidth'] = average_biomarker_values(calculate_ap_half_width(traces,threshold=5.,method='gradient'), how_to_handle_nans)
except:
error_handle('halfwidth.pickle',traces)
biomarkers['APPeak'] = average_biomarker_values(calculate_ap_peak(traces),how_to_handle_nans)
try:
biomarkers['APRiseTime'] = average_biomarker_values(calculate_ap_rise_time(traces,dvdtthreshold=5),how_to_handle_nans)
except:
error_handle('risetime.pickle',traces)
ap_slope_mins, ap_slope_maxs = calculate_ap_slope_min_max(traces)
biomarkers['APSlopeMin'] = average_biomarker_values(ap_slope_mins, how_to_handle_nans)
biomarkers['APSlopeMax'] = average_biomarker_values(ap_slope_maxs, how_to_handle_nans)
biomarkers['Threshold'] = average_biomarker_values(calculate_threshold(traces), how_to_handle_nans)
amp, tau, trough = fit_afterhyperpolarization(traces=traces,dvdt_threshold=5, ahp_model='single_exp', full_output=False)
"""
try:
amp, tau = fit_afterhyperpolarization(traces=traces,dvdt_threshold=5, ahp_model='single_exp', full_output=False)
except:
error_handle('fitahp.pickle',traces)
amp=0
tau=0
"""
biomarkers['AHPAmp'] = amp
biomarkers['AHPTau'] = tau
biomarkers['AHPTrough'] = trough
biomarkers['ISI'] = inter_spike_interval(traces)
biomarkers['numAPs'] = traces['numAPs']
return biomarkers
def compute_model_biomarkers(model=None, mechanisms=None, make_plot=False, sim_kwargs=None, xlims=None):
" Find all standard biomarkers of a model or mechanism set. "
biomarkers = {}
if model == None:
model = sh.build_model(mechanisms)
# Else use model
if sim_kwargs:
sim_kwargs['model'] = model
else:
sim_kwargs = sh.get_default_simulation_kwargs(model=model)
rheobase = calculate_rheobase(model, amp_step=0.1, amp_max=5, make_plot=False, sim_kwargs = sim_kwargs)
if (isinstance(rheobase,float) == False) & (isinstance(rheobase,int) == False):
# Rheobase not found, don't calculate other biomarkers
find_other_biomarkers = False
else:
find_other_biomarkers = True
if sim_kwargs:
sim_kwargs['amp'] = rheobase
sim_kwargs['model'] = model
else:
sim_kwargs = sh.get_default_simulation_kwargs(amp=rheobase, model=model)
sim_kwargs['make_plot'] = make_plot
if find_other_biomarkers:
output = sh.simulation(**sim_kwargs)
t = output['t']; v = output['v']
t = t[::2]; v = v[::2] # 20 kHz
traces = split_trace_into_aps(t,v)
biomarkers = calculate_simple_biomarkers(traces,model,how_to_handle_nans='remove')
# RMP
rmp_kwargs = {'amp':0.0, 'dur':3000., 'delay':0., 'interval':0., 'num_stims':1, 't_stop':3000.}
for kwarg in sim_kwargs:
# Write in sim_kwargs where they are not already present in rmp_kwargs
# so that non-RMP specific kwargs are consistent between simulations
if kwarg not in rmp_kwargs:
rmp_kwargs[kwarg] = sim_kwargs[kwarg]
output = sh.simulation(**rmp_kwargs)
rmp_t = output['t']; rmp_v = output['v']
rmp_t = rmp_t[::2]; rmp_v = rmp_v[::2] # 20 kHz
rmp_traces = split_trace_into_aps(rmp_t,rmp_v)
rmp = np.mean(calculate_rmp(rmp_traces))
if (make_plot & (xlims != None)):
plt.xlim(xlims[0], xlims[1])
# If we calculated other biomarkers, add extras calculated in separate simulations.
# If we didn't add to empty dictionary, will leave nans when added to master dataframe
# which is what we want.
biomarkers['Rheobase'] = rheobase
biomarkers['RMP'] = rmp
return biomarkers
" --- Calculation and trace manipulation functions -- "
def split_trace_into_aps(t,v,threshold=0,time_threshold=5, check_voltage_gradient=True):#
"""
    Threshold is at 0 mV, which can let RF cause spurious AP detection unless
we perform a voltage gradient check, which defaults to True.
-- Old ideas to solve the spurious AP detection problem with threshold at 0 mV --
One idea is to do split trace and then calculate AP width using a voltage threshold of something like -25 mV.
Then, if AP width is really long (> 100 ms?), redo the calculation with a lower threshold (0 mV?).
If mean AP width is then < 100 ms, we use the new split. We could write a log file to say that this happened,
with the trace in it.
However, that is complex and may break if something comes up I haven't thought of.
Instead we could reset the default threshold to 0 mV but add in a gradient check on the voltage crossing from below.
Currently a gradient threshold of 1 mV/ms seems like it should be effective although I don't have any examples of slow
    calcium-initiated APs to test against.
"""
# Units for defaults
# t, time_threshold - ms
# v, threshold - mV
assert len(t) == len(v), "v and t length mismatch"
crossings = []
time_crossings = np.array([])
# Looks for crossings from below
for i,voltage in enumerate(v[:-1]):
if (voltage < threshold) & (v[i+1] >= threshold):
            # Check the local voltage gradient if needed; if the gradient is too small, ignore the crossing
# Time window set to 1.0 to try to counteract bug with averaging too much of the pre-upstroke.
if (check_voltage_gradient) & (is_voltage_gradient_too_small(i, t, v, dvdt_threshold=1.0, time_window=1.0)):
continue # Don't add the crossing if the local voltage gradient is small and we're checking for that
crossings.append(i)
time_crossings = np.append(time_crossings,t[i])
# For each crossing, remove all instances within the time threshold, leaving only the first crossing of the threshold
grouped_crossings = np.zeros(np.size(crossings),float)
for i in range(len(crossings)-1):
if grouped_crossings[i] == 0:
nearby_crossings = np.array( (time_crossings[i+1:] - time_crossings[i]) < time_threshold )
# Assign
grouped_crossings[i+1:] += nearby_crossings
assert all(grouped_crossings < 2), "Grouped crossing grouped more than once"
firstCrossIndices = np.where(grouped_crossings == 0)
# Need to turn crossings into a numpy array to index it with np.where
firstCrossings = np.array(crossings)[firstCrossIndices]
numAPs = len(firstCrossings)
assert numAPs >= 0, "Negative number of APs!"
# Assign time and voltage to traces
times = []
voltages = []
# If 1 or 0 APs, return 1 trace, otherwise...
# if (numAPs == 0) | (numAPs == 1):
# times.append(t)
# voltages.append(v)
"""
There are some commented assumptions about where traces begin and end here. The core idea is that all data points in the trace have to be assigned to 1 and only 1 AP. If areas of quiescence are a problem for particular analysis methods, they will be stripped out by other specialised functions.
Our goal in this function is to divide up the trace without leaving any of it out, so that we have everything for any future analysis.
"""
# If we have multiple APs, for each AP find the minimum value
# of Vm before the next AP
if numAPs > 0:
startIdx = np.zeros(numAPs,int)
endIdx = np.zeros(numAPs,int)
for AP in range(numAPs):
if AP == 0:
startIdx[0] = 0 # Start of first AP is beginning of trace
else:
startIdx[AP] = endIdx[AP-1]+1 # Start of all other APs is after last AP
if AP == numAPs-1:
endIdx[AP] = len(v)-1 # End of last AP is end of trace
else:
# Calculate end of this trace - end is minimum voltage of this trace
# From threshold of this AP to just before beginning of next threshold
voltageDuringCurrentAP = v[firstCrossings[AP]:firstCrossings[AP+1]]
# Get index of minimum voltage AFTER the peak
max_idx = np.argmax(voltageDuringCurrentAP)
minVmIdx = np.argmin(voltageDuringCurrentAP[max_idx:])
endIdx[AP] = firstCrossings[AP] + max_idx + minVmIdx # Don't think I need to minus 1 because Python indices start at 0
times.append(t[startIdx[AP]:endIdx[AP]+1])
voltages.append(v[startIdx[AP]:endIdx[AP]+1]) # Add 1 to as Python slicing ends 1 before last index
for i in range(len(startIdx)-1):
assert endIdx[i]+1 == startIdx[i+1], "startIdx and endIdx don't match up."
# Case for no APs - numAPs causes problems here so set indices manually
elif numAPs == 0:
times.append(t)
voltages.append(v)
startIdx = np.array([0],int)
endIdx = np.array([len(v)-1],int)
assert startIdx[0] == 0, "First AP doesn't start at beginning of trace."
assert endIdx[-1] == len(v)-1, "Last AP doesn't end at end of trace."
return{'t':times, 'v':voltages, 'startIndices':startIdx, 'endIndices':endIdx, 'numAPs':numAPs}
SplitTraceIntoAPs = split_trace_into_aps # Alias
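# Illustrative usage sketch: splitting a synthetic two-spike trace. The Gaussian
# "spikes" below are invented purely for illustration and are not model output.
def _example_split_trace_into_aps():
    t = np.linspace(0., 100., 2001)                       # ms, dt = 0.05 ms
    v = (-65. + 100. * np.exp(-0.5 * (t - 20.) ** 2)
              + 100. * np.exp(-0.5 * (t - 60.) ** 2))     # two fake APs peaking near 35 mV
    traces = split_trace_into_aps(t, v)
    return traces                                         # traces['numAPs'] is expected to be 2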
def voltage_gradient(t,v, method='gradient'):
# There is a gradient function in numpy to take central differences
if method == 'gradient':
dvdt = np.gradient(v,t)# Central differences except at end points
elif method == 'diff':
dvdt = np.diff(v)/np.diff(t) # Difference between adjacent points
else :
raise ValueError("Method not found.")
return dvdt
VoltageGradient = voltage_gradient # Alias
def is_voltage_gradient_too_small(i, t, v, dvdt_threshold, time_window):
"""
Check if the voltage gradient around the threshold crossing from below at v[i] to v[i+1]
is too small to have a reasonable likelihood of being a real AP.
Inputs:
i - index at which threshold is crossed, between v[i] and v[i+1]
t,v - time and voltage arrays
dvdt threshold - mV/ms
time window (either side of indices i and i+1, so effective window is double the size) - ms
"""
voltage_gradient_too_small = False
# Get time window around v[i] and v[i+1]
lower_t_bound = t[i] - time_window
if lower_t_bound < 0: lower_t_bound = 0
upper_t_bound = t[i+1] + time_window
# Get indices of t and v that are within the window
t = np.array(t)
window_indices = (t >= lower_t_bound) & (t <= upper_t_bound)
_t = t[window_indices]
_v = v[window_indices]
_dvdt = np.gradient(_v,_t)
# Check mean gradient against threshold
if np.mean(_dvdt) < dvdt_threshold:
voltage_gradient_too_small = True
return voltage_gradient_too_small
# --- Biomarkers ---
def rmp(v):
# RMP should be calculated from a quiescent trace (no stimulus)
# Ignore first 90% of trace to remove artifacts
vLen = len(v)
startIdx = 90*vLen//100
RMP = min(v[startIdx:])
RMPIdx = np.argmin(v[startIdx:])
return RMP, RMPIdx
RMP = rmp # Alias
def input_res(t, v, current_injection_time):
    # Input resistance calculated from a protocol with an equilibration phase
    # to get to RMP, followed by a sustained small current input.
    # Input res then = (v[-1] - v[RMP])/(I-0)
    # In Davidson, 50 to 100 pA current pulse was used to determine input resistance.
    # Divide trace at injection time:
    # Get RMP at t < injection time:
    # Get RMP at t > injection time:
    # TODO: not yet implemented - a placeholder body is needed for valid syntax.
    raise NotImplementedError('input_res is not implemented yet.')
# Rheobase - find the first trace with an action potential
# Assumes traces are sorted in order from smallest amplitude upwards
""" To Do - check that simulations is a bunch of simulations, not just an array """
def rheobase(simulations,amps):
# Check amps is sorted
for i in range(len(amps)-1):
assert amps[i+1] > amps[i], 'Amps in rheobase biomarker not increasing monotonically!'
for simulation,amp in zip(simulations,amps):
# Search for first trace which produces an AP
result = SplitTraceIntoAPs(simulation['t'],simulation['v'])
if result['numAPs'] > 0:
return {'rheobase':amp, 'trace':simulation}
# If no APs found
return {'rheobase':np.nan, 'trace':[]}
Rheobase = rheobase # Alias
def ap_peak(v):
peak = max(v)
location = np.argmax(v)
return [peak,location]
APPeak = ap_peak # Alias
def threshold(t, v, dvdt_threshold=5.):
# Calculation of threshold voltage as described in Davidson et al., 2014 PAIN using gradient
# Threshold is in V/s - default of 5 is what was used by Davidson et al.
dvdt = np.gradient(v,t)
thresholds = []
for i, gradient in enumerate(dvdt[0:-1]):
if (gradient < dvdt_threshold) & (dvdt[i+1] > dvdt_threshold): # Look for crossing of threshold
thresholds.append(v[i])
if thresholds:
return thresholds[0] # Only use first threshold of crossing
else:
return np.nan
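# Illustrative usage sketch: the Davidson-style threshold is the voltage at which
# dV/dt first crosses the criterion (5 mV/ms). The Gaussian "spike" is invented.
def _example_threshold():
    t = np.linspace(0., 40., 801)
    v = -65. + 100. * np.exp(-0.5 * (t - 20.) ** 2)       # one fake AP peaking near 35 mV
    return threshold(t, v, dvdt_threshold=5.)             # expected a little above -65 mV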
def ap_rise_time(t,v,threshold=5):
"""
Threshold here is a dVdt threshold in mV/ms`
Default threshold is taken from Davidson et al. 2014, PAIN
"""
assert threshold > 0, 'Rise time threshold is a gradient threshold, should be > 0!'
dVdt = np.gradient(v,t)
peak = ap_peak(v)
peak_idx = peak[1]
peak_time = t[peak_idx]
# If dVdt is a tuple, second part is gradient
found_thresholds = []
for i,gradient in enumerate(dVdt[0:-1]): # Is dVdt a time vector as well?
if gradient < threshold:
if dVdt[i+1] > threshold:
found_thresholds.append(i)
num_threshold = len(found_thresholds)
if num_threshold == 1:
threshold_time = t[found_thresholds[0]]
rise_time = peak_time - threshold_time
if rise_time < 0:
#rise_time = 'Rise time < 0: %.3f' % rise_time
rise_time = np.nan
# assert rise_time >=0, 'Rise time < 0!'
elif num_threshold == 0:
rise_time = np.nan
elif num_threshold > 1:
# assert False, 'More than 1 threshold for rise time - APs may not be clearly separated.'
# Take the first one - later ones are probably rapid spikes e.g. on the shoulder
threshold_time = t[found_thresholds[0]]
rise_time = peak_time - threshold_time
return rise_time
APRiseTime = ap_rise_time # Alias
def ap_slope_min_max(t,v):
dVdt = np.gradient(v,t)
slope_min = min(dVdt)
slope_max = max(dVdt)
### Need mins and maxes
return [slope_min,slope_max]
APSlopeMinMax = ap_slope_min_max # Alias
def ap_width(t, v, alpha, _threshold=5., threshold_type='gradient'):
"""
Generic ap width calculating function. Alpha determines the fraction of the voltage
gap between threshold and ap peak that is used to set the voltage threshold.
Specifically, if Th is the calculate threshold voltage and P is the peak voltage,
the voltage threshold used depends on alpha so that width threshold WTh = alpha*P + (1-alpha)*Th
So for full width alpha = 0, as we just use the bare threshold Th, for half width alpha = 0.5,
and alpha = 1 should give a width of 0 as it goes all the way to the peak.
Defaults are consistent with Davidson et al. 2014 (5 mV/ms gradient to find threshold voltage)
_threshold named to avoid overlapping with threshold function
"""
# Calculate AP threshold and AP peak voltages
if threshold_type == 'gradient':
v_threshold = threshold(t, v, _threshold)
elif threshold_type == 'voltage':
v_threshold = _threshold
else:
raise ValueError("threshold type: {} not recognised".format(threshold_type))
if np.isnan(v_threshold):
return np.nan
v_peak = ap_peak(v)[0]
width_v_threshold = alpha * v_peak + (1.0 - alpha) * v_threshold
# Find crossing points
ups, downs = find_threshold_crossings(v, width_v_threshold)
# Check we have crossings
if ups and downs:
last_down = downs[-1]
first_up = ups[0]
width = t[last_down] - t[first_up]
return width
else:
return np.nan
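# Illustrative usage sketch: full width (alpha = 0) and half width (alpha = 0.5)
# of one synthetic spike, using the gradient-based threshold. Values are invented.
def _example_ap_width():
    t = np.linspace(0., 40., 801)
    v = -65. + 100. * np.exp(-0.5 * (t - 20.) ** 2)       # one fake AP peaking near 35 mV
    full = ap_width(t, v, alpha=0.0, _threshold=5., threshold_type='gradient')
    half = ap_width(t, v, alpha=0.5, _threshold=5., threshold_type='gradient')
    return full, half                                      # half is expected to be narrower than full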
def ap_full_width(t,v ,_threshold=5., threshold_type='gradient'):
"""
Calculate full width of AP by one of two methods, a voltage threshold
or a voltage/time gradient threshold
Defaults are consistent with Davidson et al. 2014 (5 mV/ms gradient to find threshold voltage)
_threshold named to avoid overlapping with threshold function
"""
if threshold_type == 'voltage':
assert not np.isnan(_threshold), "Threshold {} is nan".format(_threshold)
ups, downs = find_threshold_crossings(v, _threshold)
elif threshold_type == 'gradient':
# Find voltage at which we cross the dvdt threshold
dvdt = np.gradient(v,t)
gradient_threshold = None
for i, _ in enumerate(dvdt[:-1]):
if (dvdt[i] < _threshold) and (dvdt[i+1] >= _threshold):
gradient_threshold = v[i]
break
# Return if we don't cross the threshold
if gradient_threshold:
ups, downs = find_threshold_crossings(v, gradient_threshold)
else:
return np.nan
else:
raise ValueError("threshold type: {} not recognised".format(threshold_type))
#print(arr)
#print(ups,downs)
num_ups = len(ups)
num_downs = len(downs)
if (num_ups < 1) | (num_downs < 1):
# Not enough crossings
full_width = np.nan
elif (num_ups == 1) & (num_downs == 1):
# One crossing of threshold each way
full_width = t[downs[0]] - t[ups[0]]
elif (num_ups > 1) | (num_downs > 1):
# Too many crossings
# Find earliest crossing from below and latest crossing from above
# to calculate full width
first_up = ups[0]
last_down = downs[-1]
full_width = t[last_down] - t[first_up]
return full_width
APFullWidth = ap_full_width # Alias
def ap_half_width(t,v, dvdt_threshold=5.):
"""
Definition from neuroelectro.org:
AP duration at membrane voltage halfway between AP threshold and AP peak.
Currently only uses gradient method for finding threshold for simplicity.
"""
# Calculate AP threshold and AP peak voltages
v_threshold = threshold(t,v, dvdt_threshold=dvdt_threshold,)
v_peak = ap_peak(v)[0]
half_width_v_threshold = (v_threshold + v_peak)/2.
# Find crossing points
ups, downs = find_threshold_crossings(v,half_width_v_threshold)
# Check we have crossings
if ups and downs:
last_down = downs[-1]
first_up = ups[0]
half_width = t[last_down] - t[first_up]
return half_width
else:
return np.nan
def fit_afterhyperpolarization(traces, dvdt_threshold, max_time_from_peak=50., ahp_model = 'single_exp', full_output=False):
"""
Gather afterhyperpolarisation regions from a set of traces and fit them to a model
of a single exponential (other models can be added as needed)
Outputs:
    Returns mean amp, tau and trough, or if full_output is selected returns six outputs
    (per-AP amps, taus, troughs, plus the fitted times, voltages and fit parameters).
"""
# Fit to single exponential model
if ahp_model == 'single_exp':
def model(x, a, b, c):
return a - b * np.exp(-x/c)
else:
raise ValueError('Model \"{}\" not valid'.format(ahp_model))
# Return function if we have a result we can't fit a hyperpolarisation to
def hyperpolarisation_fit_failure(full_output):
if full_output:
return np.nan, np.nan, np.nan, np.nan,np.nan, np.nan
else:
return np.nan, np.nan, np.nan
# Arrange data to contain each interval between peaks (num APs > 1) or peak to end of trace (n=1)
"""
    This caused a bug for the last AP, since when the stimulus turns off the resulting hyperpolarisation is bigger than the AHP itself.
Instead, for the last trace in the sequence only, check only for 50 ms after the peak. Could also check for some
multiple of AP full width, but 50 ms should be sufficient for all but very long abnormal APs.
"""
num_APs = traces['numAPs']
if num_APs < 1:
return hyperpolarisation_fit_failure(full_output)
elif num_APs == 1:
_t = traces['t'][0]
_v = traces['v'][0]
max_idx = np.argmax(_v)
# Check that the peak is not right at the end of the trace
if max_idx == len(_v)-1:
return hyperpolarisation_fit_failure(full_output)
# Get the period between t(peak) and t(peak) + max_time_from_peak
t_peak = _t[max_idx]
t_end = t_peak + max_time_from_peak
end_idx = np.argmin(abs(_t - t_end))
ts = [_t[max_idx+1:end_idx]] # Single element lists from just after peak to end of max_time_from_peak period
vs = [_v[max_idx+1:end_idx]]
elif num_APs > 1:
ts = []
vs = []
# Traces 1 to N-1
for i in range(num_APs-1):
_ts = [traces['t'][idx] for idx in [i, i+1]]
_vs = [traces['v'][idx] for idx in [i, i+1]]
max_idxs = [np.argmax(_v) for _v in _vs]
# Concatenate the two parts of the interval from each trace
_t_start = _ts[0][max_idxs[0]:]
_t_end = _ts[1][:max_idxs[1]-1]
_v_start =_vs[0][max_idxs[0]:]
_v_end = _vs[1][:max_idxs[1]-1]
_t = np.concatenate([_t_start, _t_end], axis=0)
_v = np.concatenate([_v_start, _v_end], axis=0)
ts.append(_t)
vs.append(_v)
# Trace N - final trace - use same process as for when there is only 1 AP except change the index
_t = traces['t'][num_APs-1]
_v = traces['v'][num_APs-1]
max_idx = np.argmax(_v)
# Check that the peak is not right at the end of the trace
if max_idx == len(_v)-1:
return hyperpolarisation_fit_failure(full_output)
# Get the period between t(peak) and t(peak) + max_time_from_peak
t_peak = _t[max_idx]
t_end = t_peak + max_time_from_peak
end_idx = np.argmin(abs(_t - t_end))
ts.append(_t[max_idx+1:end_idx])
vs.append(_v[max_idx+1:end_idx])
# For each interval attempt to fit an AHP
amps = []
taus = []
troughs = []
if full_output:
output_ts = []
output_vs = []
popts = []
for i, (t, v) in enumerate(zip(ts, vs)):
# Start from the minimum, until dvdt exceeds the threshold given as input
min_idx = np.argmin(v)
dvdt = np.gradient(v,t)
threshold_exceeded = dvdt > dvdt_threshold
if any(threshold_exceeded):
cutoff_idx = np.where(threshold_exceeded)[0][0] - 1
else: # Use the whole trace
cutoff_idx = len(t)-1
t = t[min_idx:cutoff_idx]
v = v[min_idx:cutoff_idx]
# Check v and t are all there
if any(np.isnan(v)) | any(np.isnan(t)):
return hyperpolarisation_fit_failure(full_output)
# If the membrane potential slowly monotonically decreases after a spike, then the min_idx will be the
# last element of the trace, and so t and v will be empty.
# Also, if the AHP part of the trace has a very small number of elements, then the calculation might not work.
# So we will impose a minimum length threshold for the ahp.
# With dt = 0.05 ms after downsampling, and a real AHP tau of order 10 ms, any trace of
# less than length 100 (5 ms) is probably no good, # and any trace less than length 10 (0.5 ms)
# is almost certainly not going to contain a viable AHP.
# This does assume the default dt though so we should check it is not larger than about 0.1 ms,
# which would equate to a 1 ms minimum AHP duration.
length_threshold = 10
if (len(t) <= length_threshold) | (len(v) <= length_threshold):
return hyperpolarisation_fit_failure(full_output)
dt = np.gradient(t)
assert np.mean(dt) < 0.1, "dt is large, check length_threshold"
# We can check for this and return failure if it is the case. To do: think about whether this is the best
# way to handle the lack of an ahp. There could also be cases where we have an AP with no AHP,
# and these might give odd tau and amp readings that should not be averaged. For calibration this is fine,
# but for mechanistic investigation it might not be so good.
        # Use scipy.optimize.curve_fit to fit the curve - another option would be to use the more complex
        # LMFIT library, but I have no experience with its advantages over the basic scipy least-squares fit function
if ahp_model == 'single_exp':
t = t - t[0] # Zero out t as model assumes this
popt, pcov = optimize.curve_fit(model, t, v) # Do the fitting
# following neuroelectro: https://neuroelectro.org/ephys_prop/index/
# AHP amplitude is from threshold voltage to trough
trough = min(v)
# Need original AP to calculate
thresh = threshold(traces['t'][i], traces['v'][i], dvdt_threshold)
ahp_amp = trough - thresh # will be -ve
ahp_tau = popt[2]
ahp_trough = trough
" Alternate approaches to calculation"
"""
calculation_method = False
if calculation_method == "Simple":
ahp_amp = trough
elif calculation_method == "ToRecovery":
ahp_amp = None
"""
else:
raise ValueError('Model \"{}\" not valid'.format(ahp_model))
amps.append(ahp_amp)
taus.append(ahp_tau)
troughs.append(ahp_trough)
if full_output:
output_ts.append(t)
output_vs.append(v)
popts.append(popt)
# Return non averaged output and cutoff times and voltages if full output requested "
if full_output == True:
return amps, taus, troughs, output_ts, output_vs, popts
# Otherwise just return mean amplitude and time constant of the AHP "
else:
amp = np.mean(amps)
tau = np.mean(taus)
trough = np.mean(troughs)
return amp, tau, trough
"""
def expFunc(t, amp, slope, start):
return amp*(1 - np.exp(-slope*t)+start)
maxIdx = []
maxIdx.append(np.argmax(v)) # Get idx of max(v)
maxIdx.append(np.argmax(v2))# Get idx of max(v2)
workingTime = np.concatenate((t[maxIdx[0]:],t2[:maxIdx[1]+1]),0) ### join t[maxIdx1:] up to t2[1:maxIdx[1]]
workingVoltage = np.concatenate((v[maxIdx[0]:],v2[:maxIdx[1]+1]),0) ### join
# AHP amplitude
amp = min(workingVoltage)
ampIdx = np.argmin(workingVoltage)
# dvdt = VoltageGradient(workingTime[ampIdx:], workingVoltage[ampIdx:])
# plt.plot(workingTime[ampIdx:-1],dvdt)
# temp = np.argwhere(dvdt > dvdt_threshold) # Temp because we only need the first element
# takeoffIdx = temp[0][0] # TODO This will break if there's no points above dvdt_threshold
# plt.plot(workingTime[ampIdx:ampIdx+takeoffIdx],workingVoltage[ampIdx:ampIdx+takeoffIdx])
# plt.plot(workingTime,workingVoltage)
# AHP time constant
# TO DO!!
# Look up curve fitting
tau = 'Time constant not implemented'
return amp, tau
"""
def inter_spike_interval(traces):
""" Calculate average interspike interval from a divided set of traces
Total interspike interval is the time difference between the first and last peak of a trace,
divided by the number of intervals (number of APs - 1)
"""
numAPs = traces['numAPs']
if numAPs < 2:
#print('ISI cannot be calculated with < 2 APs')
return np.nan
else:
# Find the peak of the first and last trace
voltages = traces['v']
first_spike = np.argmax(voltages[0])
last_spike = np.argmax(voltages[-1])
# Get the time difference
times = traces['t']
time_diff = times[-1][last_spike] - times[0][first_spike]
assert time_diff > 0, 'time_diff for ISI < 0: {}'.format(time_diff)
# Divide by number of intervals (numAPs - 1) to get mean ISI
inter_spike_interval = time_diff/(numAPs-1)
return inter_spike_interval
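# Illustrative usage sketch: with three peaks 20 ms apart the ISI is
# (t_last_peak - t_first_peak) / (numAPs - 1) = 40 / 2 = 20 ms. The traces dict
# is hand-built for illustration only.
def _example_inter_spike_interval():
    ts, vs = [], []
    for peak_time in (10., 30., 50.):
        t = np.linspace(peak_time - 10., peak_time + 10., 401)
        v = -65. + 100. * np.exp(-0.5 * (t - peak_time) ** 2)
        ts.append(t)
        vs.append(v)
    return inter_spike_interval({'t': ts, 'v': vs, 'numAPs': 3})   # expected 20.0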
def absmax(i):
"""
Returns the largest absolute value present in an array in its raw form
(e.g. in [-2, 0, 1] it returns -2, in [-2,0,3] it returns 3.)
"""
# Use the absolute largest value in its raw form
if max(i) > abs(min(i)):
return max(i)
elif abs(min(i)) >= max(i):
return min(i)
else:
raise ValueError()
# ---- Calculating biomarkers over multiple traces ----
def calculate_rmp(traces):
RMPVals = []
for i,v in enumerate(traces['v']):
RMPValue, RMPIdx = RMP(v)
RMPVals.append(RMPValue)
return RMPVals
CalculateRMP = calculate_rmp # Alias
def calculate_input_res(traces):
    # TODO: input_res is not implemented yet and expects (t, v, current_injection_time).
    input_res_vals = []
    for t, v in zip(traces['t'], traces['v']):
        input_res_vals.append(input_res(t, v, current_injection_time=None))
return input_res_vals
CalculateInputRes = calculate_input_res # Alias
def calculate_ramp_ap():
"""
Can't remember what this biomarker was supposed to do?
We just run ramp simulations and calculate biomarkers on those now.
"""
# TODO
return 0
CalculateRampAP = calculate_ramp_ap # Alias
def calculate_rheobase(cell_model, amp_step=0.1, amp_max=5., make_plot=False, sim_kwargs=None, search='simple'):
" Run a series of simulations to calculate rheobase"
" Rheobase is defined as the threshold current for an infinite duration pulse "
" We'll try 2 seconds "
# Fill out sim_kwargs with defaults if needed
if sim_kwargs is None:
sim_kwargs = {}
default_kwargs = {'dur':500., 'delay':1000., 'interval':0., 'num_stims':1, 't_stop':1500.,
'mechanisms':None, 'make_plot':False, 'plot_type':'default', 'model':cell_model}
for kwarg in default_kwargs.keys():
if kwarg in sim_kwargs.keys():
pass
else:
sim_kwargs[kwarg] = default_kwargs[kwarg]
def rheobase_simulation(amp):
# Returns simulation amplitude if an AP is found, otherwise returns RHEO_FAIL if no APs found
sim_kwargs['amp'] = amp
output = sh.simulation(**sim_kwargs)
t = output['t']; v = output['v'];
# Look for an AP, after throwing away the delay period, leave a 1 ms run up to catch the start
run_up = 1.
delay = sim_kwargs['delay']
stim_period_indices = (t >= (delay-run_up)) # TODO - why did I put tuple here earlier?
t = t[stim_period_indices]
v = v[stim_period_indices]
traces = split_trace_into_aps(t,v,threshold=0.,time_threshold=5.)
if traces['numAPs'] > 0: # rheobase found
if make_plot:
plot_traces(traces)
rheobase = amp
return rheobase
else:
return RHEO_FAIL
amp_min = 0.
amps = np.arange(amp_min, amp_max, amp_step) # (nA)
# Two search modes
# 1. simple starts from amp_min and works up until it finds an AP
# 2. divide starts from the middle and does a binary search
# simple should be quicker when rheobase is usually very low and very few models have no rheobase
# divide should be quicker if rheobase is distributed any other way
if search == 'simple':
for amp in amps:
rheobase = rheobase_simulation(amp)
            if rheobase is not RHEO_FAIL: # 'is not' is used because np.nan == np.nan returns False
return rheobase
return RHEO_FAIL
elif search == 'divide':
# Divide and conquer algorithm using a binary search
idx0 = 0
idxn = len(amps) - 1
        rheobases = [None] * len(amps)  # plain list so identity checks against RHEO_FAIL work
while idx0 <= idxn:
midval = (idx0 + idxn)// 2
            rheobase = rheobase_simulation(amps[midval])
            rheobases[midval] = rheobase
            if rheobase is not RHEO_FAIL: # 'is not' is used because np.nan == np.nan returns False
if midval == 0:
# Rheobase is minimum
return amps[0]
                elif rheobases[midval-1] is RHEO_FAIL: # The next amp down is known to give no AP
# Found minimal amp for an AP - return rheobase
return amps[midval]
else:
# AP found but not definitely lowest amp so lower idxn
idxn = midval - 1
            elif rheobase is RHEO_FAIL: # 'is' is used because np.nan == np.nan returns False
if midval == (len(amps) - 1):
# No rheobase for highest amp
return RHEO_FAIL
elif isinstance(rheobases[midval+1], float):
# We've found highest amp with no AP, so one up is rheobase
return amps[midval+1]
else:
# No AP found but not definitely highest amp so raise idx0
idx0 = midval + 1
else:
raise Exception('Rheobase not accepted value.' )
raise Exception('No rheobase found')
elif search == 'smart':
# Simple search but after first two searches upwards we check the max value to check for
# no rheobase. If the first 5? searches fail we switch to binary.
# TODO
pass
CalculateRheobase = calculate_rheobase # Alias for compatibility
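# Illustrative sketch of the 'divide' search idea above: a binary search for the
# smallest amplitude that fires an AP. The fires() predicate is a hypothetical
# stand-in for running a full simulation; the numbers are invented.
def _example_divide_search(true_rheobase=1.3, amp_step=0.1, amp_max=5.):
    amps = np.arange(0., amp_max, amp_step)
    def fires(amp):
        return amp >= true_rheobase
    lo, hi = 0, len(amps) - 1
    found = None
    while lo <= hi:
        mid = (lo + hi) // 2
        if fires(amps[mid]):
            found = amps[mid]                              # candidate; keep looking lower
            hi = mid - 1
        else:
            lo = mid + 1
    return found                                           # smallest firing amp, or None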
def calculate_threshold(traces, dvdt_threshold=5.):
thresholds = []
for t,v in zip(traces['t'], traces['v']):
thresholds.append(threshold(t, v, dvdt_threshold=dvdt_threshold,))
return thresholds
def calculate_ap_peak(traces):
ap_peak_vals = []
for _,v in zip(range(len(traces['t'])),traces['v']):
ap_peak_vals.append(ap_peak(v)[0])
return ap_peak_vals
CalculateAPPeak = calculate_ap_peak # Alias
def calculate_ap_rise_time(traces,dvdtthreshold=5.):
ap_rise_time_vals = []
for t,v in zip(traces['t'],traces['v']):
ap_rise_time_vals.append(ap_rise_time(t,v,dvdtthreshold))
return ap_rise_time_vals
CalculateAPRiseTime = calculate_ap_rise_time # Alias
def calculate_ap_slope_min_max(traces):
ap_slope_min_vals = []
ap_slope_max_vals = []
for t,v in zip(traces['t'],traces['v']):
dvdt = np.gradient(v,t)
ap_slope_min_vals.append(min(dvdt))
ap_slope_max_vals.append(max(dvdt))
return ap_slope_min_vals, ap_slope_max_vals
CalculateAPSlopeMinMax = calculate_ap_slope_min_max # Alias
def calculate_ap_width(traces, alpha, threshold=0, method='voltage'):
ap_width_vals = []
for t,v in zip(traces['t'],traces['v']):
ap_width_vals.append(ap_width(t,v,alpha,threshold,method))
return ap_width_vals
def calculate_ap_half_width(traces, threshold=0, method='voltage'):
alpha = 0.5
ap_half_width_vals = calculate_ap_width(traces,alpha,threshold,method)
return ap_half_width_vals
def calculate_ap_full_width(traces,threshold=0, method='voltage'):
alpha = 0.0 # Calculate at the threshold so set alpha = 0
ap_full_width_vals = calculate_ap_width(traces,alpha,threshold,method)
return ap_full_width_vals
CalculateAPFullWidth = calculate_ap_full_width # Alias
def calculate_ahp_amp(traces,dvdt_threshold=5):
ahp_amp_vals = []
if traces['numAPs'] > 1:
        for i in range(traces['numAPs']-1):
            t = traces['t'][i]
            v = traces['v'][i]
            t2 = traces['t'][i+1]
            v2 = traces['v'][i+1]
            # fit_afterhyperpolarization expects a traces dict, so wrap this AP pair in one
            pair_traces = {'t': [t, t2], 'v': [v, v2], 'numAPs': 2}
            amp, tau, trough = fit_afterhyperpolarization(pair_traces, dvdt_threshold)
            ahp_amp_vals.append(amp)
elif traces['numAPs'] == 1:
v = traces['v'][0]
max_idx = np.argmax(v)
working_voltage = v[max_idx:]### join
amp = min(working_voltage)
ahp_amp_vals.append(amp)
return ahp_amp_vals
CalculateAHPAmp = calculate_ahp_amp # Alias
def calculate_ahp_tau():
# TODO
return 0
CalculateAHPTau = calculate_ahp_tau # Alias
# -- Firing Patterns --
# See Balachandar and Prescott 2018 for algorithms
# TODO: Find algorithms for phasic and burst patterns
def determine_firing_pattern(traces, stim_start, stim_end):
"""
Define firing pattern of traces as one or more of n types:
1. Reluctant
2. Single
3. Tonic
4. Delayed
5. Gap
6. Phasic - multi-AP firing that ends before end of stimulus
7. Burst firing
8. Wide
9. Repolarisation failure
"""
def first_spike_delay(traces, stim_start):
# Find delay between stim start and first spike
first_spike_v = traces['v'][0]
first_spike_t = traces['t'][0]
single_spike_index = ap_peak(first_spike_v)[1]
single_spike_time = first_spike_t[single_spike_index]
delay = single_spike_time - stim_start
#print("delay = {}".format(delay))
return delay
def first_two_spikes_isi(traces):
# Find delay between first and second spikes
spike_times = []
for i in [0,1]:
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
delay = spike_times[1] - spike_times[0]
return delay
def second_third_spikes_isi(traces):
# Find delay between second and third spikes
spike_times = []
for i in [1,2]:
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
delay = spike_times[1] - spike_times[0]
return delay
def check_delayed(traces):
# Check if firing pattern is delayed
delayed = False
num_aps = traces['numAPs']
# Delayed firing pattern criterion for 1 spike:
# Delay from stim start to first spike is > 100 ms
if num_aps == 1:
if first_spike_delay(traces, stim_start) > 100.0:
delayed = True
# Delayed firing pattern criterion for > 1 spike:
# Delay between stimulus start and firing first spike is > 1.5
# times the ISI between spikes 1 and 2.
elif num_aps > 1:
if first_spike_delay(traces, stim_start) > 1.5*first_two_spikes_isi(traces):
delayed = True
return delayed
def check_gap(traces):
gap = False
num_aps = traces['numAPs']
# Gap firing criteria:
# Number of spikes > 2
# ISI between spikes 1 and 2 > 1.5 times ISI between spikes 2 and 3
if num_aps > 2:
if first_two_spikes_isi(traces) > 1.5*second_third_spikes_isi(traces):
gap = True
return gap
def check_phasic(traces, stim_end, ratio_threshold=0.25):
"""
Phasic - firing of multiple APs followed by a period of quiescence.
Cases
1. Idea is use ratio of - Time from last spike to stimulus end:time from first to last spike
If the ratio is above some threshold.
2. Simply time from last peak to end of stimulus compared to a threshold.
"""
phasic = False
# Characterisation cases
case1 = True
case2 = False
# First, check we have multiple APs
# We will class single spikes as single spikes, not phasic.
num_aps = traces['numAPs']
if num_aps < 2:
return False
spike_times = []
for i in range(num_aps):
spike_idx = ap_peak(traces['v'][i])[1]
spike_times.append(traces['t'][i][spike_idx])
# Case 1
if case1:
last_spike_to_stim_end = stim_end - spike_times[-1]
# check stimulus ended before last spike, if not can't be phasic
if last_spike_to_stim_end > 0:
first_to_last_spike = spike_times[-1] - spike_times[0]
assert first_to_last_spike > 0
ratio = last_spike_to_stim_end/first_to_last_spike
#print("Ratio = {}".format(ratio))
if ratio > ratio_threshold:
phasic = True
# Case 2
        if case2:
            raw_time_threshold = 50.0
            last_spike_to_stim_end = stim_end - spike_times[-1]
            if last_spike_to_stim_end > raw_time_threshold:
                phasic = True
return phasic
def check_bursting(traces, stim_start):
"""
Bursting - bursts of APs separated by rest periods
Not sure how to characterize currently.
1. Find all AP peaks.
2. Divide trace up into quiet periods and firing periods
Quiet period is region where distance between two APs or last AP and
stimulus end is greater than some multiple of the average ISI (median?).
"""
bursting = False
return bursting
def check_wide(traces, mean_width_threshold=10.0):
"""
Abnormally wide APs - feature seen when inserting hNav 1.8 into mice (Han et al. 2015).
"""
wide = False
# Get width of each AP using AP half width biomarker
# Use half width as we can compare against data in Han et al. 2015
# Figure 8D shows half-width distributions, which motivated the choice to
# set default width threshold for 'wide' designation to 10 ms.
half_widths = []
for t, v in zip(traces['t'], traces['v']):
half_widths.append(ap_half_width(t ,v, dvdt_threshold=5.))
if half_widths: # Check we have widths
if np.mean(half_widths) > mean_width_threshold:
wide = True
return wide
def check_rep_fail(traces, rep_fail_threshold=0.0):
"""
Repolarisation failure - trace does not recover to a reasonably depolarised voltage
This can be set by user but we'll start with using 0 mV as default threshold and
can tune as needed.
"""
rep_fail = False
last_trace = traces['v'][-1]
# Check last element of last trace against threshold
if last_trace[-1] > rep_fail_threshold:
rep_fail = True
return rep_fail
firing_pattern = []
num_aps = traces['numAPs']
if num_aps == 0:
firing_pattern.append('reluctant')
elif num_aps == 1:
firing_pattern.append('single')
if check_delayed(traces):
firing_pattern.append('delayed')
if check_wide(traces):
firing_pattern.append('wide')
if check_rep_fail(traces):
firing_pattern.append('rep_fail')
elif num_aps > 1:
firing_pattern.append('multi')
# Determine if tonic spiking - can't be delayed, gap, phasic or repolarisation failure
phasic = check_phasic(traces, stim_end, ratio_threshold=0.25)
delayed = check_delayed(traces)
gap = check_gap(traces)
rep_fail = check_rep_fail(traces)
if (not delayed) and (not gap) and (not phasic) and (not rep_fail):
firing_pattern.append('tonic')
if phasic:
firing_pattern.append('phasic')
if delayed:
firing_pattern.append('delayed')
if gap:
firing_pattern.append('gap')
if rep_fail:
firing_pattern.append('rep_fail')
# Check wide
if check_wide(traces, mean_width_threshold=10.0):
firing_pattern.append('wide')
#print(" TODO:Bursting")
return firing_pattern
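# Illustrative usage sketch: one synthetic spike fired 150 ms after stimulus onset
# should be classed as 'single' and 'delayed'. Trace and stimulus window are invented.
def _example_firing_pattern():
    t = np.linspace(0., 300., 6001)
    v = -65. + 100. * np.exp(-0.5 * (t - 170.) ** 2)       # one fake AP at t = 170 ms
    traces = split_trace_into_aps(t, v)
    return determine_firing_pattern(traces, stim_start=20., stim_end=270.)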
# ---- Plotting ----
def plot_traces(traces):
for t,v in zip(traces['t'], traces['v']):
plt.plot(t,v)
# ---- I/O ----
def write_header(biomarker_file):
string = 'Index'
for biomarker in db.biomarkerNames:
string += (';' + biomarker)
string += ';' + 'stimAmp'
string += '\n'
biomarker_file.write(string)
return
WriteHeader = write_header # Alias
def write_biomarkers(biomarkers,biomarker_file):
# Write the values of each biomarker in csv format
string = str(biomarkers['Index'])
for biomarker in db.biomarkerNames:
string += (';' + str(biomarkers[biomarker]))
string += (';' + str(biomarkers['stimAmp']))
string += '\n'
biomarker_file.write(string)
return
WriteBiomarkers = write_biomarkers # Alias
# ---- Frequency intensity curve biomarkers ----
class FICurves(object):
"""
Class to hold FI curve data
Frequencies
Amplitudes
Results
Which simulations go together
And allow you to extract FI curves, plot them and obtain summary statistics
"""
def __init__(self, results, simulations):
self.results = results.copy()
self.simulations = simulations.copy()
self.groups = [] # Data storage for each group
"""
Get all the simulations that have a constant stimulus amplitude and aren't run to rheobase
and group by block parameters and stimulus type TODO: And also check all other features (e.g.
"""
self.group_simulations()
self.get_FI_curves()
# Process results to remove parameters
if 'Parameters' in self.results.columns.levels[0]:
self.results = self.results.drop('Parameters',axis=1,level=0)
self.results.columns = self.results.columns.drop('Parameters',level=0)
"""
If we need multiple stim unit definitions:
1. Turn above line into for loop
2 Create a function called get_stim_amp_designations that gives all the things like nA or pA to search for
3. Search for any of them in the simulation name and accept those that hit one and only one of them
"""
def group_simulations(self):
"""
Group simulations together that are the same except for their stimulus amplitudes
"""
self.groups = []
for name, sim in self.simulations.items():
amp, shared_params = self.get_simulation_parameters(sim.protocols)
# Check for a fixed amplitude (not a simulation to find rheobase)
if amp:
# Check whether there is an existing group with matching shared parameters (scaling factors, stim function)
group = self.check_for_existing_group(shared_params)
if group is not None:
# Add sim name and amplitude to group as key, val
self.groups[group]['simulations'][name] = amp
#print("Appending: {}".format(self.groups[group]))
else:
new_group = {'simulations': {name:amp}, 'shared_params':shared_params}
#print("Making: {}".format(new_group))
self.groups.append(new_group)
def get_simulation_parameters(self, sim_protocols):
"""
Extract needed simulation parameters
Currently: amplitude, stimulus type and scaling factors
"""
amp = sim_protocols['amp']
shared_params = {}
shared_params['stim_type'] = sim_protocols['stim_func']
if 'parameter_scaling' in sim_protocols:
shared_params['parameter_scaling'] = sim_protocols['parameter_scaling']
return amp, shared_params
def check_for_existing_group(self, shared_params):
"""
        Check groups for a group that matches shared_params.
Returns:
Group index if group exists
None if group does not exist
"""
        # Check if shared_params matches the shared_params of any existing group
group_idx = None
for i, group in enumerate(self.groups):
if shared_params == group['shared_params']:
assert group_idx is None, "group_idx should equal None, instead: {}".format(group_idx)
group_idx = i
return group_idx
def get_FI_curves(self):
"""
Use the ISIs to compute firing curves for each group, for models that have non-nan ISIs
Firing curves are calculated for each simulation group
"""
num_groups = len(self.groups)
assert num_groups > 0, "Num groups: {} is not > 0".format(num_groups)
# Iterate through each group
for group in self.groups:
"""
Get frequencies for each simulation in the group and build into a dataframe
Dataframe format:
rows = model indices
columns = simulation amplitudes
"""
idx = self.results.index
amps = [amp for amp in group['simulations'].values()]
fi_data = pd.DataFrame(index=idx, columns=amps)
# Populate this group's fi curve df
for sim_name, amp in group['simulations'].items():
ISIs = self.results.loc[:,(sim_name,'ISI')]
frequencies = self.calculate_frequencies(ISIs)
fi_data.loc[:,amp] = frequencies
# Save this group's data
group['FI'] = fi_data
def plot_FI_curves(self):
"""
Plot FI curves and maybe compute some summary statistics
Do scatter and line plots so that if we have a single datapoint for a model it still gets plotted
"""
num_groups = len(self.groups)
subplot_dim = int(np.ceil(np.sqrt(num_groups))) # Number of subplots (square)
plt.figure(figsize=(10,10))
for i, group in enumerate(self.groups):
plt.subplot(subplot_dim,subplot_dim, i+1)
fi_data = group['FI'].copy()
for idx in fi_data.index:
data = fi_data.loc[idx,:]
plt.plot(data.index, data)
plt.scatter(data.index, data)
plt.xlim(min(fi_data.columns)-0.1, max(fi_data.columns)+0.1)
plt.ylim(0, None)
# Make a rough title
separator = '_'
for i, sim_name in enumerate(group['simulations']):
if i == 0:
temp_title = sim_name
else:
s1 = temp_title.split(separator)
s2 = sim_name.split(separator)
title_parts = [part for part in s1 if part in s2]
title = separator.join(title_parts)
plt.title(title)
plt.xlabel('I (nA)')
plt.ylabel('f (Hz)')
def calculate_frequencies(self, ISIs):
return 1000.0/ISIs # Converts ms to Hz: ISI of 500 ms = 2 Hz, ISI of 2000 ms = 0.5 Hz
# ---- Util ----
def get_biomarker_names(biomarker_set='all'):
'''
Biomarkers TODO from neuroelectro:
* Input resistance
* AP Half width
* Membrane time constant
* Cell capacitance (fixed by simulation)
* Maximum firing rate
* Sag ratio
* Adaptation ratio
* First spike latency
* FI slope
* Spike rise time
* Spontaneous firing rate
* There are others but think they are other names for the same concepts
'''
if biomarker_set == 'all':
biomarker_names = ['Threshold', 'APFullWidth', 'APPeak', 'APRiseTime', 'APSlopeMin', 'APSlopeMax', 'AHPAmp', 'AHPTau', 'AHPTrough', 'ISI', 'RMP', 'Rheobase']
else:
raise ValueError('biomarker_set {} not found'.format(biomarker_set))
return biomarker_names
def find_threshold_crossings(arr, _threshold):
"""
Find all indices at which a threshold is crossed from above and from below
in an array. Used for finding indices to compute ap widths and half widths.
"""
#print("threshold = {}".format(_threshold))
ups = []
downs = []
for i, _ in enumerate(arr[:-1]): # Don't iterate on last element
# Get crossings of threshold from below
if arr[i] < _threshold:
if arr[i+1] >= _threshold:
ups.append(i)
# Get crossings of threshold from above
if arr[i] > _threshold:
if arr[i+1] <= _threshold:
downs.append(i)
return ups, downs
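# Illustrative usage sketch on a hand-made array: one upward and one downward
# crossing of 0 mV.
def _example_find_threshold_crossings():
    arr = [-10., 5., 20., 5., -10.]
    return find_threshold_crossings(arr, 0.)               # expected ([0], [3])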
def add_total_width_biomarker(pop, width_biomarker='APHalfWidth', filter_width=False, verbose=False):
"""
Add a total width biomarker to each simulation in a population's results
"""
def compute_total_width(df, width_biomarker, filter_width=False):
"""
Compute the total width of a simulation from its results dataframe
with optional filtering out of AP Width outliers
"""
freq = 1000.0/df['ISI']
numAPs = df['numAPs']
freq[numAPs == 1] = 1 # Approximation
width = df[width_biomarker]
if filter_width:
outlier_definition = an.get_outlier_definition(width_biomarker)
width = width[width < outlier_definition]
total_width = width * freq
total_width = total_width.fillna(0)
return total_width
simulations = [col for col in pop.results.columns.levels[0] if col not in ['Parameters']]
for col in simulations:
if verbose:
print(col)
total_width = compute_total_width(df=pop.results[col],
width_biomarker=width_biomarker,
filter_width=filter_width)
pop.results.loc[:, (col, 'APTotalWidth')] = total_width
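# Illustrative sketch of the total-width idea above: AP width multiplied by firing
# frequency (1000/ISI, in Hz), with the same single-AP approximation. The DataFrame
# below is hand-made and only mimics one simulation's results columns.
def _example_total_width():
    df = pd.DataFrame({'ISI': [100., 50., np.nan],
                       'numAPs': [5, 10, 1],
                       'APHalfWidth': [2.0, 2.5, 3.0]})
    freq = 1000.0 / df['ISI']
    freq[df['numAPs'] == 1] = 1                            # single-AP approximation
    return (df['APHalfWidth'] * freq).fillna(0)            # expected [20.0, 50.0, 3.0]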
| 39.586639
| 302
| 0.628116
|
c40f60e0a299755f90374a3d9149113439d53a84
| 9,589
|
py
|
Python
|
netbox_onboarding/tests/test_views.py
|
hoanhan101/ntc-netbox-plugin-onboarding
|
09764a0ac68e948accbd9346ea0f1ae0af569e3d
|
[
"Apache-2.0"
] | null | null | null |
netbox_onboarding/tests/test_views.py
|
hoanhan101/ntc-netbox-plugin-onboarding
|
09764a0ac68e948accbd9346ea0f1ae0af569e3d
|
[
"Apache-2.0"
] | null | null | null |
netbox_onboarding/tests/test_views.py
|
hoanhan101/ntc-netbox-plugin-onboarding
|
09764a0ac68e948accbd9346ea0f1ae0af569e3d
|
[
"Apache-2.0"
] | null | null | null |
"""Unit tests for netbox_onboarding views.
(c) 2020 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib.auth.models import User, Permission
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from dcim.models import Site
from netbox_onboarding.models import OnboardingTask
class OnboardingTaskListViewTestCase(TestCase):
"""Test the OnboardingTaskListView view."""
def setUp(self):
"""Create a user and baseline data for testing."""
self.user = User.objects.create(username="testuser")
self.client = Client()
self.client.force_login(self.user)
self.url = reverse("plugins:netbox_onboarding:onboarding_task_list")
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
self.onboarding_task1 = OnboardingTask.objects.create(ip_address="10.10.10.10", site=self.site1)
self.onboarding_task2 = OnboardingTask.objects.create(ip_address="192.168.1.1", site=self.site1)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_list_onboarding_tasks_anonymous(self):
"""Verify that OnboardingTasks can be listed without logging in if permissions are exempted."""
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "netbox_onboarding/onboarding_tasks_list.html")
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_list_onboarding_tasks(self):
"""Verify that OnboardingTasks can be listed by a user with appropriate permissions."""
# Attempt to access without permissions
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="view_onboardingtask")
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "netbox_onboarding/onboarding_tasks_list.html")
class OnboardingTaskCreateViewTestCase(TestCase):
"""Test the OnboardingTaskCreateView view."""
def setUp(self):
"""Create a user and baseline data for testing."""
self.user = User.objects.create(username="testuser")
self.client = Client()
self.client.force_login(self.user)
self.url = reverse("plugins:netbox_onboarding:onboarding_task_add")
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_get_anonymous(self):
"""Verify that the view cannot be accessed by anonymous users even if permissions are exempted."""
self.client.logout()
response = self.client.get(self.url)
# Redirected to the login page
self.assertEqual(response.status_code, 302)
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_get(self):
"""Verify that the view can be seen by a user with appropriate permissions."""
# Attempt to access without permissions
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="add_onboardingtask")
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "netbox_onboarding/onboarding_task_edit.html")
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_post_anonymous(self):
"""Verify that the view cannot be accessed by anonymous users even if permissions are exempted."""
self.client.logout()
response = self.client.get(self.url)
# Redirected to the login page
self.assertEqual(response.status_code, 302)
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_post(self):
"""Verify that the view can be used by a user with appropriate permissions."""
# Attempt to access without permissions
response = self.client.post(self.url)
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="add_onboardingtask")
)
response = self.client.post(
self.url, data={"ip_address": "10.10.10.10", "site": "uswest", "port": "22", "timeout": "30"}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(OnboardingTask.objects.count(), 1)
class OnboardingTaskBulkDeleteViewTestCase(TestCase):
"""Test the OnboardingTaskBulkDeleteView view."""
def setUp(self):
"""Create a user and baseline data for testing."""
self.user = User.objects.create(username="testuser")
self.client = Client()
self.client.force_login(self.user)
self.url = reverse("plugins:netbox_onboarding:onboarding_task_bulk_delete")
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
self.onboarding_task1 = OnboardingTask.objects.create(ip_address="10.10.10.10", site=self.site1)
self.onboarding_task2 = OnboardingTask.objects.create(ip_address="192.168.1.1", site=self.site1)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_post_anonymous(self):
"""Verify that the view cannot be accessed by anonymous users even if permissions are exempted."""
self.client.logout()
response = self.client.post(self.url)
# Redirected to the login page
self.assertEqual(response.status_code, 302)
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_post(self):
"""Verify that the view can be seen by a user with appropriate permissions."""
# Attempt to access without permissions
response = self.client.post(
self.url, data={"pk": [self.onboarding_task1.pk], "confirm": True, "_confirm": True}
)
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="delete_onboardingtask")
)
response = self.client.post(
self.url, data={"pk": [self.onboarding_task1.pk], "confirm": True, "_confirm": True}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(OnboardingTask.objects.count(), 1)
class OnboardingTaskFeedBulkImportViewTestCase(TestCase):
"""Test the OnboardingTaskFeedBulkImportView view."""
def setUp(self):
"""Create a superuser and baseline data for testing."""
self.user = User.objects.create(username="testuser")
self.client = Client()
self.client.force_login(self.user)
self.url = reverse("plugins:netbox_onboarding:onboarding_task_import")
self.site1 = Site.objects.create(name="USWEST", slug="uswest")
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_get_anonymous(self):
"""Verify that the import view cannot be seen by an anonymous user even if permissions are exempted."""
self.client.logout()
response = self.client.get(self.url)
# Redirected to the login page
self.assertEqual(response.status_code, 302)
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_get(self):
"""Verify that the import view can be seen by a user with appropriate permissions."""
# Attempt to access without permissions
response = self.client.get(self.url)
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="add_onboardingtask")
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "utilities/obj_bulk_import.html")
@override_settings(EXEMPT_VIEW_PERMISSIONS=[])
def test_post(self):
"""Verify that tasks can be bulk-imported."""
csv_data = [
"site,ip_address",
"uswest,10.10.10.10",
"uswest,10.10.10.20",
"uswest,10.10.10.30",
]
# Attempt to access without permissions
response = self.client.post(self.url, data={"csv": "\n".join(csv_data)})
self.assertEqual(response.status_code, 403)
# Add permission
self.user.user_permissions.add(
Permission.objects.get(content_type__app_label="netbox_onboarding", codename="add_onboardingtask")
)
response = self.client.post(self.url, data={"csv": "\n".join(csv_data)})
self.assertEqual(response.status_code, 200)
self.assertEqual(OnboardingTask.objects.count(), len(csv_data) - 1)
| 41.691304 | 113 | 0.689957 |
0c946e23d9d9365b94324eeb63a27760eff8239c | 1,609 | py | Python | var/spack/repos/builtin/packages/espanso/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/espanso/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/espanso/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Espanso(Package):
"""Cross-platform Text Expander written in Rust"""
homepage = "https://github.com/federico-terzi/espanso"
url = "https://github.com/federico-terzi/espanso/releases/download/v0.6.3/espanso-linux.tar.gz"
maintainers = ['zicklag']
version('0.6.3', sha256='eb9f9563ed0924d1494f0b406b6d3df9d7df00e81affaf15023d1c82dd8ac561')
version('0.6.2', sha256='db2e53c8e0a17575f69739e53dd6a486dd0e912abbc7ac7c33d98567bd1f0e18')
version('0.6.1', sha256='0917d4a990bfc5ced368ce9fbc3aa4bc4dac4d39ddea88359dc628fee16daf87')
version('0.6.0', sha256='97689b734235224dde2fb4723bee24324a53355a6b549fb9d024a0c8ddb3cd98')
version('0.5.5', sha256='94687a3049a43ed4c2ed3814afb4e32e09dec8ec396e54a7b012de936f0260e9')
version('0.5.4', sha256='87e4c4a8a7bfb95a3ee987e34af3a37ca4d962bec3f863ef74be7fc8cdd1a9dd')
version('0.5.3', sha256='1db21f74385b1eb94ac6d27def550d02dce8da34bce1f8f4a0c4eb9bfd80d135')
version('0.5.2', sha256='69c8d3460ae497a2224cbf290c334c9151fc756053f65cbaf9ce8e9284ad50fd')
version('0.5.1', sha256='e68d90256f9eb26b57085b5170e238752bfbfcf3d50ccaa5693974460cb19deb')
version('0.5.0', sha256='f85c098a20b1022d8a6b751e3a56431caa01c796ce88ab95aae8950a1233da55')
depends_on('xclip')
depends_on('xdotool')
def install(self, spec, prefix):
mkdir(prefix.bin)
install('espanso', prefix.bin)
| 48.757576 | 104 | 0.778745 |
f97274119f9a359f25f3495d71e742f9f229fbe1 | 69 | py | Python | routes/api_admin.py | ashhosts/kittenpanel | 1463253f107262e97722ebb7d6bd939040d66c26 | ["MIT"] | 1 | 2021-08-30T11:53:59.000Z | 2021-08-30T11:53:59.000Z | routes/api_admin.py | ashhosts/kittenpanel | 1463253f107262e97722ebb7d6bd939040d66c26 | ["MIT"] | null | null | null | routes/api_admin.py | ashhosts/kittenpanel | 1463253f107262e97722ebb7d6bd939040d66c26 | ["MIT"] | null | null | null |
from fastapi import APIRouter
router = APIRouter(prefix="/api/admin")
| 34.5 | 39 | 0.797101 |
18aed31fe65fa56bb74ee7808271b15da926ce29 | 4,661 | py | Python | v2.5.7/toontown/minigame/TwoDSection.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | ["MIT"] | 4 | 2019-07-01T15:46:43.000Z | 2021-07-23T16:26:48.000Z | v2.5.7/toontown/minigame/TwoDSection.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | ["MIT"] | 1 | 2019-06-29T03:40:05.000Z | 2021-06-13T01:15:16.000Z | v2.5.7/toontown/minigame/TwoDSection.py | TTOFFLINE-LEAK/ttoffline | bb0e91704a755d34983e94288d50288e46b68380 | ["MIT"] | 4 | 2019-07-28T21:18:46.000Z | 2021-02-25T06:37:25.000Z |
from panda3d.core import *
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.DirectObject import DirectObject
from toontown.minigame import ToonBlitzGlobals
from toontown.minigame import TwoDBlock
from toontown.minigame import TwoDEnemyMgr
from toontown.minigame import TwoDTreasureMgr
from toontown.minigame import TwoDSpawnPointMgr
from toontown.minigame import TwoDStomperMgr
class TwoDSection(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('TwoDSection')
def __init__(self, indexNum, sectionInfo, sectionNP, sectionMgr):
self.indexNum = indexNum
self.sectionNP = sectionNP
self.sectionMgr = sectionMgr
self.blocks = []
self.load(sectionInfo)
def destroy(self):
for block in self.blocks:
block.destroy()
self.enemyMgr.destroy()
del self.enemyMgr
self.treasureMgr.destroy()
del self.treasureMgr
self.spawnPointMgr.destroy()
del self.spawnPointMgr
self.stomperMgr.destroy()
del self.stomperMgr
self.sectionMgr = None
self.sectionNP = None
self.blockList = []
self.enemyList = []
self.treasureList = []
self.spawnPointList = []
return
def load(self, sectionInfo):
self.sectionTypeNum = sectionInfo[0]
enemyIndicesSelected = sectionInfo[1]
treasureIndicesSelected = sectionInfo[2]
spawnPointIndicesSelected = sectionInfo[3]
stomperIndicesSelected = sectionInfo[4]
attribs = ToonBlitzGlobals.SectionTypes[self.sectionTypeNum]
self.length = attribs[1]
self.blockList = attribs[2]
enemiesPool = attribs[3]
treasuresPool = attribs[4]
spawnPointsPool = attribs[5]
stompersPool = attribs[6]
self.enemyList = []
for enemyIndex in enemyIndicesSelected:
self.enemyList.append(enemiesPool[enemyIndex])
self.treasureList = []
for treasure in treasureIndicesSelected:
treasureIndex = treasure[0]
treasureValue = treasure[1]
treasureAttribs = treasuresPool[treasureIndex]
self.treasureList.append((treasureAttribs, treasureValue))
self.spawnPointList = []
for spawnPointIndex in spawnPointIndicesSelected:
self.spawnPointList.append(spawnPointsPool[spawnPointIndex])
self.stomperList = []
for stomperIndex in stomperIndicesSelected:
self.stomperList.append(stompersPool[stomperIndex])
self.blocksNP = NodePath('Blocks')
self.blocksNP.reparentTo(self.sectionNP)
if self.blockList[0][1][0] != (0, 0, 12):
self.notify.warning('First block of section %s does not start at (0, 0, 12)' % self.sectionTypeNum)
for index in range(0, len(self.blockList)):
blockAttribs = self.blockList[index]
fileName = ToonBlitzGlobals.BlockTypes[blockAttribs[0]][0]
blockIndex = int(fileName[(-1)])
blockType = self.sectionMgr.game.assetMgr.blockTypes[blockIndex]
sectionizedId = self.getSectionizedId(index)
newBlock = TwoDBlock.TwoDBlock(blockType, sectionizedId, blockAttribs)
newBlock.model.reparentTo(self.blocksNP)
self.blocks.append(newBlock)
self.enemyMgr = TwoDEnemyMgr.TwoDEnemyMgr(self, self.enemyList)
self.treasureMgr = TwoDTreasureMgr.TwoDTreasureMgr(self, self.treasureList, self.enemyList)
self.spawnPointMgr = TwoDSpawnPointMgr.TwoDSpawnPointMgr(self, self.spawnPointList)
self.stomperMgr = TwoDStomperMgr.TwoDStomperMgr(self, self.stomperList)
if self.sectionTypeNum == 'end':
self.spawnPointMgr.setupLastSavePointHandle()
def enterPlay(self, elapsedTime):
for block in self.blocks:
block.start(elapsedTime)
self.enemyMgr.enterPlay(elapsedTime)
self.stomperMgr.enterPlay(elapsedTime)
def exitPlay(self):
pass
def enterPause(self):
for block in self.blocks:
block.enterPause()
self.enemyMgr.enterPause()
self.stomperMgr.enterPause()
def exitPause(self):
for block in self.blocks:
block.exitPause()
self.enemyMgr.exitPause()
self.stomperMgr.exitPause()
def getSectionizedId(self, num):
def getTwoDigitString(index):
if index < 10:
output = '0' + str(index)
else:
output = str(index)
return output
return getTwoDigitString(self.indexNum) + '-' + getTwoDigitString(num)
| 36.700787 | 111 | 0.661017 |
52f4cde386c35bfd9ee3b632cfc39521dd7f177b | 259 | py | Python | manage_beta.py | damienlaine/djoro-server | 7f167662732e6106aa6be8b1e835f6321a981dc6 | ["MIT"] | null | null | null | manage_beta.py | damienlaine/djoro-server | 7f167662732e6106aa6be8b1e835f6321a981dc6 | ["MIT"] | 3 | 2020-02-12T03:12:33.000Z | 2021-06-10T21:59:04.000Z | manage_beta.py | damienlaine/djoro-server | 7f167662732e6106aa6be8b1e835f6321a981dc6 | ["MIT"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djoro_server.settings_beta")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 25.9 | 81 | 0.783784 |
eb5219c12db6ce3577f19a7b047517b452000d1a | 6,692 | py | Python | OmniDB/OmniDB/settings.py | mapcoding-cn/omnidb-cn | f7cbd29b4cdbc68eb5c4c25c9703ac9448c54b0a | ["MIT"] | 2 | 2022-03-28T06:26:53.000Z | 2022-03-29T04:28:54.000Z | OmniDB/OmniDB/settings.py | mapcoding-cn/OmniDB | f7cbd29b4cdbc68eb5c4c25c9703ac9448c54b0a | ["MIT"] | null | null | null | OmniDB/OmniDB/settings.py | mapcoding-cn/OmniDB | f7cbd29b4cdbc68eb5c4c25c9703ac9448c54b0a | ["MIT"] | null | null | null |
import os
import sys
import shutil
import random
import string
import getpass
from . import custom_settings
#import ldap
#import django_auth_ldap
#import django_auth_ldap.config
# Development Mode
DEBUG = custom_settings.DEV_MODE
DESKTOP_MODE = custom_settings.DESKTOP_MODE
BASE_DIR = custom_settings.BASE_DIR
HOME_DIR = custom_settings.HOME_DIR
TEMP_DIR = os.path.join(BASE_DIR,'OmniDB_app','static','temp')
PLUGINS_DIR = os.path.join(BASE_DIR,'OmniDB_app','plugins')
PLUGINS_STATIC_DIR = os.path.join(BASE_DIR,'OmniDB_app','static','plugins')
APP_DIR = os.path.join(BASE_DIR,'OmniDB_app')
SESSION_COOKIE_SECURE = custom_settings.SESSION_COOKIE_SECURE
CSRF_COOKIE_SECURE = custom_settings.CSRF_COOKIE_SECURE
CSRF_TRUSTED_ORIGINS = []
SESSION_COOKIE_NAME = 'omnidb_sessionid'
CSRF_COOKIE_NAME = 'omnidb_csrftoken'
ALLOWED_HOSTS = ['*']
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(HOME_DIR, 'omnidb.db')
}
}
if DEBUG:
SECRET_KEY = 'ijbq-+%n_(_^ct+qnqp%ir8fzu3n#q^i71j4&y#-6#qe(dx!h3'
else:
SECRET_KEY = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(50))
INSTALLED_APPS = [
'OmniDB_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'django_sass'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'OmniDB.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'OmniDB.wsgi.application'
#import ldap
#import django_auth_ldap.config
#from django_auth_ldap.config import LDAPSearch
#AUTH_LDAP_SERVER_URI = 'SERVER'
#AUTH_LDAP_BIND_DN = "uid=example,dc=example,dc=com"
#AUTH_LDAP_BIND_PASSWORD = "password"
#AUTH_LDAP_USER_SEARCH = django_auth_ldap.config.LDAPSearch(
# "uid=example,dc=example,dc=com", ldap.SCOPE_SUBTREE, "uid=%(user)s"
# )
#AUTH_LDAP_USER_ATTR_MAP = {
# "username": "sAMAccountName",
# "first_name": "givenName",
# "last_name": "sn",
# "email": "mail",
#}
#from django_auth_ldap.config import ActiveDirectoryGroupType
#AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
# "dc=tech,dc=local", ldap.SCOPE_SUBTREE, "(objectCategory=Group)"
# )
#AUTH_LDAP_GROUP_TYPE = ActiveDirectoryGroupType(name_attr="cn")
#AUTH_LDAP_USER_FLAGS_BY_GROUP = {
# "is_superuser": "CN=django-admins,CN=Users,DC=TECH,DC=LOCAL",
# "is_staff": "CN=django-admins,CN=Users,DC=TECH,DC=LOCAL",
# }
#AUTH_LDAP_FIND_GROUP_PERMS = True
#AUTH_LDAP_CACHE_GROUPS = True
#AUTH_LDAP_GROUP_CACHE_TIMEOUT = 1 # 1 hour cache
AUTHENTICATION_BACKENDS = [
#'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
SOCIAL_AUTH_GITHUB_KEY = 'Iv1.b66f09dc30df16f3'
SOCIAL_AUTH_GITHUB_SECRET = '3403a3cc31a991d48ef72fbd73fa45e3af5b62ba'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
PATH = custom_settings.PATH
# Processing PATH
if PATH == '/':
PATH = ''
elif PATH != '':
if PATH[0] != '/':
PATH = '/' + PATH
if PATH[len(PATH)-1] == '/':
PATH = PATH[:-1]
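# Editor's note (not part of the original settings): worked examples of the
# normalisation above, so every URL below can safely be built as PATH + '/...':
#   '/'         -> ''
#   'omnidb'    -> '/omnidb'
#   '/omnidb/'  -> '/omnidb'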
LOGIN_URL = PATH + '/omnidb_login'
LOGIN_REDIRECT_URL = PATH + '/'
STATIC_URL = PATH + '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "OmniDB_app/static")
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
#OMNIDB LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%m/%d/%Y %H:%M:%S"
},
},
'handlers': {
'logfile_omnidb': {
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(HOME_DIR, 'omnidb.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'logfile_django': {
'class':'logging.handlers.RotatingFileHandler',
'filename': os.path.join(HOME_DIR, 'omnidb.log'),
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
'level':'ERROR',
},
'console_django':{
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'console_omnidb_app':{
'class':'logging.StreamHandler',
'formatter': 'standard',
'level':'ERROR',
},
},
'loggers': {
'django': {
'handlers':['logfile_django','console_django'],
'propagate': False,
},
'OmniDB_app': {
'handlers': ['logfile_omnidb','console_omnidb_app'],
'propagate': False,
'level':'INFO',
},
'cherrypy.error': {
'handlers': ['logfile_django','console_omnidb_app'],
'level': 'INFO',
'propagate': False
}
}
}
#OMNIDB PARAMETERS
OMNIDB_VERSION = custom_settings.OMNIDB_VERSION
OMNIDB_SHORT_VERSION = custom_settings.OMNIDB_SHORT_VERSION
CH_CMDS_PER_PAGE = 20
PWD_TIMEOUT_TOTAL = 1800
PWD_TIMEOUT_REFRESH = 300
THREAD_POOL_MAX_WORKERS = 2
| 29.875 | 98 | 0.639719 |
7a71b4a62ca5c2b94bf6dddd55a086b19f0a37d8 | 242 | py | Python | src/oscar/apps/wishlists/apps.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | ["BSD-3-Clause"] | null | null | null | src/oscar/apps/wishlists/apps.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | ["BSD-3-Clause"] | 5 | 2021-05-28T19:38:28.000Z | 2022-03-12T00:45:39.000Z | src/oscar/apps/wishlists/apps.py | Jean1508/ya-madoa | 1ffb1d11e15bf33e4c3a09698675a4357e887eaa | ["BSD-3-Clause"] | null | null | null |
from django.utils.translation import gettext_lazy as _
from oscar.core.application import OscarConfig
class WishlistsConfig(OscarConfig):
label = 'wishlists'
name = 'oscar.apps.wishlists'
verbose_name = _('Wishlists')
| 24.2 | 55 | 0.731405 |
03ce15e39cf7c42d2dff2639579cebb966e36851 | 3,529 | py | Python | Machine_Learning/Feature_Tutorials/02-profiling-example/files/alexnet_zcu102/common/dputils.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | ["Apache-2.0"] | 3 | 2020-10-29T15:00:30.000Z | 2021-10-21T08:09:34.000Z | Machine_Learning/Feature_Tutorials/02-profiling-example/files/alexnet_zcu102/common/dputils.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | ["Apache-2.0"] | 20 | 2020-10-31T03:19:03.000Z | 2020-11-02T18:59:49.000Z | Machine_Learning/Feature_Tutorials/02-profiling-example/files/alexnet_zcu102/common/dputils.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | ["Apache-2.0"] | 9 | 2020-10-14T02:04:10.000Z | 2020-12-01T08:23:02.000Z |
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
from dnndk import n2cube
try:
pyc_libdputils = cdll.LoadLibrary("libn2cube.so")
except Exception:
print('Load libn2cube.so failed\nPlease install DNNDK first!')
def dpuSetInputImageWithScale(task, nodeName, image, mean, scale, idx=0):
"""Set image into DPU Task's input Tensor with a specified scale parameter"""
height = n2cube.dpuGetInputTensorHeight(task, nodeName, idx)
width = n2cube.dpuGetInputTensorWidth(task, nodeName, idx)
channel = n2cube.dpuGetInputTensorChannel(task, nodeName, idx)
(imageHeight, imageWidth, imageChannel) = image.shape
inputMean = (c_float * channel)()
for i in range(0, channel):
inputMean[i] = mean[i]
if height == imageHeight and width == imageWidth:
newImage = image
else:
newImage = cv2.resize(image, (width, height), 0, 0, cv2.INTER_LINEAR)
inputImage = np.asarray(newImage, dtype=np.byte)
inputImage2 = inputImage.ctypes.data_as(c_char_p)
return pyc_libdputils.pyc_dpuSetInputData(task,
c_char_p(nodeName.encode("utf-8")), inputImage2,
c_int(height),
c_int(width),
c_int(imageChannel), inputMean,
c_float(scale), c_int(idx))
def dpuSetInputImage(task, nodeName, image, mean, idx=0):
"""
Set image into DPU Task's input Tensor
task: DPU Task
nodeName: The pointer to DPU Node name.
image: Input image in OpenCV Mat format. Single channel and 3-channel input image are supported.
mean: Mean value array which contains 1 member for single channel input image
or 3 members for 3-channel input image
Note: You can get the mean values from the input Caffe prototxt.
At present, the format of mean value file is not yet supported
idx: The index of a single input tensor for the Node, with default value as 0
"""
return dpuSetInputImageWithScale(task, nodeName, image, mean, 1.0, idx)
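# Editor's usage sketch (the node name and mean values below are assumptions for
# illustration, not taken from the original file): typical call pattern for
# dpuSetInputImage() with an OpenCV image.
#
#   img = cv2.imread("sample.jpg")              # BGR image, HxWx3
#   mean = [104.0, 117.0, 123.0]                # per-channel means from the Caffe prototxt
#   dpuSetInputImage(task, "conv1", img, mean)  # "conv1" is a hypothetical input node name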
def dpuSetInputImage2(task, nodeName, image, idx=0):
"""
Set image into DPU Task's input Tensor (mean values automatically processed by N2Cube)
nodeName: The pointer to DPU Node name.
image: Input image in OpenCV Mat format. Single channel and 3-channel input image are supported.
idx: The index of a single input tensor for the Node, with default value as 0
"""
channel = n2cube.dpuGetInputTensorChannel(task, nodeName, idx)
output = (c_float * channel)()
outputMean = POINTER(c_float)(output)
pyc_libdputils.loadMean(task, outputMean, channel)
for i in range(channel):
outputMean[i] = float(outputMean[i])
return dpuSetInputImageWithScale(task, nodeName, image, outputMean, 1.0,
idx)
| 42.518072 | 103 | 0.665061 |
5343e380683f1555be794e3bf29dcad44a5421db | 2,168 | py | Python | WWV_utility2.py | rkayakr/ProcessPolt | 6952b812f151f941b46592540e205770b8cfa38e | ["CC0-1.0"] | 1 | 2021-11-17T12:41:00.000Z | 2021-11-17T12:41:00.000Z | WWV_utility2.py | rkayakr/ProcessPolt | 6952b812f151f941b46592540e205770b8cfa38e | ["CC0-1.0"] | null | null | null | WWV_utility2.py | rkayakr/ProcessPolt | 6952b812f151f941b46592540e205770b8cfa38e | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
20 February 2020
WWV utility file
Routines and classes used in WWV file management and graphing
David Kazdan, AD8Y
John Gibbons, N8OBJ - mods to plot header 2/3/20
"""
#%% utility function needed here to convert ISO time into decimal hours
def time_string_to_decimals(time_string): #returns float decimal hours
#print('Input time string=',time_string)
# if (NewHdr = 'New'): # if new header strip off date and Zulu stuff
# time_string = time_string[11:-1] # Hack off date 'YYYY-MM-DDT' and ending 'Z'
time_string = time_string[11:-1] # Hack off date 'YYYY-MM-DDT' and ending 'Z'
#print('Used Time_String=',time_string)
fields=time_string.split(":")
    hours=float(fields[0]) if len(fields)>0 else 0.0
    minutes=float(fields[1])/60. if len(fields)>1 else 0.0
    seconds=float(fields[2])/3600. if len(fields)>2 else 0.0
#print('Hr=',hours, ' Min=',minutes, ' Sec=',seconds, '\n')
return (hours + minutes + seconds)
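# Editor's sketch (not in the original file): a quick self-check of the conversion above,
# assuming the 'YYYY-MM-DDTHH:MM:SSZ' form that the slicing expects; 06:30:00 UTC is 6.5 h.
if __name__ == '__main__':
    assert abs(time_string_to_decimals('2020-02-20T06:30:00Z') - 6.5) < 1e-9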
#%
#%% modified from "Double-y axis plot,
# http://kitchingroup.cheme.cmu.edu/blog/2013/09/13/Plotting-two-datasets-with-very-different-scales/
import matplotlib.pyplot as plt  # needed by graph_Doppler_and_power_data() below
#from matplotlib.pyplot import plot, legend, show, grid, figure, savefig
def graph_Doppler_and_power_data():
fig = plt.figure(figsize=(19,10)) # inches x, y with 72 dots per inch
ax1 = fig.add_subplot(111)
ax1.plot(hours, filtDoppler, 'k') # color k for black
ax1.set_ylabel('Doppler shift, Hz')
ax1.set_xlim(0,24) # UTC day
ax1.set_ylim([-1, 1]) # -1 to 1 Hz for Doppler shift
#
ax2 = ax1.twinx()
ax2.plot(hours, filtPower, 'r-') # NOTE: Set for filtered version
ax2.set_ylabel('Power in relative dB', color='r')
ax2.set_ylim(min_power, max_power) #as determined above for this data set
for tl in ax2.get_yticklabels():
tl.set_color('r')
#
# plt.title('HF Beacon Doppler Shift Plot for: ' + OrgName + ' \nLat= ' + Lat + ' Long=' + Lon + ' Elv= ' + Alt + ' M\n WWV 5 MHz ' + PlotDate)
# plt.savefig(DATADIR+ 'two-scales-5.png', dpi=250, orientation='landscape')
#&
| 41.692308 | 159 | 0.649446 |
9ac04ed66f2122f3cdadca94f5f1e0b0bed2ad9d | 18,810 | py | Python | test/functional/test_framework/util.py | stamhe/bitcoin-abc | a1ba303c6b4f164ae94612e83b824e564405a96e | ["MIT"] | 1 | 2022-01-09T22:29:10.000Z | 2022-01-09T22:29:10.000Z | test/functional/test_framework/util.py | EGYVOICE/bitcoin-abc-avalanche | e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c | ["MIT"] | 17 | 2021-08-06T21:27:41.000Z | 2022-03-31T08:28:08.000Z | test/functional/test_framework/util.py | EGYVOICE/bitcoin-abc-avalanche | e0f1fe857e1fc85f01903f1c323c2d5c54aecc1c | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
import inspect
import json
import logging
import os
import re
import time
import unittest
from base64 import b64encode
from binascii import unhexlify
from decimal import ROUND_DOWN, Decimal
from io import BytesIO
from subprocess import CalledProcessError
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=10):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("{} < [{}..{}]".format(
str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("{} > [{}..{}]".format(
str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB, wiggleroom=2):
"""
Assert the fee was in range
wiggleroom defines an amount that the test expects the wallet to be off by
when estimating fees. This can be due to the dummy signature that is added
during fee calculation, or due to the wallet funding transactions using the
ceiling of the calculated fee.
"""
target_fee = satoshi_round(tx_size * fee_per_kB / 1000)
if fee < (tx_size - wiggleroom) * fee_per_kB / 1000:
raise AssertionError(
"Fee of {} XEC too low! (Should be {} XEC)".format(str(fee), str(target_fee)))
if fee > (tx_size + wiggleroom) * fee_per_kB / 1000:
raise AssertionError(
"Fee of {} XEC too high! (Should be {} XEC)".format(str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not({})".format(" == ".join(str(arg)
for arg in (thing1, thing2) + args)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("{} <= {}".format(str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("{} < {}".format(str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError(
"Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError(
"Unexpected returncode {}".format(e.returncode))
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
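# Editor's usage sketch (hypothetical error code, message and RPC arguments):
#
#   assert_raises_rpc_error(-8, "Block height out of range",
#                           node.getblockhash, 1000000)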
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message
# values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError(
"Unexpected JSONRPC error code {}".format(e.error["code"]))
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError(
"Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret {!r} as hexadecimal; raised: {}".format(string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError(
"Expected a string, got type {!r}".format(type(string)))
elif length and len(string) != length:
raise AssertionError(
"String of length {} expected; got {}".format(length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String {!r} contains invalid characters for a hash.".format(string))
def assert_array_result(object_array, to_match, expected,
should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("{} : expected {}={}".format(
str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched {}".format(str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found {}".format(str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting XEC
values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.01'), rounding=ROUND_DOWN)
def wait_until_helper(predicate, *, attempts=float('inf'),
timeout=float('inf'), lock=None, timeout_factor=1.0):
"""Sleep until the predicate resolves to be True.
Warning: Note that this method is not recommended to be used in tests as it is
not aware of the context of the test framework. Using the `wait_until()` members
from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is
properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in
`p2p.py` has a preset lock.
"""
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
timeout = timeout * timeout_factor
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(
predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError(
"Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
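# Editor's usage sketch (hypothetical predicate): poll until a node has at least one
# peer, re-checking every 50 ms and giving up after the (scaled) timeout.
#
#   wait_until_helper(lambda: len(node.getpeerinfo()) > 0, timeout=30)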
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this (for example: 18333 is the
# default testnet port)
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=20000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = int(timeout)
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + \
(MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + \
(MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, chain, host, port):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
if host is None:
host = '127.0.0.1'
return "http://{}:{}@{}:{}".format(rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=200\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("usecashaddr=1\n")
f.write("shrinkdebugfile=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "bitcoin.conf")):
with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid {} : {} not found".format(
txid, str(amount)))
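# Editor's usage sketch (hypothetical txid and amount): locate the output of a
# previously created transaction so it can be spent as an input.
#
#   n = find_output(node, txid, Decimal("1000.00"))
#   inputs = [{"txid": txid, "vout": n}]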
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before
# the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for _ in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(
newtx, None, "NONE|FORKID")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError(
"Vout not found for address: txid={}, addr={}".format(txid, addr))
def modinv(a, n):
"""Compute the modular inverse of a modulo n using the extended Euclidean
Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers.
"""
# TODO: Change to pow(a, -1, n) available in Python 3.8
t1, t2 = 0, 1
r1, r2 = n, a
while r2 != 0:
q = r1 // r2
t1, t2 = t2, t1 - q * t2
r1, r2 = r2, r1 - q * r2
if r1 > 1:
return None
if t1 < 0:
t1 += n
return t1
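# Editor's note: worked example of the routine above: modinv(7, 11) == 8,
# since 7 * 8 == 56 == 5 * 11 + 1, i.e. 7 * 8 is congruent to 1 (mod 11).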
class TestFrameworkUtil(unittest.TestCase):
def test_modinv(self):
test_vectors = [
[7, 11],
[11, 29],
[90, 13],
[1891, 3797],
[6003722857, 77695236973],
]
for a, n in test_vectors:
self.assertEqual(modinv(a, n), pow(a, n - 2, n))
| 34.833333 | 111 | 0.632483 |
96e2bfd7397f358bd9511ab0b3c963337f174c60 | 869 | py | Python | setup.py | dchoruzy/django_admin_sticky_notes | 3e2643dbbf1c405d82852281fd1398a7f02419b6 | ["MIT"] | 6 | 2021-09-08T04:45:21.000Z | 2021-09-14T17:44:30.000Z | setup.py | dchoruzy/django_admin_sticky_notes | 3e2643dbbf1c405d82852281fd1398a7f02419b6 | ["MIT"] | null | null | null | setup.py | dchoruzy/django_admin_sticky_notes | 3e2643dbbf1c405d82852281fd1398a7f02419b6 | ["MIT"] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="django-admin-sticky-notes",
version="1.0.0",
author="Dariusz Choruzy",
author_email="dariusz.choruzy@gmail.com",
description="Django admin sticky notes",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dchoruzy/django-admin-sticky-notes",
project_urls={
"Bug Tracker": "https://github.com/dchoruzy/django-admin-sticky-notes/issues",
},
license='MIT License',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
include_package_data=True,
python_requires=">=3.6",
)
| 32.185185 | 86 | 0.670886 |
3130f1a4242a80d0415cd24010fe42a0cc199485 | 1,607 | py | Python | example/simple_example.py | jupiterbak/FAPSDemonstratorAPI | 4a6f4f251a9836676577509b5cfe72ee3ffbb1d1 | ["MIT"] | 1 | 2018-11-25T20:58:13.000Z | 2018-11-25T20:58:13.000Z | example/simple_example.py | jupiterbak/FAPSDemonstratorAPI | 4a6f4f251a9836676577509b5cfe72ee3ffbb1d1 | ["MIT"] | null | null | null | example/simple_example.py | jupiterbak/FAPSDemonstratorAPI | 4a6f4f251a9836676577509b5cfe72ee3ffbb1d1 | ["MIT"] | null | null | null |
#!/usr/bin/python
import pika
from FAPSDemonstratorAPI import Command, CommandMode, ParameterMode, Program
print('pika version: %s' % pika.__version__)
if __name__ == '__main__':
demonstrator_program = Program()
if demonstrator_program.connect():
demonstrator_program.reset()
demonstrator_program.append_instruction(
Command.CMD_SET_PATH_VELO,
CommandMode.WCD,
20,
0,
0,
ParameterMode.ABSOLUTE,
0
)
demonstrator_program.append_instruction(
Command.CMD_POS_REL_XYZ,
CommandMode.WCD,
10,
0,
0,
ParameterMode.ABSOLUTE,
0
)
demonstrator_program.append_instruction(
Command.CMD_POS_REL_XYZ,
CommandMode.WCD,
-10,
0,
0,
ParameterMode.ABSOLUTE,
0
)
demonstrator_program.append_instruction(
Command.CMD_SET_PATH_VELO,
CommandMode.WCD,
50,
0,
0,
ParameterMode.ABSOLUTE,
0
)
demonstrator_program.execute()
else:
print('Connection cannot be established to the Demonstrator')
| 31.509804 | 76 | 0.439328 |
6f71e9e01ed5f23e0de0b75a0c70747bbc5a0316 | 26,028 | py | Python | lte/gateway/python/magma/pipelined/service_manager.py | parthiban337/magma | 51bc4c02aa7214821ebefcc94a201e9740730aa4 | ["BSD-3-Clause"] | null | null | null | lte/gateway/python/magma/pipelined/service_manager.py | parthiban337/magma | 51bc4c02aa7214821ebefcc94a201e9740730aa4 | ["BSD-3-Clause"] | 70 | 2021-05-31T08:39:40.000Z | 2022-03-25T16:31:46.000Z | lte/gateway/python/magma/pipelined/service_manager.py | kkahrs/magma | 73e666627dc28e0c492feab7321bb7d6dd433b09 | ["BSD-3-Clause"] | 1 | 2021-07-07T14:26:13.000Z | 2021-07-07T14:26:13.000Z |
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ServiceManager manages the lifecycle and chaining of network services,
which are cloud managed and provide discrete network functions.
These network services consist of Ryu apps, which operate on tables managed by
the ServiceManager. OVS provides a set number of tables that can be
programmed to match and modify traffic. We split these tables two categories,
main tables and scratch tables.
All apps from the same service are associated with a main table, which is
visible to other services and they are used to forward traffic between
different services.
Apps can also optionally claim additional scratch tables, which may be
required for complex flow matching and aggregation use cases. Scratch tables
should not be accessible to apps from other services.
"""
# pylint: skip-file
# pylint does not play well with aioeventlet, as it uses asyncio.async which
# produces a parse error
import time
import asyncio
import logging
from concurrent.futures import Future
from collections import namedtuple, OrderedDict
from typing import List
import aioeventlet
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.mobilityd_pb2_grpc import MobilityServiceStub
from lte.protos.session_manager_pb2_grpc import (
LocalSessionManagerStub,
SetInterfaceForUserPlaneStub)
from magma.pipelined.app.base import ControllerType
from magma.pipelined.app import of_rest_server
from magma.pipelined.app.access_control import AccessControlController
from magma.pipelined.app.conntrack import ConntrackController
from magma.pipelined.app.tunnel_learn import TunnelLearnController
from magma.pipelined.app.vlan_learn import VlanLearnController
from magma.pipelined.app.arp import ArpController
from magma.pipelined.app.ipv6_solicitation import \
IPV6SolicitationController
from magma.pipelined.app.dpi import DPIController
from magma.pipelined.app.gy import GYController
from magma.pipelined.app.enforcement import EnforcementController
from magma.pipelined.app.ipfix import IPFIXController
from magma.pipelined.app.li_mirror import LIMirrorController
from magma.pipelined.app.enforcement_stats import EnforcementStatsController
from magma.pipelined.app.inout import EGRESS, INGRESS, PHYSICAL_TO_LOGICAL, \
InOutController
from magma.pipelined.app.ue_mac import UEMacAddressController
from magma.pipelined.app.xwf_passthru import XWFPassthruController
from magma.pipelined.app.startup_flows import StartupFlows
from magma.pipelined.app.check_quota import CheckQuotaController
from magma.pipelined.app.uplink_bridge import UplinkBridgeController
from magma.pipelined.app.ng_services import NGServiceController
from magma.pipelined.rule_mappers import RuleIDToNumMapper, \
SessionRuleToVersionMapper
from magma.pipelined.ipv6_prefix_store import InterfaceIDToPrefixMapper
from magma.pipelined.tunnel_id_store import TunnelToTunnelMapper
from magma.pipelined.internal_ip_allocator import InternalIPAllocator
from ryu.base.app_manager import AppManager
from magma.common.service import MagmaService
from magma.common.service_registry import ServiceRegistry
from magma.configuration import environment
from magma.pipelined.app.classifier import Classifier
from magma.pipelined.app.he import HeaderEnrichmentController, PROXY_TABLE
# Type is either Physical or Logical, highest order_priority is at zero
App = namedtuple('App', ['name', 'module', 'type', 'order_priority'])
class Tables:
__slots__ = ['main_table', 'type', 'scratch_tables']
def __init__(self, main_table, type, scratch_tables=None):
self.main_table = main_table
self.type = type
self.scratch_tables = scratch_tables
if self.scratch_tables is None:
self.scratch_tables = []
class TableNumException(Exception):
"""
Exception used for when table number allocation fails.
"""
pass
class TableRange:
"""
Used to generalize different table ranges.
"""
def __init__(self, start: int, end: int):
self._start = start
self._end = end
self._next_table = self._start
def allocate_table(self):
if (self._next_table == self._end):
            raise TableNumException('Cannot generate more tables. Table limit '
                                    'of %s reached!' % self._end)
table_num = self._next_table
self._next_table += 1
return table_num
def allocate_tables(self, count: int):
if self._next_table + count >= self._end:
            raise TableNumException('Cannot generate more tables. Table limit '
                                    'of %s reached!' % self._end)
tables = [self.allocate_table() for i in range(0, count)]
return tables
def get_next_table(self, table: int):
if table + 1 < self._next_table:
return table + 1
else:
return self._end
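# Editor's usage sketch (illustrative values): a range covering tables 2-9 hands out
# 2, 3, 4, ... until the exclusive end is reached, after which TableNumException is raised.
#
#   r = TableRange(2, 10)
#   r.allocate_table()    # -> 2
#   r.allocate_table()    # -> 3
#   r.allocate_tables(3)  # -> [4, 5, 6]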
class _TableManager:
"""
TableManager maintains an internal mapping between apps to their
main and scratch tables.
"""
GTP_TABLE_NUM = 0
INGRESS_TABLE_NUM = 1
PHYSICAL_TO_LOGICAL_TABLE_NUM = 10
EGRESS_TABLE_NUM = 20
LOGICAL_TABLE_LIMIT_NUM = EGRESS_TABLE_NUM # exclusive
SCRATCH_TABLE_START_NUM = EGRESS_TABLE_NUM + 1 # 21
SCRATCH_TABLE_LIMIT_NUM = 200
# 200 - 255 is used for apps that share a table
ALL_TABLE_LIMIT_NUM = 255 # exclusive
def __init__(self):
self._table_ranges = {
ControllerType.SPECIAL: TableRange(self.GTP_TABLE_NUM,
self.GTP_TABLE_NUM + 1),
ControllerType.PHYSICAL: TableRange(self.INGRESS_TABLE_NUM + 1,
self.PHYSICAL_TO_LOGICAL_TABLE_NUM),
ControllerType.LOGICAL:
TableRange(self.PHYSICAL_TO_LOGICAL_TABLE_NUM + 1,
self.EGRESS_TABLE_NUM)
}
self._scratch_range = TableRange(self.SCRATCH_TABLE_START_NUM,
self.SCRATCH_TABLE_LIMIT_NUM)
self._tables_by_app = {
INGRESS: Tables(main_table=self.INGRESS_TABLE_NUM,
type=ControllerType.SPECIAL),
PHYSICAL_TO_LOGICAL: Tables(
main_table=self.PHYSICAL_TO_LOGICAL_TABLE_NUM,
type=ControllerType.SPECIAL),
EGRESS: Tables(main_table=self.EGRESS_TABLE_NUM,
type=ControllerType.SPECIAL),
}
def _allocate_main_table(self, type: ControllerType) -> int:
if type not in self._table_ranges:
raise TableNumException('Cannot generate a table for %s' % type)
return self._table_ranges[type].allocate_table()
def register_apps_for_service(self, apps: List[App]):
"""
Register the apps for a service with a main table. All Apps must share
        the same controller type.
"""
if not all(apps[0].type == app.type for app in apps):
            raise TableNumException('Cannot register apps with different '
                                    'controller type')
table_num = self._allocate_main_table(apps[0].type)
for app in apps:
self._tables_by_app[app.name] = Tables(main_table=table_num,
type=app.type)
def register_apps_for_table0_service(self, apps: List[App]):
"""
Register the apps for a service with main table 0
"""
for app in apps:
self._tables_by_app[app.name] = Tables(main_table=0, type=app.type)
def get_table_num(self, app_name: str) -> int:
if app_name not in self._tables_by_app:
raise Exception('App is not registered: %s' % app_name)
return self._tables_by_app[app_name].main_table
def get_next_table_num(self, app_name: str) -> int:
"""
Returns the main table number of the next service.
If there are no more services after the current table, return the
EGRESS table
"""
if app_name not in self._tables_by_app:
raise Exception('App is not registered: %s' % app_name)
app = self._tables_by_app[app_name]
if app.type == ControllerType.SPECIAL:
if app_name == INGRESS:
return self._table_ranges[ControllerType.PHYSICAL].get_next_table(app.main_table)
elif app_name == PHYSICAL_TO_LOGICAL:
return self._table_ranges[ControllerType.LOGICAL].get_next_table(app.main_table)
else:
raise TableNumException('No next table found for %s' % app_name)
return self._table_ranges[app.type].get_next_table(app.main_table)
def is_app_enabled(self, app_name: str) -> bool:
return app_name in self._tables_by_app or \
app_name == InOutController.APP_NAME
def allocate_scratch_tables(self, app_name: str, count: int) -> \
List[int]:
tbl_nums = self._scratch_range.allocate_tables(count)
self._tables_by_app[app_name].scratch_tables.extend(tbl_nums)
return tbl_nums
def get_scratch_table_nums(self, app_name: str) -> List[int]:
if app_name not in self._tables_by_app:
raise Exception('App is not registered: %s' % app_name)
return self._tables_by_app[app_name].scratch_tables
def get_all_table_assignments(self) -> 'OrderedDict[str, Tables]':
resp = OrderedDict(sorted(self._tables_by_app.items(),
key=lambda kv: (kv[1].main_table, kv[0])))
# Include table 0 when it is managed by the EPC, for completeness.
if not any(table in ['ue_mac', 'xwf_passthru', 'classifier'] for table in self._tables_by_app):
resp['mme'] = Tables(main_table=0, type=None)
resp.move_to_end('mme', last=False)
return resp
class ServiceManager:
"""
ServiceManager manages the service lifecycle and chaining of services for
the Ryu apps. Ryu apps are loaded based on the services specified in the
YAML config for static apps and mconfig for dynamic apps.
ServiceManager also maintains a mapping between apps to the flow
tables they use.
Currently, its use cases include:
- Starting all Ryu apps
- Flow table number lookup for Ryu apps
- Main & scratch tables management
"""
UE_MAC_ADDRESS_SERVICE_NAME = 'ue_mac'
ARP_SERVICE_NAME = 'arpd'
ACCESS_CONTROL_SERVICE_NAME = 'access_control'
ipv6_solicitation_SERVICE_NAME = 'ipv6_solicitation'
TUNNEL_LEARN_SERVICE_NAME = 'tunnel_learn'
VLAN_LEARN_SERVICE_NAME = 'vlan_learn'
IPFIX_SERVICE_NAME = 'ipfix'
CONNTRACK_SERVICE_NAME = 'conntrack'
RYU_REST_SERVICE_NAME = 'ryu_rest_service'
RYU_REST_APP_NAME = 'ryu_rest_app'
STARTUP_FLOWS_RECIEVER_CONTROLLER = 'startup_flows'
CHECK_QUOTA_SERVICE_NAME = 'check_quota'
LI_MIRROR_SERVICE_NAME = 'li_mirror'
XWF_PASSTHRU_NAME = 'xwf_passthru'
UPLINK_BRIDGE_NAME = 'uplink_bridge'
CLASSIFIER_NAME = 'classifier'
HE_CONTROLLER_NAME = 'proxy'
NG_SERVICE_CONTROLLER_NAME = 'ng_services'
INTERNAL_APP_SET_TABLE_NUM = 201
INTERNAL_IMSI_SET_TABLE_NUM = 202
INTERNAL_IPFIX_SAMPLE_TABLE_NUM = 203
INTERNAL_MAC_IP_REWRITE_TBL_NUM = 204
# Mapping between services defined in mconfig and the names and modules of
# the corresponding Ryu apps in PipelineD. The module is used for the Ryu
# app manager to instantiate the app.
# Note that a service may require multiple apps.
DYNAMIC_SERVICE_TO_APPS = {
PipelineD.ENFORCEMENT: [
App(name=GYController.APP_NAME,
module=GYController.__module__,
type=GYController.APP_TYPE,
order_priority=499),
App(name=EnforcementController.APP_NAME,
module=EnforcementController.__module__,
type=EnforcementController.APP_TYPE,
order_priority=500),
App(name=EnforcementStatsController.APP_NAME,
module=EnforcementStatsController.__module__,
type=EnforcementStatsController.APP_TYPE,
order_priority=501),
],
PipelineD.DPI: [
App(name=DPIController.APP_NAME, module=DPIController.__module__,
type=DPIController.APP_TYPE,
order_priority=400),
],
}
# Mapping between the app names defined in pipelined.yml and the names and
# modules of their corresponding Ryu apps in PipelineD.
STATIC_SERVICE_TO_APPS = {
UE_MAC_ADDRESS_SERVICE_NAME: [
App(name=UEMacAddressController.APP_NAME,
module=UEMacAddressController.__module__,
type=None,
order_priority=0),
],
ARP_SERVICE_NAME: [
App(name=ArpController.APP_NAME, module=ArpController.__module__,
type=ArpController.APP_TYPE,
order_priority=200)
],
ACCESS_CONTROL_SERVICE_NAME: [
App(name=AccessControlController.APP_NAME,
module=AccessControlController.__module__,
type=AccessControlController.APP_TYPE,
order_priority=400),
],
HE_CONTROLLER_NAME: [
App(name=HeaderEnrichmentController.APP_NAME,
module=HeaderEnrichmentController.__module__,
type=HeaderEnrichmentController.APP_TYPE,
order_priority=401),
],
ipv6_solicitation_SERVICE_NAME: [
App(name=IPV6SolicitationController.APP_NAME,
module=IPV6SolicitationController.__module__,
type=IPV6SolicitationController.APP_TYPE,
order_priority=210),
],
TUNNEL_LEARN_SERVICE_NAME: [
App(name=TunnelLearnController.APP_NAME,
module=TunnelLearnController.__module__,
type=TunnelLearnController.APP_TYPE,
order_priority=300),
],
VLAN_LEARN_SERVICE_NAME: [
App(name=VlanLearnController.APP_NAME,
module=VlanLearnController.__module__,
type=VlanLearnController.APP_TYPE,
order_priority=500),
],
RYU_REST_SERVICE_NAME: [
App(name=RYU_REST_APP_NAME,
module='ryu.app.ofctl_rest',
type=None,
order_priority=0),
],
STARTUP_FLOWS_RECIEVER_CONTROLLER: [
App(name=StartupFlows.APP_NAME,
module=StartupFlows.__module__,
type=StartupFlows.APP_TYPE,
order_priority=0),
],
CHECK_QUOTA_SERVICE_NAME: [
App(name=CheckQuotaController.APP_NAME,
module=CheckQuotaController.__module__,
type=CheckQuotaController.APP_TYPE,
order_priority=300),
],
CONNTRACK_SERVICE_NAME: [
App(name=ConntrackController.APP_NAME,
module=ConntrackController.__module__,
type=ConntrackController.APP_TYPE,
order_priority=700),
],
IPFIX_SERVICE_NAME: [
App(name=IPFIXController.APP_NAME,
module=IPFIXController.__module__,
type=IPFIXController.APP_TYPE,
order_priority=800),
],
LI_MIRROR_SERVICE_NAME: [
App(name=LIMirrorController.APP_NAME,
module=LIMirrorController.__module__,
type=LIMirrorController.APP_TYPE,
order_priority=900),
],
XWF_PASSTHRU_NAME: [
App(name=XWFPassthruController.APP_NAME,
module=XWFPassthruController.__module__,
type=XWFPassthruController.APP_TYPE,
order_priority=0),
],
UPLINK_BRIDGE_NAME: [
App(name=UplinkBridgeController.APP_NAME,
module=UplinkBridgeController.__module__,
type=UplinkBridgeController.APP_TYPE,
order_priority=0),
],
CLASSIFIER_NAME: [
App(name=Classifier.APP_NAME,
module=Classifier.__module__,
type=Classifier.APP_TYPE,
order_priority=0),
],
# 5G Related services
NG_SERVICE_CONTROLLER_NAME: [
App(name=NGServiceController.APP_NAME,
module=NGServiceController.__module__,
type=None,
order_priority=0),
],
}
# Some apps do not use a table, so they need to be excluded from table
# allocation.
STATIC_APP_WITH_NO_TABLE = [
RYU_REST_APP_NAME,
StartupFlows.APP_NAME,
UplinkBridgeController.APP_NAME,
NGServiceController.APP_NAME,
]
def __init__(self, magma_service: MagmaService):
self._magma_service = magma_service
if '5G_feature_set' not in magma_service.config:
self._5G_flag_enable = False
else:
ng_flag = magma_service.config.get('5G_feature_set')
self._5G_flag_enable = ng_flag['enable']
# inout is a mandatory app and it occupies:
        # table 1 (ingress)
        # table 10 (middle)
        # table 20 (egress)
self._apps = [App(name=InOutController.APP_NAME,
module=InOutController.__module__,
type=None,
order_priority=0)]
self._table_manager = _TableManager()
self.rule_id_mapper = RuleIDToNumMapper()
self.session_rule_version_mapper = SessionRuleToVersionMapper()
self.interface_to_prefix_mapper = InterfaceIDToPrefixMapper()
self.tunnel_id_mapper = TunnelToTunnelMapper()
apps = self._get_static_apps()
apps.extend(self._get_dynamic_apps())
apps.sort(key=lambda x: x.order_priority)
self._apps.extend(apps)
# Filter out reserved apps and apps that don't need a table
for app in apps:
if app.name in self.STATIC_APP_WITH_NO_TABLE:
continue
# UE MAC service must be registered with Table 0
if app.name in [self.UE_MAC_ADDRESS_SERVICE_NAME, self.XWF_PASSTHRU_NAME]:
self._table_manager.register_apps_for_table0_service([app])
continue
if self._5G_flag_enable:
if app.name in [self.CLASSIFIER_NAME]:
self._table_manager.register_apps_for_table0_service([app])
continue
self._table_manager.register_apps_for_service([app])
def _get_static_apps(self):
"""
        Return the Ryu app definitions for each static service enabled in the
        pipelined YAML config.
"""
static_services = self._magma_service.config['static_services']
nat_enabled = self._magma_service.config.get('nat_enabled', False)
setup_type = self._magma_service.config.get('setup_type', None)
if setup_type == 'LTE':
static_services.append(self.__class__.UPLINK_BRIDGE_NAME)
logging.info("added uplink bridge controller")
if self._5G_flag_enable:
static_services.append(self.__class__.CLASSIFIER_NAME)
static_services.append(self.__class__.NG_SERVICE_CONTROLLER_NAME)
logging.info("added classifier and ng service controller")
static_apps = \
[app for service in static_services for app in
self.STATIC_SERVICE_TO_APPS[service]]
return static_apps
def _get_dynamic_apps(self):
"""
        Return the Ryu app definitions for each dynamic service enabled in the
        mconfig.
"""
dynamic_services = []
for service in self._magma_service.mconfig.services:
if service not in self.DYNAMIC_SERVICE_TO_APPS:
# Most likely cause: the config contains a deprecated
# pipelined service.
# Fix: update the relevant network's network_services settings.
logging.warning(
'Mconfig contains unsupported network_services service: %s',
service,
)
continue
dynamic_services.append(service)
dynamic_apps = [app for service in dynamic_services for
app in self.DYNAMIC_SERVICE_TO_APPS[service]]
return dynamic_apps
def load(self):
"""
Instantiates and schedules the Ryu app eventlets in the service
eventloop.
"""
# Some setups might not use REDIS
if (self._magma_service.config['redis_enabled']):
# Wait for redis as multiple controllers rely on it
while not redisAvailable(self.rule_id_mapper.redis_cli):
logging.warning("Pipelined waiting for redis...")
time.sleep(1)
else:
self.rule_id_mapper._rule_nums_by_rule = {}
self.rule_id_mapper._rules_by_rule_num = {}
self.session_rule_version_mapper._version_by_imsi_and_rule = {}
self.interface_to_prefix_mapper._prefix_by_interface = {}
self.tunnel_id_mapper._tunnel_map = {}
manager = AppManager.get_instance()
manager.load_apps([app.module for app in self._apps])
contexts = manager.create_contexts()
contexts['rule_id_mapper'] = self.rule_id_mapper
contexts[
'session_rule_version_mapper'] = self.session_rule_version_mapper
contexts['interface_to_prefix_mapper'] = self.interface_to_prefix_mapper
contexts['tunnel_id_mapper'] = self.tunnel_id_mapper
contexts['app_futures'] = {app.name: Future() for app in self._apps}
contexts['internal_ip_allocator'] = \
InternalIPAllocator(self._magma_service.config)
contexts['config'] = self._magma_service.config
contexts['mconfig'] = self._magma_service.mconfig
contexts['loop'] = self._magma_service.loop
contexts['service_manager'] = self
sessiond_chan = ServiceRegistry.get_rpc_channel(
'sessiond', ServiceRegistry.LOCAL)
mobilityd_chan = ServiceRegistry.get_rpc_channel(
'mobilityd', ServiceRegistry.LOCAL)
contexts['rpc_stubs'] = {
'mobilityd': MobilityServiceStub(mobilityd_chan),
'sessiond': LocalSessionManagerStub(sessiond_chan),
}
if self._5G_flag_enable:
contexts['rpc_stubs'].update({'sessiond_setinterface': \
SetInterfaceForUserPlaneStub(sessiond_chan)})
# Instantiate and schedule apps
for app in manager.instantiate_apps(**contexts):
# Wrap the eventlet in asyncio so it will stop when the loop is
# stopped
future = aioeventlet.wrap_greenthread(app,
self._magma_service.loop)
# Schedule the eventlet for evaluation in service loop
asyncio.ensure_future(future)
        # In development mode, run the OpenFlow REST server so that flows can
        # be inspected for debugging
if environment.is_dev_mode():
server_thread = of_rest_server.start(manager)
future = aioeventlet.wrap_greenthread(server_thread,
self._magma_service.loop)
asyncio.ensure_future(future)
def get_table_num(self, app_name: str) -> int:
"""
Args:
app_name: Name of the app
Returns:
The app's main table number
"""
return self._table_manager.get_table_num(app_name)
def get_next_table_num(self, app_name: str) -> int:
"""
Args:
app_name: Name of the app
Returns:
The main table number of the next service.
If there are no more services after the current table,
return the EGRESS table
"""
return self._table_manager.get_next_table_num(app_name)
def is_app_enabled(self, app_name: str) -> bool:
"""
Args:
app_name: Name of the app
Returns:
Whether or not the app is enabled
"""
return self._table_manager.is_app_enabled(app_name)
def allocate_scratch_tables(self, app_name: str, count: int) -> List[int]:
"""
Args:
app_name:
Each scratch table is associated with an app. This is used to
help enforce scratch table isolation between apps.
count: Number of scratch tables to be claimed
Returns:
List of scratch table numbers
Raises:
TableNumException if there are no more available tables
"""
return self._table_manager.allocate_scratch_tables(app_name, count)
def get_scratch_table_nums(self, app_name: str) -> List[int]:
"""
Returns the scratch tables claimed by the given app.
"""
return self._table_manager.get_scratch_table_nums(app_name)
def get_all_table_assignments(self):
"""
Returns: OrderedDict of app name to tables mapping, ordered by main
table number, and app name.
"""
return self._table_manager.get_all_table_assignments()
def redisAvailable(redis_cli):
try:
redis_cli.ping()
except Exception as e:
logging.error(e)
return False
return True
| 40.228748
| 103
| 0.653642
|
ef12ced7493df356515c5c25c59fc6157ba2d5a0
| 3,204
|
py
|
Python
|
lyrebird/mock/handlers/proxy_handler.py
|
Rena-Yuan/lyrebird
|
f350a080c38dcb2acd6be2ca960fa094be51f557
|
[
"MIT"
] | 737
|
2019-02-20T06:51:50.000Z
|
2022-03-31T09:00:32.000Z
|
lyrebird/mock/handlers/proxy_handler.py
|
Rena-Yuan/lyrebird
|
f350a080c38dcb2acd6be2ca960fa094be51f557
|
[
"MIT"
] | 203
|
2019-02-19T02:57:29.000Z
|
2022-03-30T11:11:32.000Z
|
lyrebird/mock/handlers/proxy_handler.py
|
Rena-Yuan/lyrebird
|
f350a080c38dcb2acd6be2ca960fa094be51f557
|
[
"MIT"
] | 140
|
2019-02-18T03:32:50.000Z
|
2022-03-18T03:37:39.000Z
|
import urllib
import requests
from requests.packages import urllib3
from flask import Response, jsonify, stream_with_context
from .. import context
from lyrebird import application
from lyrebird.log import get_logger
from lyrebird.mock import lb_http_status
from .duplicate_request_handler import DuplicateRequest
import traceback
# Disable SSL warnings
urllib3.disable_warnings()
logger = get_logger()
class ProxyHandler:
"""
    Proxy the request according to the configured proxy rules.
"""
def handle(self, handler_context):
request = handler_context.flow['request']
origin_url = request.get('url')
logger.info(f'<Proxy> {origin_url}')
if not origin_url:
handler_context.is_proxiable = False
return
parsed_url = urllib.parse.urlparse(origin_url)
if not parsed_url.hostname:
handler_context.is_proxiable = False
return
elif parsed_url.hostname in ['localhost', '127.0.0.1', ] and parsed_url.port == application.config["mock.port"]:
DuplicateRequest().handle(handler_context)
return
data = handler_context.get_request_body()
method = request['method']
headers = handler_context.get_request_headers()
try:
r = requests.request(
method,
origin_url,
headers=headers,
data=data,
cookies=handler_context.request.cookies,
stream=True,
verify=False,
allow_redirects=False)
logger.info(f'<Proxy> SUCCESS {r.status_code} {origin_url}')
        except Exception:
trace_str = traceback.format_exc()
error_response = {
'code': 3000,
'message': 'proxy error',
'trace': trace_str
}
resp = jsonify(error_response)
resp.status = lb_http_status.STATUS_PROXY_ERROR
handler_context.response = resp
logger.info(f'<Proxy> PROXY ERROR {origin_url}\n------\ntrace:\n{trace_str}\n------\n<Proxy> PROXY ERROR {origin_url}')
return
        # Mark the data source: this response was obtained through the proxy
resp_headers = [('lyrebird', 'proxy')]
for name, value in r.raw.headers.items():
            # note: 'content-length' is only dropped in the gzip case below
if name.lower() in ('content-encoding',
'transfer-encoding'):
continue
if name.lower() == 'content-length' and 'content-encoding' in r.headers and r.headers['content-encoding'] == 'gzip':
                # requests decompresses gzip bodies automatically, so drop the
                # original content-length to match the decompressed data length
continue
resp_headers.append((name, value))
# HTTP Status code 204 => No content
if r.status_code == 204:
handler_context.response = Response(None, status=r.status_code, headers=resp_headers)
return
        # Chunk size (handler_context.response_chunk_size) tuned empirically
        # after huangyuanzhen's tests (originally a 2048-byte buffer)
handler_context.response = Response(
stream_with_context(r.iter_content(chunk_size=handler_context.response_chunk_size)),
status=r.status_code,
headers=resp_headers)
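# ---------------------------------------------------------------------------
# Hedged, illustrative sketch (not part of the original module): the response
# header filtering rule implemented in ProxyHandler.handle above, restated as
# a standalone helper so the gzip/content-length behaviour is easy to verify.
# The helper name and the sample data below are made up for this example.
def _filter_upstream_headers_example(raw_headers, upstream_headers):
    """Drop encoding-related headers the same way ProxyHandler.handle does."""
    filtered = [('lyrebird', 'proxy')]
    for name, value in raw_headers:
        if name.lower() in ('content-encoding', 'transfer-encoding'):
            continue
        if (name.lower() == 'content-length'
                and upstream_headers.get('content-encoding') == 'gzip'):
            # requests transparently decompresses gzip bodies, so the original
            # content-length no longer matches the payload that will be sent
            continue
        filtered.append((name, value))
    return filtered
if __name__ == '__main__':  # simple self-check of the sketch above
    sample = [('Content-Type', 'application/json'),
              ('Content-Encoding', 'gzip'),
              ('Content-Length', '123')]
    print(_filter_upstream_headers_example(sample, {'content-encoding': 'gzip'}))
    # expected: [('lyrebird', 'proxy'), ('Content-Type', 'application/json')]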
| 33.030928
| 131
| 0.598627
|
155864547f90034b4ab64b78b9798314af980ad8
| 30,516
|
py
|
Python
|
main.py
|
ajoaoff/mef_eline
|
a63869068e001c55c81235b83934ae29017a1796
|
[
"MIT"
] | null | null | null |
main.py
|
ajoaoff/mef_eline
|
a63869068e001c55c81235b83934ae29017a1796
|
[
"MIT"
] | null | null | null |
main.py
|
ajoaoff/mef_eline
|
a63869068e001c55c81235b83934ae29017a1796
|
[
"MIT"
] | null | null | null |
"""Main module of kytos/mef_eline Kytos Network Application.
NApp to provision circuits from user requests.
"""
import time
from threading import Lock
from flask import jsonify, request
from werkzeug.exceptions import (BadRequest, Conflict, Forbidden,
MethodNotAllowed, NotFound,
UnsupportedMediaType)
from kytos.core import KytosNApp, log, rest
from kytos.core.events import KytosEvent
from kytos.core.helpers import listen_to
from kytos.core.interface import TAG, UNI
from kytos.core.link import Link
from napps.kytos.mef_eline import settings
from napps.kytos.mef_eline.exceptions import InvalidPath
from napps.kytos.mef_eline.models import EVC, DynamicPathManager, Path
from napps.kytos.mef_eline.scheduler import CircuitSchedule, Scheduler
from napps.kytos.mef_eline.storehouse import StoreHouse
from napps.kytos.mef_eline.utils import emit_event
# pylint: disable=too-many-public-methods
class Main(KytosNApp):
"""Main class of amlight/mef_eline NApp.
This class is the entry point for this napp.
"""
def setup(self):
"""Replace the '__init__' method for the KytosNApp subclass.
The setup method is automatically called by the controller when your
application is loaded.
So, if you have any setup routine, insert it here.
"""
        # object used to schedule circuit events
self.sched = Scheduler()
# object to save and load circuits
self.storehouse = StoreHouse(self.controller)
        # set the controller that will manage the dynamic paths
DynamicPathManager.set_controller(self.controller)
# dictionary of EVCs created. It acts as a circuit buffer.
# Every create/update/delete must be synced to storehouse.
self.circuits = {}
# dictionary of EVCs by interface
self._circuits_by_interface = {}
self._lock = Lock()
self.execute_as_loop(settings.DEPLOY_EVCS_INTERVAL)
self.load_time = time.time()
self.load_all_evcs()
def execute(self):
"""Execute once when the napp is running."""
for circuit in tuple(self.circuits.values()):
if (
circuit.is_enabled() and
not circuit.is_active() and
not circuit.lock.locked()
):
if circuit.check_traces():
with circuit.lock:
circuit.activate()
circuit.sync()
else:
running_for = time.time() - self.load_time
if running_for > settings.WAIT_FOR_OLD_PATH:
with circuit.lock:
circuit.deploy()
def shutdown(self):
"""Execute when your napp is unloaded.
If you have some cleanup procedure, insert it here.
"""
@rest('/v2/evc/', methods=['GET'])
def list_circuits(self):
"""Endpoint to return circuits stored.
If archived is set to True return all circuits, else only the ones
not archived.
"""
log.debug('list_circuits /v2/evc')
archived = request.args.get('archived', False)
circuits = self.storehouse.get_data()
if not circuits:
return jsonify({}), 200
if archived:
return jsonify(circuits), 200
return jsonify({circuit_id: circuit
for circuit_id, circuit in circuits.items()
if not circuit.get('archived', False)}), 200
@rest('/v2/evc/<circuit_id>', methods=['GET'])
def get_circuit(self, circuit_id):
"""Endpoint to return a circuit based on id."""
log.debug('get_circuit /v2/evc/%s', circuit_id)
circuits = self.storehouse.get_data()
try:
result = circuits[circuit_id]
except KeyError:
result = f'circuit_id {circuit_id} not found'
log.debug('get_circuit result %s %s', result, 404)
raise NotFound(result)
status = 200
log.debug('get_circuit result %s %s', result, status)
return jsonify(result), status
@rest('/v2/evc/', methods=['POST'])
def create_circuit(self):
"""Try to create a new circuit.
Firstly, for EVPL: E-Line NApp verifies if UNI_A's requested C-VID and
UNI_Z's requested C-VID are available from the interfaces' pools. This
is checked when creating the UNI object.
        Then, the E-Line NApp requests a primary and a backup path from the
        Pathfinder NApp, using the primary_links and backup_links attributes
        submitted via REST.
        For each link composing those paths:
        - the E-Line NApp requests an available S-VID from the link VLAN pool;
        - using the S-VID obtained, it generates abstract flow entries to be
          sent to FlowManager.
        FlowManager then pushes the corresponding OpenFlow entries to the
        datapaths.
        The E-Line NApp generates an event to notify all Kytos NApps of the
        new EVC creation.
        Finally, the user is notified of the status of the request.
"""
# Try to create the circuit object
log.debug('create_circuit /v2/evc/')
try:
data = request.get_json()
except BadRequest:
result = 'The request body is not a well-formed JSON.'
log.debug('create_circuit result %s %s', result, 400)
raise BadRequest(result)
if data is None:
result = 'The request body mimetype is not application/json.'
log.debug('create_circuit result %s %s', result, 415)
raise UnsupportedMediaType(result)
try:
evc = self._evc_from_dict(data)
except ValueError as exception:
log.debug('create_circuit result %s %s', exception, 400)
raise BadRequest(str(exception))
if evc.primary_path:
try:
evc.primary_path.is_valid(evc.uni_a.interface.switch,
evc.uni_z.interface.switch,
bool(evc.circuit_scheduler))
except InvalidPath as exception:
raise BadRequest(f'primary_path is not valid: {exception}')
if evc.backup_path:
try:
evc.backup_path.is_valid(evc.uni_a.interface.switch,
evc.uni_z.interface.switch,
bool(evc.circuit_scheduler))
except InvalidPath as exception:
raise BadRequest(f'backup_path is not valid: {exception}')
# verify duplicated evc
if self._is_duplicated_evc(evc):
result = "The EVC already exists."
log.debug('create_circuit result %s %s', result, 409)
raise Conflict(result)
if (
not evc.primary_path
and evc.dynamic_backup_path is False
and evc.uni_a.interface.switch != evc.uni_z.interface.switch
):
result = "The EVC must have a primary path or allow dynamic paths."
log.debug('create_circuit result %s %s', result, 400)
raise BadRequest(result)
# store circuit in dictionary
self.circuits[evc.id] = evc
# save circuit
self.storehouse.save_evc(evc)
# Schedule the circuit deploy
self.sched.add(evc)
# Circuit has no schedule, deploy now
if not evc.circuit_scheduler:
with evc.lock:
evc.deploy()
# Notify users
event = KytosEvent(name='kytos.mef_eline.created',
content=evc.as_dict())
self.controller.buffers.app.put(event)
result = {"circuit_id": evc.id}
status = 201
log.debug('create_circuit result %s %s', result, status)
emit_event(self.controller, 'created', evc_id=evc.id)
return jsonify(result), status
@rest('/v2/evc/<circuit_id>', methods=['PATCH'])
def update(self, circuit_id):
"""Update a circuit based on payload.
The EVC required attributes (name, uni_a, uni_z) can't be updated.
"""
log.debug('update /v2/evc/%s', circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f'circuit_id {circuit_id} not found'
log.debug('update result %s %s', result, 404)
raise NotFound(result)
if evc.archived:
result = "Can't update archived EVC"
log.debug('update result %s %s', result, 405)
raise MethodNotAllowed(['GET'], result)
try:
data = request.get_json()
except BadRequest:
result = 'The request body is not a well-formed JSON.'
log.debug('update result %s %s', result, 400)
raise BadRequest(result)
if data is None:
result = 'The request body mimetype is not application/json.'
log.debug('update result %s %s', result, 415)
raise UnsupportedMediaType(result)
try:
enable, redeploy = \
evc.update(**self._evc_dict_with_instances(data))
except ValueError as exception:
log.error(exception)
log.debug('update result %s %s', exception, 400)
raise BadRequest(str(exception))
if evc.is_active():
if enable is False: # disable if active
with evc.lock:
evc.remove()
elif redeploy is not None: # redeploy if active
with evc.lock:
evc.remove()
evc.deploy()
else:
if evc.is_enabled(): # enable if inactive
with evc.lock:
evc.deploy()
result = {evc.id: evc.as_dict()}
status = 200
log.debug('update result %s %s', result, status)
emit_event(self.controller, 'updated', evc_id=evc.id, data=data)
return jsonify(result), status
@rest('/v2/evc/<circuit_id>', methods=['DELETE'])
def delete_circuit(self, circuit_id):
"""Remove a circuit.
First, the flows are removed from the switches, and then the EVC is
disabled.
"""
log.debug('delete_circuit /v2/evc/%s', circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f'circuit_id {circuit_id} not found'
log.debug('delete_circuit result %s %s', result, 404)
raise NotFound(result)
if evc.archived:
result = f'Circuit {circuit_id} already removed'
log.debug('delete_circuit result %s %s', result, 404)
raise NotFound(result)
log.info('Removing %s', evc)
evc.remove_current_flows()
evc.deactivate()
evc.disable()
self.sched.remove(evc)
evc.archive()
evc.sync()
log.info('EVC removed. %s', evc)
result = {'response': f'Circuit {circuit_id} removed'}
status = 200
log.debug('delete_circuit result %s %s', result, status)
emit_event(self.controller, 'deleted', evc_id=evc.id)
return jsonify(result), status
@rest('v2/evc/<circuit_id>/metadata', methods=['GET'])
def get_metadata(self, circuit_id):
"""Get metadata from an EVC."""
try:
return jsonify({"metadata":
self.circuits[circuit_id].metadata}), 200
except KeyError:
raise NotFound(f'circuit_id {circuit_id} not found.')
@rest('v2/evc/<circuit_id>/metadata', methods=['POST'])
def add_metadata(self, circuit_id):
"""Add metadata to an EVC."""
try:
metadata = request.get_json()
content_type = request.content_type
except BadRequest:
result = 'The request body is not a well-formed JSON.'
raise BadRequest(result)
if content_type is None:
result = 'The request body is empty.'
raise BadRequest(result)
if metadata is None:
if content_type != 'application/json':
result = ('The content type must be application/json '
f'(received {content_type}).')
else:
result = 'Metadata is empty.'
raise UnsupportedMediaType(result)
try:
evc = self.circuits[circuit_id]
except KeyError:
raise NotFound(f'circuit_id {circuit_id} not found.')
evc.extend_metadata(metadata)
evc.sync()
return jsonify("Operation successful"), 201
@rest('v2/evc/<circuit_id>/metadata/<key>', methods=['DELETE'])
def delete_metadata(self, circuit_id, key):
"""Delete metadata from an EVC."""
try:
evc = self.circuits[circuit_id]
except KeyError:
raise NotFound(f'circuit_id {circuit_id} not found.')
evc.remove_metadata(key)
evc.sync()
return jsonify("Operation successful"), 200
@rest('/v2/evc/<circuit_id>/redeploy', methods=['PATCH'])
def redeploy(self, circuit_id):
"""Endpoint to force the redeployment of an EVC."""
log.debug('redeploy /v2/evc/%s/redeploy', circuit_id)
try:
evc = self.circuits[circuit_id]
except KeyError:
result = f'circuit_id {circuit_id} not found'
raise NotFound(result)
if evc.is_enabled():
with evc.lock:
evc.remove_current_flows()
evc.deploy()
result = {'response': f'Circuit {circuit_id} redeploy received.'}
status = 202
else:
result = {'response': f'Circuit {circuit_id} is disabled.'}
status = 409
return jsonify(result), status
@rest('/v2/evc/schedule', methods=['GET'])
def list_schedules(self):
"""Endpoint to return all schedules stored for all circuits.
Return a JSON with the following template:
[{"schedule_id": <schedule_id>,
"circuit_id": <circuit_id>,
"schedule": <schedule object>}]
"""
log.debug('list_schedules /v2/evc/schedule')
circuits = self.storehouse.get_data().values()
if not circuits:
result = {}
status = 200
return jsonify(result), status
result = []
status = 200
for circuit in circuits:
circuit_scheduler = circuit.get("circuit_scheduler")
if circuit_scheduler:
for scheduler in circuit_scheduler:
value = {"schedule_id": scheduler.get("id"),
"circuit_id": circuit.get("id"),
"schedule": scheduler}
result.append(value)
log.debug('list_schedules result %s %s', result, status)
return jsonify(result), status
@rest('/v2/evc/schedule/', methods=['POST'])
def create_schedule(self):
"""
Create a new schedule for a given circuit.
        This service does not check for conflicts with other schedules.
Payload example:
{
"circuit_id":"aa:bb:cc",
"schedule": {
"date": "2019-08-07T14:52:10.967Z",
"interval": "string",
"frequency": "1 * * * *",
"action": "create"
}
}
"""
log.debug('create_schedule /v2/evc/schedule/')
json_data = self._json_from_request('create_schedule')
try:
circuit_id = json_data['circuit_id']
except TypeError:
result = 'The payload should have a dictionary.'
log.debug('create_schedule result %s %s', result, 400)
raise BadRequest(result)
except KeyError:
result = 'Missing circuit_id.'
log.debug('create_schedule result %s %s', result, 400)
raise BadRequest(result)
try:
schedule_data = json_data['schedule']
except KeyError:
result = 'Missing schedule data.'
log.debug('create_schedule result %s %s', result, 400)
raise BadRequest(result)
# Get EVC from circuits buffer
circuits = self._get_circuits_buffer()
# get the circuit
evc = circuits.get(circuit_id)
        # abort if the circuit does not exist
if not evc:
result = f'circuit_id {circuit_id} not found'
log.debug('create_schedule result %s %s', result, 404)
raise NotFound(result)
        # Cannot modify deleted or archived circuits
if evc.archived:
result = f'Circuit {circuit_id} is archived. Update is forbidden.'
log.debug('create_schedule result %s %s', result, 403)
raise Forbidden(result)
# new schedule from dict
new_schedule = CircuitSchedule.from_dict(schedule_data)
# If there is no schedule, create the list
if not evc.circuit_scheduler:
evc.circuit_scheduler = []
# Add the new schedule
evc.circuit_scheduler.append(new_schedule)
# Add schedule job
self.sched.add_circuit_job(evc, new_schedule)
# save circuit to storehouse
evc.sync()
result = new_schedule.as_dict()
status = 201
log.debug('create_schedule result %s %s', result, status)
return jsonify(result), status
@rest('/v2/evc/schedule/<schedule_id>', methods=['PATCH'])
def update_schedule(self, schedule_id):
"""Update a schedule.
        Change all attributes of the given schedule in an EVC circuit.
        The schedule ID is preserved by default.
Payload example:
{
"date": "2019-08-07T14:52:10.967Z",
"interval": "string",
"frequency": "1 * * *",
"action": "create"
}
"""
log.debug('update_schedule /v2/evc/schedule/%s', schedule_id)
# Try to find a circuit schedule
evc, found_schedule = self._find_evc_by_schedule_id(schedule_id)
        # Cannot modify deleted or archived circuits
if not found_schedule:
result = f'schedule_id {schedule_id} not found'
log.debug('update_schedule result %s %s', result, 404)
raise NotFound(result)
if evc.archived:
result = f'Circuit {evc.id} is archived. Update is forbidden.'
log.debug('update_schedule result %s %s', result, 403)
raise Forbidden(result)
data = self._json_from_request('update_schedule')
new_schedule = CircuitSchedule.from_dict(data)
new_schedule.id = found_schedule.id
# Remove the old schedule
evc.circuit_scheduler.remove(found_schedule)
# Append the modified schedule
evc.circuit_scheduler.append(new_schedule)
        # Cancel the job of the old schedule
self.sched.cancel_job(found_schedule.id)
# Add the new circuit schedule
self.sched.add_circuit_job(evc, new_schedule)
# Save EVC to the storehouse
evc.sync()
result = new_schedule.as_dict()
status = 200
log.debug('update_schedule result %s %s', result, status)
return jsonify(result), status
@rest('/v2/evc/schedule/<schedule_id>', methods=['DELETE'])
def delete_schedule(self, schedule_id):
"""Remove a circuit schedule.
Remove the Schedule from EVC.
Remove the Schedule from cron job.
Save the EVC to the Storehouse.
"""
log.debug('delete_schedule /v2/evc/schedule/%s', schedule_id)
evc, found_schedule = self._find_evc_by_schedule_id(schedule_id)
        # Cannot modify deleted or archived circuits
if not found_schedule:
result = f'schedule_id {schedule_id} not found'
log.debug('delete_schedule result %s %s', result, 404)
raise NotFound(result)
if evc.archived:
result = f'Circuit {evc.id} is archived. Update is forbidden.'
log.debug('delete_schedule result %s %s', result, 403)
raise Forbidden(result)
# Remove the old schedule
evc.circuit_scheduler.remove(found_schedule)
        # Cancel the job of the removed schedule
self.sched.cancel_job(found_schedule.id)
# Save EVC to the storehouse
evc.sync()
result = "Schedule removed"
status = 200
log.debug('delete_schedule result %s %s', result, status)
return jsonify(result), status
def _is_duplicated_evc(self, evc):
"""Verify if the circuit given is duplicated with the stored evcs.
Args:
evc (EVC): circuit to be analysed.
Returns:
boolean: True if the circuit is duplicated, otherwise False.
"""
for circuit in tuple(self.circuits.values()):
if not circuit.archived and circuit.shares_uni(evc):
return True
return False
@listen_to('kytos/topology.link_up')
def handle_link_up(self, event):
"""Change circuit when link is up or end_maintenance."""
log.debug("Event handle_link_up %s", event)
for evc in self.circuits.values():
if evc.is_enabled() and not evc.archived:
with evc.lock:
evc.handle_link_up(event.content['link'])
@listen_to('kytos/topology.link_down')
def handle_link_down(self, event):
"""Change circuit when link is down or under_mantenance."""
log.debug("Event handle_link_down %s", event)
for evc in self.circuits.values():
with evc.lock:
if evc.is_affected_by_link(event.content['link']):
log.debug(f'Handling evc {evc.id} on link down')
if evc.handle_link_down():
emit_event(self.controller, 'redeployed_link_down',
evc_id=evc.id)
else:
emit_event(self.controller, 'error_redeploy_link_down',
evc_id=evc.id)
def load_circuits_by_interface(self, circuits):
"""Load circuits in storehouse for in-memory dictionary."""
for circuit_id, circuit in circuits.items():
if circuit['archived'] is True:
continue
intf_a = circuit['uni_a']['interface_id']
self.add_to_dict_of_sets(intf_a, circuit_id)
intf_z = circuit['uni_z']['interface_id']
self.add_to_dict_of_sets(intf_z, circuit_id)
for path in ('current_path', 'primary_path', 'backup_path'):
for link in circuit[path]:
intf_a = link['endpoint_a']['id']
self.add_to_dict_of_sets(intf_a, circuit_id)
intf_b = link['endpoint_b']['id']
self.add_to_dict_of_sets(intf_b, circuit_id)
def add_to_dict_of_sets(self, intf, circuit_id):
"""Add a single item to the dictionary of circuits by interface."""
if intf not in self._circuits_by_interface:
self._circuits_by_interface[intf] = set()
self._circuits_by_interface[intf].add(circuit_id)
@listen_to('kytos/topology.port.created')
def load_evcs(self, event):
"""Try to load the unloaded EVCs from storehouse."""
with self._lock:
log.debug("Event load_evcs %s", event)
circuits = self.storehouse.get_data()
if not self._circuits_by_interface:
self.load_circuits_by_interface(circuits)
interface_id = '{}:{}'.format(event.content['switch'],
event.content['port'])
for circuit_id in self._circuits_by_interface.get(interface_id,
[]):
if circuit_id in circuits and circuit_id not in self.circuits:
self._load_evc(circuits[circuit_id])
def load_all_evcs(self):
"""Try to load all EVCs on startup."""
for circuit_id, circuit in self.storehouse.get_data().items():
if circuit_id not in self.circuits:
self._load_evc(circuit)
def _load_evc(self, circuit_dict):
"""Load one EVC from storehouse to memory."""
try:
evc = self._evc_from_dict(circuit_dict)
except ValueError as exception:
log.error(
f'Could not load EVC {circuit_dict["id"]} '
f'because {exception}')
return None
if evc.archived:
return None
evc.deactivate()
evc.sync()
self.circuits.setdefault(evc.id, evc)
self.sched.add(evc)
return evc
@listen_to('kytos/flow_manager.flow.error')
def handle_flow_mod_error(self, event):
"""Handle flow mod errors related to an EVC."""
flow = event.content['flow']
command = event.content.get('error_command')
if command != 'add':
return
evc_id = f'{flow.cookie:x}'
evc = self.circuits.get(evc_id)
if evc:
evc.remove_current_flows()
def _evc_dict_with_instances(self, evc_dict):
"""Convert some dict values to instance of EVC classes.
This method will convert: [UNI, Link]
"""
data = evc_dict.copy() # Do not modify the original dict
for attribute, value in data.items():
# Get multiple attributes.
# Ex: uni_a, uni_z
if 'uni' in attribute:
try:
data[attribute] = self._uni_from_dict(value)
except ValueError as exc:
raise ValueError(f'Error creating UNI: {exc}')
if attribute == 'circuit_scheduler':
data[attribute] = []
for schedule in value:
data[attribute].append(CircuitSchedule.from_dict(schedule))
# Get multiple attributes.
# Ex: primary_links,
# backup_links,
# current_links_cache,
# primary_links_cache,
# backup_links_cache
if 'links' in attribute:
data[attribute] = [self._link_from_dict(link)
for link in value]
# Get multiple attributes.
# Ex: current_path,
# primary_path,
# backup_path
if 'path' in attribute and attribute != 'dynamic_backup_path':
data[attribute] = Path([self._link_from_dict(link)
for link in value])
return data
def _evc_from_dict(self, evc_dict):
data = self._evc_dict_with_instances(evc_dict)
return EVC(self.controller, **data)
def _uni_from_dict(self, uni_dict):
"""Return a UNI object from python dict."""
if uni_dict is None:
return False
interface_id = uni_dict.get("interface_id")
interface = self.controller.get_interface_by_id(interface_id)
if interface is None:
raise ValueError(f'Could not instantiate interface {interface_id}')
tag_dict = uni_dict.get('tag', None)
if tag_dict:
tag = TAG.from_dict(tag_dict)
else:
tag = None
uni = UNI(interface, tag)
return uni
def _link_from_dict(self, link_dict):
"""Return a Link object from python dict."""
id_a = link_dict.get('endpoint_a').get('id')
id_b = link_dict.get('endpoint_b').get('id')
endpoint_a = self.controller.get_interface_by_id(id_a)
endpoint_b = self.controller.get_interface_by_id(id_b)
link = Link(endpoint_a, endpoint_b)
if 'metadata' in link_dict:
link.extend_metadata(link_dict.get('metadata'))
s_vlan = link.get_metadata('s_vlan')
if s_vlan:
tag = TAG.from_dict(s_vlan)
if tag is False:
error_msg = f'Could not instantiate tag from dict {s_vlan}'
raise ValueError(error_msg)
link.update_metadata('s_vlan', tag)
return link
def _find_evc_by_schedule_id(self, schedule_id):
"""
Find an EVC and CircuitSchedule based on schedule_id.
:param schedule_id: Schedule ID
:return: EVC and Schedule
"""
circuits = self._get_circuits_buffer()
found_schedule = None
evc = None
# pylint: disable=unused-variable
for c_id, circuit in circuits.items():
for schedule in circuit.circuit_scheduler:
if schedule.id == schedule_id:
found_schedule = schedule
evc = circuit
break
if found_schedule:
break
return evc, found_schedule
def _get_circuits_buffer(self):
"""
Return the circuit buffer.
If the buffer is empty, try to load data from storehouse.
"""
if not self.circuits:
# Load storehouse circuits to buffer
circuits = self.storehouse.get_data()
for c_id, circuit in circuits.items():
evc = self._evc_from_dict(circuit)
self.circuits[c_id] = evc
return self.circuits
@staticmethod
def _json_from_request(caller):
"""Return a json from request.
        If it is not possible to get JSON from the request, log the caller and
        the error that occurred (for debugging), and raise an exception.
"""
try:
json_data = request.get_json()
except ValueError as exception:
log.error(exception)
log.debug(f'{caller} result {exception} 400')
raise BadRequest(str(exception))
except BadRequest:
result = 'The request is not a valid JSON.'
log.debug(f'{caller} result {result} 400')
raise BadRequest(result)
if json_data is None:
result = 'Content-Type must be application/json'
log.debug(f'{caller} result {result} 415')
raise UnsupportedMediaType(result)
return json_data
| 36.502392
| 79
| 0.579696
|
d275a74df6afb0d4e12a36bf299515bddea1659d
| 70
|
py
|
Python
|
genie/models/__init__.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 8
|
2022-02-08T11:12:37.000Z
|
2022-03-16T08:27:50.000Z
|
genie/models/__init__.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 1
|
2022-03-07T07:36:24.000Z
|
2022-03-07T20:58:12.000Z
|
genie/models/__init__.py
|
epfl-dlab/GenIE
|
62ae6af936c9375c36d3d5ad60401bf579875bd9
|
[
"MIT"
] | 7
|
2022-02-22T22:48:35.000Z
|
2022-03-18T05:18:30.000Z
|
from .genie_base_hf import GenieHF
from .genie_base_pl import GeniePL
| 23.333333
| 34
| 0.857143
|
25f658c22b0a38bd7ad8abc2167719a2a704c363
| 156,169
|
py
|
Python
|
nuitka/nodes/AttributeNodesGenerated.py
|
roired/Nuitka
|
d240a14b4b00c03d60050b4ba67382fb09c7dba8
|
[
"Apache-2.0"
] | null | null | null |
nuitka/nodes/AttributeNodesGenerated.py
|
roired/Nuitka
|
d240a14b4b00c03d60050b4ba67382fb09c7dba8
|
[
"Apache-2.0"
] | null | null | null |
nuitka/nodes/AttributeNodesGenerated.py
|
roired/Nuitka
|
d240a14b4b00c03d60050b4ba67382fb09c7dba8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Specialized attribute nodes
WARNING, this code is GENERATED. Modify the template AttributeNodeFixed.py.j2 instead!
"""
from nuitka.specs.BuiltinParameterSpecs import extractBuiltinArgs
from .AttributeLookupNodes import ExpressionAttributeLookupFixedBase
from .NodeBases import SideEffectsFromChildrenMixin
attribute_classes = {}
attribute_typed_classes = set()
class ExpressionAttributeLookupFixedCapitalize(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'capitalize' of an object.
Typically code like: source.capitalize
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_CAPITALIZE"
attribute_name = "capitalize"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrCapitalize(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'capitalize' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="capitalize",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="capitalize"
)
attribute_classes["capitalize"] = ExpressionAttributeLookupFixedCapitalize
from nuitka.specs.BuiltinStrOperationSpecs import str_capitalize_spec
class ExpressionAttributeLookupStrCapitalize(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedCapitalize
):
"""Attribute Capitalize lookup on a str.
Typically code like: some_str.capitalize
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_CAPITALIZE"
attribute_name = "capitalize"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationCapitalize(source_ref):
from .StrNodes import ExpressionStrOperationCapitalize
return ExpressionStrOperationCapitalize(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationCapitalize,
builtin_spec=str_capitalize_spec,
)
return result, "new_expression", "Call to 'capitalize' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrCapitalize)
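# Hedged, illustrative summary of the specialisation pattern shown above (no
# extra machinery is introduced here): for source like
#
#     greeting = "hello"
#     greeting.capitalize()
#
# the generic EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_CAPITALIZE node is narrowed to
# EXPRESSION_ATTRIBUTE_LOOKUP_STR_CAPITALIZE once the expression is known to
# have exact str shape, and the call on it is then folded into an
# ExpressionStrOperationCapitalize node by computeExpressionCall().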
class ExpressionAttributeLookupFixedCasefold(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'casefold' of an object.
Typically code like: source.casefold
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_CASEFOLD"
attribute_name = "casefold"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrCasefold(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'casefold' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="casefold",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="casefold"
)
attribute_classes["casefold"] = ExpressionAttributeLookupFixedCasefold
class ExpressionAttributeLookupStrCasefold(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedCasefold
):
"""Attribute Casefold lookup on a str.
Typically code like: some_str.casefold
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_CASEFOLD"
attribute_name = "casefold"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationCasefold is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrCasefold)
class ExpressionAttributeLookupFixedCenter(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'center' of an object.
Typically code like: source.center
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_CENTER"
attribute_name = "center"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrCenter(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'center' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="center",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="center"
)
attribute_classes["center"] = ExpressionAttributeLookupFixedCenter
class ExpressionAttributeLookupStrCenter(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedCenter
):
"""Attribute Center lookup on a str.
Typically code like: some_str.center
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_CENTER"
attribute_name = "center"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationCenter is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrCenter)
class ExpressionAttributeLookupFixedClear(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'clear' of an object.
Typically code like: source.clear
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_CLEAR"
attribute_name = "clear"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictClear(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'clear' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="clear",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="clear"
)
attribute_classes["clear"] = ExpressionAttributeLookupFixedClear
from nuitka.specs.BuiltinDictOperationSpecs import dict_clear_spec
class ExpressionAttributeLookupDictClear(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedClear
):
"""Attribute Clear lookup on a dict.
Typically code like: some_dict.clear
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_CLEAR"
attribute_name = "clear"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationClear(source_ref):
from .DictionaryNodes import ExpressionDictOperationClear
return ExpressionDictOperationClear(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationClear,
builtin_spec=dict_clear_spec,
)
return result, "new_expression", "Call to 'clear' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictClear)
class ExpressionAttributeLookupFixedCopy(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'copy' of an object.
Typically code like: source.copy
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_COPY"
attribute_name = "copy"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictCopy(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'copy' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="copy",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="copy"
)
attribute_classes["copy"] = ExpressionAttributeLookupFixedCopy
from nuitka.specs.BuiltinDictOperationSpecs import dict_copy_spec
class ExpressionAttributeLookupDictCopy(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedCopy
):
"""Attribute Copy lookup on a dict.
Typically code like: some_dict.copy
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_COPY"
attribute_name = "copy"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationCopy(source_ref):
from .DictionaryNodes import ExpressionDictOperationCopy
return ExpressionDictOperationCopy(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationCopy,
builtin_spec=dict_copy_spec,
)
return result, "new_expression", "Call to 'copy' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictCopy)
class ExpressionAttributeLookupFixedCount(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'count' of an object.
Typically code like: source.count
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_COUNT"
attribute_name = "count"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrCount(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'count' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="count",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="count"
)
attribute_classes["count"] = ExpressionAttributeLookupFixedCount
class ExpressionAttributeLookupStrCount(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedCount
):
"""Attribute Count lookup on a str.
Typically code like: some_str.count
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_COUNT"
attribute_name = "count"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationCount is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrCount)
class ExpressionAttributeLookupFixedDecode(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'decode' of an object.
Typically code like: source.decode
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_DECODE"
attribute_name = "decode"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrDecode(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'decode' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="decode",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="decode"
)
attribute_classes["decode"] = ExpressionAttributeLookupFixedDecode
class ExpressionAttributeLookupStrDecode(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedDecode
):
"""Attribute Decode lookup on a str.
Typically code like: some_str.decode
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_DECODE"
attribute_name = "decode"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationDecode is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrDecode)
class ExpressionAttributeLookupFixedEncode(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'encode' of an object.
Typically code like: source.encode
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ENCODE"
attribute_name = "encode"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrEncode(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'encode' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="encode",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="encode"
)
attribute_classes["encode"] = ExpressionAttributeLookupFixedEncode
class ExpressionAttributeLookupStrEncode(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedEncode
):
"""Attribute Encode lookup on a str.
Typically code like: some_str.encode
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ENCODE"
attribute_name = "encode"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationEncode is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrEncode)
class ExpressionAttributeLookupFixedEndswith(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'endswith' of an object.
Typically code like: source.endswith
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ENDSWITH"
attribute_name = "endswith"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrEndswith(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'endswith' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="endswith",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="endswith"
)
attribute_classes["endswith"] = ExpressionAttributeLookupFixedEndswith
from nuitka.specs.BuiltinStrOperationSpecs import str_endswith_spec
class ExpressionAttributeLookupStrEndswith(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedEndswith
):
"""Attribute Endswith lookup on a str.
Typically code like: some_str.endswith
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ENDSWITH"
attribute_name = "endswith"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationEndswith(suffix, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationEndswith4
return ExpressionStrOperationEndswith4(
str_arg=self.subnode_expression,
suffix=suffix,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationEndswith3
return ExpressionStrOperationEndswith3(
str_arg=self.subnode_expression,
suffix=suffix,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationEndswith2
return ExpressionStrOperationEndswith2(
str_arg=self.subnode_expression,
suffix=suffix,
source_ref=source_ref,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationEndswith,
builtin_spec=str_endswith_spec,
)
return result, "new_expression", "Call to 'endswith' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrEndswith)
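# Hedged, illustrative note on the arity dispatch above: a recognized call is
# mapped to the operation node matching how many optional start/end arguments
# were provided, e.g.
#
#     some_str.endswith("x")        -> ExpressionStrOperationEndswith2
#     some_str.endswith("x", 1)     -> ExpressionStrOperationEndswith3
#     some_str.endswith("x", 1, 5)  -> ExpressionStrOperationEndswith4
#
# extractBuiltinArgs() normalises the call arguments against str_endswith_spec
# before the wrapper chooses among these nodes.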
class ExpressionAttributeLookupFixedExpandtabs(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'expandtabs' of an object.
Typically code like: source.expandtabs
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_EXPANDTABS"
attribute_name = "expandtabs"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrExpandtabs(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'expandtabs' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="expandtabs",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="expandtabs"
)
attribute_classes["expandtabs"] = ExpressionAttributeLookupFixedExpandtabs
class ExpressionAttributeLookupStrExpandtabs(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedExpandtabs
):
"""Attribute Expandtabs lookup on a str.
Typically code like: some_str.expandtabs
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_EXPANDTABS"
attribute_name = "expandtabs"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationExpandtabs is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrExpandtabs)
class ExpressionAttributeLookupFixedFind(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'find' of an object.
Typically code like: source.find
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_FIND"
attribute_name = "find"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrFind(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'find' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="find",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="find"
)
attribute_classes["find"] = ExpressionAttributeLookupFixedFind
from nuitka.specs.BuiltinStrOperationSpecs import str_find_spec
class ExpressionAttributeLookupStrFind(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedFind
):
"""Attribute Find lookup on a str.
Typically code like: some_str.find
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_FIND"
attribute_name = "find"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationFind(sub, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationFind4
return ExpressionStrOperationFind4(
str_arg=self.subnode_expression,
sub=sub,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationFind3
return ExpressionStrOperationFind3(
str_arg=self.subnode_expression,
sub=sub,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationFind2
return ExpressionStrOperationFind2(
str_arg=self.subnode_expression, sub=sub, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationFind,
builtin_spec=str_find_spec,
)
return result, "new_expression", "Call to 'find' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrFind)
class ExpressionAttributeLookupFixedFormat(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'format' of an object.
Typically code like: source.format
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_FORMAT"
attribute_name = "format"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrFormat(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'format' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="format",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="format"
)
attribute_classes["format"] = ExpressionAttributeLookupFixedFormat
class ExpressionAttributeLookupStrFormat(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedFormat
):
"""Attribute Format lookup on a str.
Typically code like: some_str.format
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_FORMAT"
attribute_name = "format"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationFormat is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrFormat)
class ExpressionAttributeLookupFixedFormatmap(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'format_map' of an object.
Typically code like: source.format_map
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_FORMATMAP"
attribute_name = "format_map"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrFormatmap(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'format_map' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="format_map",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="format_map"
)
attribute_classes["format_map"] = ExpressionAttributeLookupFixedFormatmap
class ExpressionAttributeLookupStrFormatmap(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedFormatmap
):
"""Attribute Formatmap lookup on a str.
Typically code like: some_str.format_map
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_FORMATMAP"
attribute_name = "format_map"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationFormatmap is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrFormatmap)
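# Note on the ``str is not bytes`` guard used above: it is effectively a
# Python version check that is only true under Python3, so attributes such as
# "format_map" (and "isascii", "isdecimal", "isidentifier", ... further below)
# are only resolved to str-typed lookup nodes when compiling Python3 code.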
class ExpressionAttributeLookupFixedFromkeys(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'fromkeys' of an object.
Typically code like: source.fromkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_FROMKEYS"
attribute_name = "fromkeys"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictFromkeys(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'fromkeys' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="fromkeys",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="fromkeys"
)
attribute_classes["fromkeys"] = ExpressionAttributeLookupFixedFromkeys
class ExpressionAttributeLookupDictFromkeys(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedFromkeys
):
"""Attribute Fromkeys lookup on a dict.
Typically code like: some_dict.fromkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_FROMKEYS"
attribute_name = "fromkeys"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as dict operation ExpressionDictOperationFromkeys is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupDictFromkeys)
class ExpressionAttributeLookupFixedGet(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'get' of an object.
Typically code like: source.get
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_GET"
attribute_name = "get"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictGet(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'get' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="get",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="get"
)
attribute_classes["get"] = ExpressionAttributeLookupFixedGet
from nuitka.specs.BuiltinDictOperationSpecs import dict_get_spec
class ExpressionAttributeLookupDictGet(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedGet
):
"""Attribute Get lookup on a dict.
Typically code like: some_dict.get
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_GET"
attribute_name = "get"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationGet(key, default, source_ref):
if default is not None:
from .DictionaryNodes import ExpressionDictOperationGet3
return ExpressionDictOperationGet3(
dict_arg=self.subnode_expression,
key=key,
default=default,
source_ref=source_ref,
)
else:
from .DictionaryNodes import ExpressionDictOperationGet2
return ExpressionDictOperationGet2(
dict_arg=self.subnode_expression, key=key, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationGet,
builtin_spec=dict_get_spec,
)
return result, "new_expression", "Call to 'get' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictGet)
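# Sketch of the dict.get lowering, assuming ``d`` is known to be an exact dict:
#
#     d.get(key)           -> ExpressionDictOperationGet2(dict_arg=d, key=key)
#     d.get(key, default)  -> ExpressionDictOperationGet3(..., default=default)
#
# onExceptionRaiseExit(BaseException) is announced first because, until the
# replacement node is in place, the call has to be treated as able to raise
# anything; the next optimization pass can then judge the new node more
# precisely.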
class ExpressionAttributeLookupFixedHaskey(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'has_key' of an object.
Typically code like: source.has_key
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_HASKEY"
attribute_name = "has_key"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictHaskey(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'has_key' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="has_key",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="has_key"
)
attribute_classes["has_key"] = ExpressionAttributeLookupFixedHaskey
from nuitka.specs.BuiltinDictOperationSpecs import dict_has_key_spec
class ExpressionAttributeLookupDictHaskey(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedHaskey
):
"""Attribute Haskey lookup on a dict.
Typically code like: some_dict.has_key
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_HASKEY"
attribute_name = "has_key"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationHaskey(key, source_ref):
from .DictionaryNodes import ExpressionDictOperationHaskey
return ExpressionDictOperationHaskey(
dict_arg=self.subnode_expression, key=key, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationHaskey,
builtin_spec=dict_has_key_spec,
)
return result, "new_expression", "Call to 'has_key' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictHaskey)
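# The ``str is bytes`` guard above is the Python2 counterpart of the Python3
# check used for "format_map": dict.has_key only exists under Python2, so the
# dict-typed lookup node is only created there.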
class ExpressionAttributeLookupFixedIndex(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'index' of an object.
Typically code like: source.index
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_INDEX"
attribute_name = "index"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIndex(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'index' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="index",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="index"
)
attribute_classes["index"] = ExpressionAttributeLookupFixedIndex
from nuitka.specs.BuiltinStrOperationSpecs import str_index_spec
class ExpressionAttributeLookupStrIndex(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIndex
):
"""Attribute Index lookup on a str.
Typically code like: some_str.index
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_INDEX"
attribute_name = "index"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIndex(sub, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationIndex4
return ExpressionStrOperationIndex4(
str_arg=self.subnode_expression,
sub=sub,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationIndex3
return ExpressionStrOperationIndex3(
str_arg=self.subnode_expression,
sub=sub,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationIndex2
return ExpressionStrOperationIndex2(
str_arg=self.subnode_expression, sub=sub, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIndex,
builtin_spec=str_index_spec,
)
return result, "new_expression", "Call to 'index' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIndex)
class ExpressionAttributeLookupFixedIsalnum(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isalnum' of an object.
Typically code like: source.isalnum
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISALNUM"
attribute_name = "isalnum"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsalnum(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isalnum' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isalnum",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isalnum"
)
attribute_classes["isalnum"] = ExpressionAttributeLookupFixedIsalnum
from nuitka.specs.BuiltinStrOperationSpecs import str_isalnum_spec
class ExpressionAttributeLookupStrIsalnum(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsalnum
):
"""Attribute Isalnum lookup on a str.
Typically code like: some_str.isalnum
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISALNUM"
attribute_name = "isalnum"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIsalnum(source_ref):
from .StrNodes import ExpressionStrOperationIsalnum
return ExpressionStrOperationIsalnum(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIsalnum,
builtin_spec=str_isalnum_spec,
)
return result, "new_expression", "Call to 'isalnum' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIsalnum)
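# Sketch of the zero-argument predicate pattern shared by "isalnum",
# "isalpha", "isdigit", "islower", "isspace", "istitle" and "isupper" below,
# assuming ``s`` is known to be an exact str:
#
#     s.isalnum()  -> ExpressionStrOperationIsalnum(str_arg=s)
#
# The wrapper only needs the source reference; the call is still routed
# through extractBuiltinArgs with the matching *_spec.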
class ExpressionAttributeLookupFixedIsalpha(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isalpha' of an object.
Typically code like: source.isalpha
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISALPHA"
attribute_name = "isalpha"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsalpha(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isalpha' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isalpha",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isalpha"
)
attribute_classes["isalpha"] = ExpressionAttributeLookupFixedIsalpha
from nuitka.specs.BuiltinStrOperationSpecs import str_isalpha_spec
class ExpressionAttributeLookupStrIsalpha(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsalpha
):
"""Attribute Isalpha lookup on a str.
Typically code like: some_str.isalpha
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISALPHA"
attribute_name = "isalpha"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIsalpha(source_ref):
from .StrNodes import ExpressionStrOperationIsalpha
return ExpressionStrOperationIsalpha(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIsalpha,
builtin_spec=str_isalpha_spec,
)
return result, "new_expression", "Call to 'isalpha' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIsalpha)
class ExpressionAttributeLookupFixedIsascii(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isascii' of an object.
Typically code like: source.isascii
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISASCII"
attribute_name = "isascii"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsascii(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isascii' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isascii",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isascii"
)
attribute_classes["isascii"] = ExpressionAttributeLookupFixedIsascii
class ExpressionAttributeLookupStrIsascii(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsascii
):
"""Attribute Isascii lookup on a str.
Typically code like: some_str.isascii
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISASCII"
attribute_name = "isascii"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationIsascii is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrIsascii)
class ExpressionAttributeLookupFixedIsdecimal(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isdecimal' of an object.
Typically code like: source.isdecimal
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISDECIMAL"
attribute_name = "isdecimal"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsdecimal(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isdecimal' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isdecimal",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isdecimal"
)
attribute_classes["isdecimal"] = ExpressionAttributeLookupFixedIsdecimal
class ExpressionAttributeLookupStrIsdecimal(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsdecimal
):
"""Attribute Isdecimal lookup on a str.
Typically code like: some_str.isdecimal
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISDECIMAL"
attribute_name = "isdecimal"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationIsdecimal is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrIsdecimal)
class ExpressionAttributeLookupFixedIsdigit(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isdigit' of an object.
Typically code like: source.isdigit
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISDIGIT"
attribute_name = "isdigit"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsdigit(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isdigit' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isdigit",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isdigit"
)
attribute_classes["isdigit"] = ExpressionAttributeLookupFixedIsdigit
from nuitka.specs.BuiltinStrOperationSpecs import str_isdigit_spec
class ExpressionAttributeLookupStrIsdigit(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsdigit
):
"""Attribute Isdigit lookup on a str.
Typically code like: some_str.isdigit
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISDIGIT"
attribute_name = "isdigit"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIsdigit(source_ref):
from .StrNodes import ExpressionStrOperationIsdigit
return ExpressionStrOperationIsdigit(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIsdigit,
builtin_spec=str_isdigit_spec,
)
return result, "new_expression", "Call to 'isdigit' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIsdigit)
class ExpressionAttributeLookupFixedIsidentifier(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isidentifier' of an object.
Typically code like: source.isidentifier
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISIDENTIFIER"
attribute_name = "isidentifier"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsidentifier(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isidentifier' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isidentifier",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isidentifier"
)
attribute_classes["isidentifier"] = ExpressionAttributeLookupFixedIsidentifier
class ExpressionAttributeLookupStrIsidentifier(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsidentifier
):
"""Attribute Isidentifier lookup on a str.
Typically code like: some_str.isidentifier
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISIDENTIFIER"
attribute_name = "isidentifier"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationIsidentifier is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrIsidentifier)
class ExpressionAttributeLookupFixedIslower(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'islower' of an object.
Typically code like: source.islower
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISLOWER"
attribute_name = "islower"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIslower(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'islower' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="islower",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="islower"
)
attribute_classes["islower"] = ExpressionAttributeLookupFixedIslower
from nuitka.specs.BuiltinStrOperationSpecs import str_islower_spec
class ExpressionAttributeLookupStrIslower(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIslower
):
"""Attribute Islower lookup on a str.
Typically code like: some_str.islower
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISLOWER"
attribute_name = "islower"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIslower(source_ref):
from .StrNodes import ExpressionStrOperationIslower
return ExpressionStrOperationIslower(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIslower,
builtin_spec=str_islower_spec,
)
return result, "new_expression", "Call to 'islower' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIslower)
class ExpressionAttributeLookupFixedIsnumeric(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isnumeric' of an object.
Typically code like: source.isnumeric
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISNUMERIC"
attribute_name = "isnumeric"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsnumeric(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isnumeric' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isnumeric",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isnumeric"
)
attribute_classes["isnumeric"] = ExpressionAttributeLookupFixedIsnumeric
class ExpressionAttributeLookupStrIsnumeric(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsnumeric
):
"""Attribute Isnumeric lookup on a str.
Typically code like: some_str.isnumeric
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISNUMERIC"
attribute_name = "isnumeric"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationIsnumeric is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrIsnumeric)
class ExpressionAttributeLookupFixedIsprintable(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isprintable' of an object.
Typically code like: source.isprintable
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISPRINTABLE"
attribute_name = "isprintable"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsprintable(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isprintable' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isprintable",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isprintable"
)
attribute_classes["isprintable"] = ExpressionAttributeLookupFixedIsprintable
class ExpressionAttributeLookupStrIsprintable(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsprintable
):
"""Attribute Isprintable lookup on a str.
Typically code like: some_str.isprintable
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISPRINTABLE"
attribute_name = "isprintable"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationIsprintable is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrIsprintable)
class ExpressionAttributeLookupFixedIsspace(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isspace' of an object.
Typically code like: source.isspace
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISSPACE"
attribute_name = "isspace"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsspace(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isspace' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isspace",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isspace"
)
attribute_classes["isspace"] = ExpressionAttributeLookupFixedIsspace
from nuitka.specs.BuiltinStrOperationSpecs import str_isspace_spec
class ExpressionAttributeLookupStrIsspace(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsspace
):
"""Attribute Isspace lookup on a str.
Typically code like: some_str.isspace
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISSPACE"
attribute_name = "isspace"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIsspace(source_ref):
from .StrNodes import ExpressionStrOperationIsspace
return ExpressionStrOperationIsspace(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIsspace,
builtin_spec=str_isspace_spec,
)
return result, "new_expression", "Call to 'isspace' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIsspace)
class ExpressionAttributeLookupFixedIstitle(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'istitle' of an object.
Typically code like: source.istitle
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISTITLE"
attribute_name = "istitle"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIstitle(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'istitle' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="istitle",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="istitle"
)
attribute_classes["istitle"] = ExpressionAttributeLookupFixedIstitle
from nuitka.specs.BuiltinStrOperationSpecs import str_istitle_spec
class ExpressionAttributeLookupStrIstitle(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIstitle
):
"""Attribute Istitle lookup on a str.
Typically code like: some_str.istitle
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISTITLE"
attribute_name = "istitle"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIstitle(source_ref):
from .StrNodes import ExpressionStrOperationIstitle
return ExpressionStrOperationIstitle(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIstitle,
builtin_spec=str_istitle_spec,
)
return result, "new_expression", "Call to 'istitle' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIstitle)
class ExpressionAttributeLookupFixedIsupper(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'isupper' of an object.
Typically code like: source.isupper
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ISUPPER"
attribute_name = "isupper"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrIsupper(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'isupper' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="isupper",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="isupper"
)
attribute_classes["isupper"] = ExpressionAttributeLookupFixedIsupper
from nuitka.specs.BuiltinStrOperationSpecs import str_isupper_spec
class ExpressionAttributeLookupStrIsupper(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIsupper
):
"""Attribute Isupper lookup on a str.
Typically code like: some_str.isupper
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ISUPPER"
attribute_name = "isupper"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationIsupper(source_ref):
from .StrNodes import ExpressionStrOperationIsupper
return ExpressionStrOperationIsupper(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationIsupper,
builtin_spec=str_isupper_spec,
)
return result, "new_expression", "Call to 'isupper' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrIsupper)
class ExpressionAttributeLookupFixedItems(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'items' of an object.
Typically code like: source.items
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ITEMS"
attribute_name = "items"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictItems(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'items' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="items",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="items"
)
attribute_classes["items"] = ExpressionAttributeLookupFixedItems
from nuitka.specs.BuiltinDictOperationSpecs import dict_items_spec
class ExpressionAttributeLookupDictItems(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedItems
):
"""Attribute Items lookup on a dict.
Typically code like: some_dict.items
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_ITEMS"
attribute_name = "items"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationItems(source_ref):
if str is bytes:
from .DictionaryNodes import ExpressionDictOperationItems
return ExpressionDictOperationItems(
dict_arg=self.subnode_expression, source_ref=source_ref
)
else:
from .DictionaryNodes import ExpressionDictOperationIteritems
return ExpressionDictOperationIteritems(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationItems,
builtin_spec=dict_items_spec,
)
return result, "new_expression", "Call to 'items' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictItems)
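# Note the version split inside wrapExpressionDictOperationItems: under
# Python2 (``str is bytes``) dict.items() becomes ExpressionDictOperationItems,
# while under Python3 the same call is represented with
# ExpressionDictOperationIteritems. The "keys" handling below follows the same
# scheme with ExpressionDictOperationKeys / ExpressionDictOperationIterkeys.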
class ExpressionAttributeLookupFixedIteritems(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'iteritems' of an object.
Typically code like: source.iteritems
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ITERITEMS"
attribute_name = "iteritems"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictIteritems(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'iteritems' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="iteritems",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="iteritems"
)
attribute_classes["iteritems"] = ExpressionAttributeLookupFixedIteritems
from nuitka.specs.BuiltinDictOperationSpecs import dict_iteritems_spec
class ExpressionAttributeLookupDictIteritems(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIteritems
):
"""Attribute Iteritems lookup on a dict.
Typically code like: some_dict.iteritems
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_ITERITEMS"
attribute_name = "iteritems"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationIteritems(source_ref):
from .DictionaryNodes import ExpressionDictOperationIteritems
return ExpressionDictOperationIteritems(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationIteritems,
builtin_spec=dict_iteritems_spec,
)
return result, "new_expression", "Call to 'iteritems' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictIteritems)
class ExpressionAttributeLookupFixedIterkeys(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'iterkeys' of an object.
Typically code like: source.iterkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ITERKEYS"
attribute_name = "iterkeys"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictIterkeys(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'iterkeys' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="iterkeys",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="iterkeys"
)
attribute_classes["iterkeys"] = ExpressionAttributeLookupFixedIterkeys
from nuitka.specs.BuiltinDictOperationSpecs import dict_iterkeys_spec
class ExpressionAttributeLookupDictIterkeys(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedIterkeys
):
"""Attribute Iterkeys lookup on a dict.
Typically code like: some_dict.iterkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_ITERKEYS"
attribute_name = "iterkeys"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationIterkeys(source_ref):
from .DictionaryNodes import ExpressionDictOperationIterkeys
return ExpressionDictOperationIterkeys(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationIterkeys,
builtin_spec=dict_iterkeys_spec,
)
return result, "new_expression", "Call to 'iterkeys' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictIterkeys)
class ExpressionAttributeLookupFixedItervalues(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'itervalues' of an object.
Typically code like: source.itervalues
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ITERVALUES"
attribute_name = "itervalues"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictItervalues(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'itervalues' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="itervalues",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="itervalues"
)
attribute_classes["itervalues"] = ExpressionAttributeLookupFixedItervalues
from nuitka.specs.BuiltinDictOperationSpecs import dict_itervalues_spec
class ExpressionAttributeLookupDictItervalues(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedItervalues
):
"""Attribute Itervalues lookup on a dict.
Typically code like: some_dict.itervalues
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_ITERVALUES"
attribute_name = "itervalues"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationItervalues(source_ref):
from .DictionaryNodes import ExpressionDictOperationItervalues
return ExpressionDictOperationItervalues(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationItervalues,
builtin_spec=dict_itervalues_spec,
)
return (
result,
"new_expression",
"Call to 'itervalues' of dictionary recognized.",
)
attribute_typed_classes.add(ExpressionAttributeLookupDictItervalues)
class ExpressionAttributeLookupFixedJoin(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'join' of an object.
Typically code like: source.join
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_JOIN"
attribute_name = "join"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrJoin(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'join' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="join",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="join"
)
attribute_classes["join"] = ExpressionAttributeLookupFixedJoin
from nuitka.specs.BuiltinStrOperationSpecs import str_join_spec
class ExpressionAttributeLookupStrJoin(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedJoin
):
"""Attribute Join lookup on a str.
Typically code like: some_str.join
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_JOIN"
attribute_name = "join"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationJoin(iterable, source_ref):
from .StrNodes import ExpressionStrOperationJoin
return ExpressionStrOperationJoin(
str_arg=self.subnode_expression,
iterable=iterable,
source_ref=source_ref,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationJoin,
builtin_spec=str_join_spec,
)
return result, "new_expression", "Call to 'join' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrJoin)
class ExpressionAttributeLookupFixedKeys(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'keys' of an object.
Typically code like: source.keys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_KEYS"
attribute_name = "keys"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictKeys(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'keys' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="keys",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="keys"
)
attribute_classes["keys"] = ExpressionAttributeLookupFixedKeys
from nuitka.specs.BuiltinDictOperationSpecs import dict_keys_spec
class ExpressionAttributeLookupDictKeys(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedKeys
):
"""Attribute Keys lookup on a dict.
Typically code like: some_dict.keys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_KEYS"
attribute_name = "keys"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationKeys(source_ref):
if str is bytes:
from .DictionaryNodes import ExpressionDictOperationKeys
return ExpressionDictOperationKeys(
dict_arg=self.subnode_expression, source_ref=source_ref
)
else:
from .DictionaryNodes import ExpressionDictOperationIterkeys
return ExpressionDictOperationIterkeys(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationKeys,
builtin_spec=dict_keys_spec,
)
return result, "new_expression", "Call to 'keys' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictKeys)
class ExpressionAttributeLookupFixedLjust(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'ljust' of an object.
Typically code like: source.ljust
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_LJUST"
attribute_name = "ljust"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrLjust(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'ljust' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="ljust",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="ljust"
)
attribute_classes["ljust"] = ExpressionAttributeLookupFixedLjust
class ExpressionAttributeLookupStrLjust(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedLjust
):
"""Attribute Ljust lookup on a str.
Typically code like: some_str.ljust
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_LJUST"
attribute_name = "ljust"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationLjust is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrLjust)
class ExpressionAttributeLookupFixedLower(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'lower' of an object.
Typically code like: source.lower
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_LOWER"
attribute_name = "lower"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrLower(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'lower' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="lower",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="lower"
)
attribute_classes["lower"] = ExpressionAttributeLookupFixedLower
from nuitka.specs.BuiltinStrOperationSpecs import str_lower_spec
class ExpressionAttributeLookupStrLower(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedLower
):
"""Attribute Lower lookup on a str.
Typically code like: some_str.lower
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_LOWER"
attribute_name = "lower"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationLower(source_ref):
from .StrNodes import ExpressionStrOperationLower
return ExpressionStrOperationLower(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationLower,
builtin_spec=str_lower_spec,
)
return result, "new_expression", "Call to 'lower' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrLower)
class ExpressionAttributeLookupFixedLstrip(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'lstrip' of an object.
Typically code like: source.lstrip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_LSTRIP"
attribute_name = "lstrip"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrLstrip(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'lstrip' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="lstrip",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="lstrip"
)
attribute_classes["lstrip"] = ExpressionAttributeLookupFixedLstrip
from nuitka.specs.BuiltinStrOperationSpecs import str_lstrip_spec
class ExpressionAttributeLookupStrLstrip(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedLstrip
):
"""Attribute Lstrip lookup on a str.
Typically code like: some_str.lstrip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_LSTRIP"
attribute_name = "lstrip"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationLstrip(chars, source_ref):
if chars is not None:
from .StrNodes import ExpressionStrOperationLstrip2
return ExpressionStrOperationLstrip2(
str_arg=self.subnode_expression, chars=chars, source_ref=source_ref
)
else:
from .StrNodes import ExpressionStrOperationLstrip1
return ExpressionStrOperationLstrip1(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationLstrip,
builtin_spec=str_lstrip_spec,
)
return result, "new_expression", "Call to 'lstrip' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrLstrip)
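# Sketch of the optional single-argument pattern, assuming ``s`` is known to
# be an exact str:
#
#     s.lstrip()      -> ExpressionStrOperationLstrip1(str_arg=s)
#     s.lstrip(" -")  -> ExpressionStrOperationLstrip2(str_arg=s, chars=" -")
#
# The numeric suffix counts all operands including str_arg itself, consistent
# with the Find2/Find3/Find4 and Get2/Get3 naming above.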
class ExpressionAttributeLookupFixedMaketrans(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'maketrans' of an object.
Typically code like: source.maketrans
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_MAKETRANS"
attribute_name = "maketrans"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is not bytes and subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrMaketrans(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'maketrans' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="maketrans",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="maketrans"
)
attribute_classes["maketrans"] = ExpressionAttributeLookupFixedMaketrans
class ExpressionAttributeLookupStrMaketrans(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedMaketrans
):
"""Attribute Maketrans lookup on a str.
Typically code like: some_str.maketrans
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_MAKETRANS"
attribute_name = "maketrans"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationMaketrans is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrMaketrans)
class ExpressionAttributeLookupFixedPartition(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'partition' of an object.
Typically code like: source.partition
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_PARTITION"
attribute_name = "partition"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrPartition(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'partition' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="partition",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="partition"
)
attribute_classes["partition"] = ExpressionAttributeLookupFixedPartition
from nuitka.specs.BuiltinStrOperationSpecs import str_partition_spec
class ExpressionAttributeLookupStrPartition(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedPartition
):
"""Attribute Partition lookup on a str.
Typically code like: some_str.partition
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_PARTITION"
attribute_name = "partition"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationPartition(sep, source_ref):
from .StrNodes import ExpressionStrOperationPartition
return ExpressionStrOperationPartition(
str_arg=self.subnode_expression, sep=sep, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationPartition,
builtin_spec=str_partition_spec,
)
return result, "new_expression", "Call to 'partition' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrPartition)
class ExpressionAttributeLookupFixedPop(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'pop' of an object.
Typically code like: source.pop
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_POP"
attribute_name = "pop"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictPop(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'pop' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="pop",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="pop"
)
attribute_classes["pop"] = ExpressionAttributeLookupFixedPop
from nuitka.specs.BuiltinDictOperationSpecs import dict_pop_spec
class ExpressionAttributeLookupDictPop(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedPop
):
"""Attribute Pop lookup on a dict.
Typically code like: some_dict.pop
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_POP"
attribute_name = "pop"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationPop(key, default, source_ref):
if default is not None:
from .DictionaryNodes import ExpressionDictOperationPop3
return ExpressionDictOperationPop3(
dict_arg=self.subnode_expression,
key=key,
default=default,
source_ref=source_ref,
)
else:
from .DictionaryNodes import ExpressionDictOperationPop2
return ExpressionDictOperationPop2(
dict_arg=self.subnode_expression, key=key, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationPop,
builtin_spec=dict_pop_spec,
)
return result, "new_expression", "Call to 'pop' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictPop)
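# Sketch of the dispatch in wrapExpressionDictOperationPop above, for hypothetical
# calls (not taken from this file):
#
#     value = some_dict.pop(key)            # -> ExpressionDictOperationPop2
#     value = some_dict.pop(key, default)   # -> ExpressionDictOperationPop3
#
# i.e. the two- or three-argument operation node is chosen depending on whether a
# default was passed to the recognized call.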
class ExpressionAttributeLookupFixedPopitem(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'popitem' of an object.
Typically code like: source.popitem
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_POPITEM"
attribute_name = "popitem"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictPopitem(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'popitem' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="popitem",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="popitem"
)
attribute_classes["popitem"] = ExpressionAttributeLookupFixedPopitem
class ExpressionAttributeLookupDictPopitem(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedPopitem
):
"""Attribute Popitem lookup on a dict.
Typically code like: some_dict.popitem
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_POPITEM"
attribute_name = "popitem"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as dict operation ExpressionDictOperationPopitem is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupDictPopitem)
class ExpressionAttributeLookupFixedReplace(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'replace' of an object.
Typically code like: source.replace
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_REPLACE"
attribute_name = "replace"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrReplace(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'replace' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="replace",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="replace"
)
attribute_classes["replace"] = ExpressionAttributeLookupFixedReplace
from nuitka.specs.BuiltinStrOperationSpecs import str_replace_spec
from .StrNodes import ExpressionStrOperationReplace4
class ExpressionAttributeLookupStrReplace(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedReplace
):
"""Attribute Replace lookup on a str.
Typically code like: some_str.replace
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_REPLACE"
attribute_name = "replace"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationReplace(old, new, count, source_ref):
if count is not None:
return ExpressionStrOperationReplace4(
str_arg=self.subnode_expression,
old=old,
new=new,
count=count,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationReplace3
return ExpressionStrOperationReplace3(
str_arg=self.subnode_expression,
old=old,
new=new,
source_ref=source_ref,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationReplace,
builtin_spec=str_replace_spec,
)
return result, "new_expression", "Call to 'replace' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrReplace)
class ExpressionAttributeLookupFixedRfind(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rfind' of an object.
Typically code like: source.rfind
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RFIND"
attribute_name = "rfind"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRfind(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rfind' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rfind",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rfind"
)
attribute_classes["rfind"] = ExpressionAttributeLookupFixedRfind
from nuitka.specs.BuiltinStrOperationSpecs import str_rfind_spec
class ExpressionAttributeLookupStrRfind(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRfind
):
"""Attribute Rfind lookup on a str.
Typically code like: some_str.rfind
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RFIND"
attribute_name = "rfind"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationRfind(sub, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationRfind4
return ExpressionStrOperationRfind4(
str_arg=self.subnode_expression,
sub=sub,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationRfind3
return ExpressionStrOperationRfind3(
str_arg=self.subnode_expression,
sub=sub,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationRfind2
return ExpressionStrOperationRfind2(
str_arg=self.subnode_expression, sub=sub, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationRfind,
builtin_spec=str_rfind_spec,
)
return result, "new_expression", "Call to 'rfind' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrRfind)
class ExpressionAttributeLookupFixedRindex(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rindex' of an object.
Typically code like: source.rindex
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RINDEX"
attribute_name = "rindex"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRindex(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rindex' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rindex",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rindex"
)
attribute_classes["rindex"] = ExpressionAttributeLookupFixedRindex
from nuitka.specs.BuiltinStrOperationSpecs import str_rindex_spec
class ExpressionAttributeLookupStrRindex(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRindex
):
"""Attribute Rindex lookup on a str.
Typically code like: some_str.rindex
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RINDEX"
attribute_name = "rindex"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationRindex(sub, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationRindex4
return ExpressionStrOperationRindex4(
str_arg=self.subnode_expression,
sub=sub,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationRindex3
return ExpressionStrOperationRindex3(
str_arg=self.subnode_expression,
sub=sub,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationRindex2
return ExpressionStrOperationRindex2(
str_arg=self.subnode_expression, sub=sub, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationRindex,
builtin_spec=str_rindex_spec,
)
return result, "new_expression", "Call to 'rindex' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrRindex)
class ExpressionAttributeLookupFixedRjust(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rjust' of an object.
Typically code like: source.rjust
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RJUST"
attribute_name = "rjust"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRjust(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rjust' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rjust",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rjust"
)
attribute_classes["rjust"] = ExpressionAttributeLookupFixedRjust
class ExpressionAttributeLookupStrRjust(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRjust
):
"""Attribute Rjust lookup on a str.
Typically code like: some_str.rjust
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RJUST"
attribute_name = "rjust"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationRjust is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrRjust)
class ExpressionAttributeLookupFixedRpartition(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rpartition' of an object.
Typically code like: source.rpartition
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RPARTITION"
attribute_name = "rpartition"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRpartition(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rpartition' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rpartition",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rpartition"
)
attribute_classes["rpartition"] = ExpressionAttributeLookupFixedRpartition
from nuitka.specs.BuiltinStrOperationSpecs import str_rpartition_spec
class ExpressionAttributeLookupStrRpartition(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRpartition
):
"""Attribute Rpartition lookup on a str.
Typically code like: some_str.rpartition
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RPARTITION"
attribute_name = "rpartition"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationRpartition(sep, source_ref):
from .StrNodes import ExpressionStrOperationRpartition
return ExpressionStrOperationRpartition(
str_arg=self.subnode_expression, sep=sep, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationRpartition,
builtin_spec=str_rpartition_spec,
)
return result, "new_expression", "Call to 'rpartition' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrRpartition)
class ExpressionAttributeLookupFixedRsplit(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rsplit' of an object.
Typically code like: source.rsplit
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RSPLIT"
attribute_name = "rsplit"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRsplit(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rsplit' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rsplit",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rsplit"
)
attribute_classes["rsplit"] = ExpressionAttributeLookupFixedRsplit
from nuitka.specs.BuiltinStrOperationSpecs import str_rsplit_spec
class ExpressionAttributeLookupStrRsplit(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRsplit
):
"""Attribute Rsplit lookup on a str.
Typically code like: some_str.rsplit
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RSPLIT"
attribute_name = "rsplit"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationRsplit(sep, maxsplit, source_ref):
if maxsplit is not None:
from .StrNodes import ExpressionStrOperationRsplit3
return ExpressionStrOperationRsplit3(
str_arg=self.subnode_expression,
sep=sep,
maxsplit=maxsplit,
source_ref=source_ref,
)
elif sep is not None:
from .StrNodes import ExpressionStrOperationRsplit2
return ExpressionStrOperationRsplit2(
str_arg=self.subnode_expression, sep=sep, source_ref=source_ref
)
else:
from .StrNodes import ExpressionStrOperationRsplit1
return ExpressionStrOperationRsplit1(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationRsplit,
builtin_spec=str_rsplit_spec,
)
return result, "new_expression", "Call to 'rsplit' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrRsplit)
class ExpressionAttributeLookupFixedRstrip(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'rstrip' of an object.
Typically code like: source.rstrip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_RSTRIP"
attribute_name = "rstrip"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrRstrip(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'rstrip' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="rstrip",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="rstrip"
)
attribute_classes["rstrip"] = ExpressionAttributeLookupFixedRstrip
from nuitka.specs.BuiltinStrOperationSpecs import str_rstrip_spec
class ExpressionAttributeLookupStrRstrip(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedRstrip
):
"""Attribute Rstrip lookup on a str.
Typically code like: some_str.rstrip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_RSTRIP"
attribute_name = "rstrip"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationRstrip(chars, source_ref):
if chars is not None:
from .StrNodes import ExpressionStrOperationRstrip2
return ExpressionStrOperationRstrip2(
str_arg=self.subnode_expression, chars=chars, source_ref=source_ref
)
else:
from .StrNodes import ExpressionStrOperationRstrip1
return ExpressionStrOperationRstrip1(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationRstrip,
builtin_spec=str_rstrip_spec,
)
return result, "new_expression", "Call to 'rstrip' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrRstrip)
class ExpressionAttributeLookupFixedSetdefault(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'setdefault' of an object.
Typically code like: source.setdefault
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_SETDEFAULT"
attribute_name = "setdefault"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictSetdefault(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'setdefault' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="setdefault",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="setdefault"
)
attribute_classes["setdefault"] = ExpressionAttributeLookupFixedSetdefault
from nuitka.specs.BuiltinDictOperationSpecs import dict_setdefault_spec
class ExpressionAttributeLookupDictSetdefault(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedSetdefault
):
"""Attribute Setdefault lookup on a dict.
Typically code like: some_dict.setdefault
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_SETDEFAULT"
attribute_name = "setdefault"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationSetdefault(key, default, source_ref):
if default is not None:
from .DictionaryNodes import ExpressionDictOperationSetdefault3
return ExpressionDictOperationSetdefault3(
dict_arg=self.subnode_expression,
key=key,
default=default,
source_ref=source_ref,
)
else:
from .DictionaryNodes import ExpressionDictOperationSetdefault2
return ExpressionDictOperationSetdefault2(
dict_arg=self.subnode_expression, key=key, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationSetdefault,
builtin_spec=dict_setdefault_spec,
)
return (
result,
"new_expression",
"Call to 'setdefault' of dictionary recognized.",
)
attribute_typed_classes.add(ExpressionAttributeLookupDictSetdefault)
class ExpressionAttributeLookupFixedSplit(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'split' of an object.
Typically code like: source.split
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_SPLIT"
attribute_name = "split"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrSplit(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'split' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="split",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="split"
)
attribute_classes["split"] = ExpressionAttributeLookupFixedSplit
from nuitka.specs.BuiltinStrOperationSpecs import str_split_spec
class ExpressionAttributeLookupStrSplit(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedSplit
):
"""Attribute Split lookup on a str.
Typically code like: some_str.split
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_SPLIT"
attribute_name = "split"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationSplit(sep, maxsplit, source_ref):
if maxsplit is not None:
from .StrNodes import ExpressionStrOperationSplit3
return ExpressionStrOperationSplit3(
str_arg=self.subnode_expression,
sep=sep,
maxsplit=maxsplit,
source_ref=source_ref,
)
elif sep is not None:
from .StrNodes import ExpressionStrOperationSplit2
return ExpressionStrOperationSplit2(
str_arg=self.subnode_expression, sep=sep, source_ref=source_ref
)
else:
from .StrNodes import ExpressionStrOperationSplit1
return ExpressionStrOperationSplit1(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationSplit,
builtin_spec=str_split_spec,
)
return result, "new_expression", "Call to 'split' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrSplit)
class ExpressionAttributeLookupFixedSplitlines(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'splitlines' of an object.
Typically code like: source.splitlines
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_SPLITLINES"
attribute_name = "splitlines"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrSplitlines(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'splitlines' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="splitlines",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="splitlines"
)
attribute_classes["splitlines"] = ExpressionAttributeLookupFixedSplitlines
class ExpressionAttributeLookupStrSplitlines(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedSplitlines
):
"""Attribute Splitlines lookup on a str.
Typically code like: some_str.splitlines
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_SPLITLINES"
attribute_name = "splitlines"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationSplitlines is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrSplitlines)
class ExpressionAttributeLookupFixedStartswith(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'startswith' of an object.
Typically code like: source.startswith
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_STARTSWITH"
attribute_name = "startswith"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrStartswith(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'startswith' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="startswith",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="startswith"
)
attribute_classes["startswith"] = ExpressionAttributeLookupFixedStartswith
from nuitka.specs.BuiltinStrOperationSpecs import str_startswith_spec
class ExpressionAttributeLookupStrStartswith(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedStartswith
):
"""Attribute Startswith lookup on a str.
Typically code like: some_str.startswith
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_STARTSWITH"
attribute_name = "startswith"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationStartswith(prefix, start, end, source_ref):
if end is not None:
from .StrNodes import ExpressionStrOperationStartswith4
return ExpressionStrOperationStartswith4(
str_arg=self.subnode_expression,
prefix=prefix,
start=start,
end=end,
source_ref=source_ref,
)
elif start is not None:
from .StrNodes import ExpressionStrOperationStartswith3
return ExpressionStrOperationStartswith3(
str_arg=self.subnode_expression,
prefix=prefix,
start=start,
source_ref=source_ref,
)
else:
from .StrNodes import ExpressionStrOperationStartswith2
return ExpressionStrOperationStartswith2(
str_arg=self.subnode_expression,
prefix=prefix,
source_ref=source_ref,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationStartswith,
builtin_spec=str_startswith_spec,
)
return result, "new_expression", "Call to 'startswith' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrStartswith)
class ExpressionAttributeLookupFixedStrip(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'strip' of an object.
Typically code like: source.strip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_STRIP"
attribute_name = "strip"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrStrip(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'strip' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="strip",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="strip"
)
attribute_classes["strip"] = ExpressionAttributeLookupFixedStrip
from nuitka.specs.BuiltinStrOperationSpecs import str_strip_spec
class ExpressionAttributeLookupStrStrip(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedStrip
):
"""Attribute Strip lookup on a str.
Typically code like: some_str.strip
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_STRIP"
attribute_name = "strip"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationStrip(chars, source_ref):
if chars is not None:
from .StrNodes import ExpressionStrOperationStrip2
return ExpressionStrOperationStrip2(
str_arg=self.subnode_expression, chars=chars, source_ref=source_ref
)
else:
from .StrNodes import ExpressionStrOperationStrip1
return ExpressionStrOperationStrip1(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationStrip,
builtin_spec=str_strip_spec,
)
return result, "new_expression", "Call to 'strip' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrStrip)
class ExpressionAttributeLookupFixedSwapcase(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'swapcase' of an object.
Typically code like: source.swapcase
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_SWAPCASE"
attribute_name = "swapcase"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrSwapcase(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'swapcase' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="swapcase",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="swapcase"
)
attribute_classes["swapcase"] = ExpressionAttributeLookupFixedSwapcase
from nuitka.specs.BuiltinStrOperationSpecs import str_swapcase_spec
class ExpressionAttributeLookupStrSwapcase(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedSwapcase
):
"""Attribute Swapcase lookup on a str.
Typically code like: some_str.swapcase
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_SWAPCASE"
attribute_name = "swapcase"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationSwapcase(source_ref):
from .StrNodes import ExpressionStrOperationSwapcase
return ExpressionStrOperationSwapcase(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationSwapcase,
builtin_spec=str_swapcase_spec,
)
return result, "new_expression", "Call to 'swapcase' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrSwapcase)
class ExpressionAttributeLookupFixedTitle(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'title' of an object.
Typically code like: source.title
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_TITLE"
attribute_name = "title"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrTitle(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'title' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="title",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="title"
)
attribute_classes["title"] = ExpressionAttributeLookupFixedTitle
from nuitka.specs.BuiltinStrOperationSpecs import str_title_spec
class ExpressionAttributeLookupStrTitle(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedTitle
):
"""Attribute Title lookup on a str.
Typically code like: some_str.title
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_TITLE"
attribute_name = "title"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationTitle(source_ref):
from .StrNodes import ExpressionStrOperationTitle
return ExpressionStrOperationTitle(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationTitle,
builtin_spec=str_title_spec,
)
return result, "new_expression", "Call to 'title' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrTitle)
class ExpressionAttributeLookupFixedTranslate(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'translate' of an object.
Typically code like: source.translate
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_TRANSLATE"
attribute_name = "translate"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrTranslate(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'translate' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="translate",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="translate"
)
attribute_classes["translate"] = ExpressionAttributeLookupFixedTranslate
class ExpressionAttributeLookupStrTranslate(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedTranslate
):
"""Attribute Translate lookup on a str.
Typically code like: some_str.translate
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_TRANSLATE"
attribute_name = "translate"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationTranslate is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrTranslate)
class ExpressionAttributeLookupFixedUpdate(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'update' of an object.
Typically code like: source.update
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_UPDATE"
attribute_name = "update"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictUpdate(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'update' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="update",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="update"
)
attribute_classes["update"] = ExpressionAttributeLookupFixedUpdate
from nuitka.specs.BuiltinDictOperationSpecs import dict_update_spec
class ExpressionAttributeLookupDictUpdate(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedUpdate
):
"""Attribute Update lookup on a dict.
Typically code like: some_dict.update
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_UPDATE"
attribute_name = "update"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationUpdate(list_args, kw_args, source_ref):
if kw_args is not None:
from .DictionaryNodes import ExpressionDictOperationUpdate3
return ExpressionDictOperationUpdate3(
dict_arg=self.subnode_expression,
iterable=list_args,
pairs=kw_args,
source_ref=source_ref,
)
else:
from .DictionaryNodes import ExpressionDictOperationUpdate2
return ExpressionDictOperationUpdate2(
dict_arg=self.subnode_expression,
iterable=list_args,
source_ref=source_ref,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationUpdate,
builtin_spec=dict_update_spec,
)
return result, "new_expression", "Call to 'update' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictUpdate)
class ExpressionAttributeLookupFixedUpper(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'upper' of an object.
Typically code like: source.upper
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_UPPER"
attribute_name = "upper"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrUpper(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'upper' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="upper",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="upper"
)
attribute_classes["upper"] = ExpressionAttributeLookupFixedUpper
from nuitka.specs.BuiltinStrOperationSpecs import str_upper_spec
class ExpressionAttributeLookupStrUpper(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedUpper
):
"""Attribute Upper lookup on a str.
Typically code like: some_str.upper
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_UPPER"
attribute_name = "upper"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionStrOperationUpper(source_ref):
from .StrNodes import ExpressionStrOperationUpper
return ExpressionStrOperationUpper(
str_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionStrOperationUpper,
builtin_spec=str_upper_spec,
)
return result, "new_expression", "Call to 'upper' of str recognized."
attribute_typed_classes.add(ExpressionAttributeLookupStrUpper)
class ExpressionAttributeLookupFixedValues(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'values' of an object.
Typically code like: source.values
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_VALUES"
attribute_name = "values"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictValues(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'values' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="values",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="values"
)
attribute_classes["values"] = ExpressionAttributeLookupFixedValues
from nuitka.specs.BuiltinDictOperationSpecs import dict_values_spec
class ExpressionAttributeLookupDictValues(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedValues
):
"""Attribute Values lookup on a dict.
Typically code like: some_dict.values
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_VALUES"
attribute_name = "values"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationValues(source_ref):
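            # Note: "str is bytes" is True only on Python 2, so this branches on
            # the major Python version being compiled for.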
if str is bytes:
from .DictionaryNodes import ExpressionDictOperationValues
return ExpressionDictOperationValues(
dict_arg=self.subnode_expression, source_ref=source_ref
)
else:
from .DictionaryNodes import ExpressionDictOperationItervalues
return ExpressionDictOperationItervalues(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationValues,
builtin_spec=dict_values_spec,
)
return result, "new_expression", "Call to 'values' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictValues)
class ExpressionAttributeLookupFixedViewitems(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'viewitems' of an object.
Typically code like: source.viewitems
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_VIEWITEMS"
attribute_name = "viewitems"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
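        # The extra "str is bytes" guard restricts this to Python 2, where dict
        # actually provides a viewitems() method; Python 3 dicts do not have it.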
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictViewitems(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'viewitems' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="viewitems",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="viewitems"
)
attribute_classes["viewitems"] = ExpressionAttributeLookupFixedViewitems
from nuitka.specs.BuiltinDictOperationSpecs import dict_viewitems_spec
class ExpressionAttributeLookupDictViewitems(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedViewitems
):
"""Attribute Viewitems lookup on a dict.
Typically code like: some_dict.viewitems
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_VIEWITEMS"
attribute_name = "viewitems"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationViewitems(source_ref):
from .DictionaryNodes import ExpressionDictOperationViewitems
return ExpressionDictOperationViewitems(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationViewitems,
builtin_spec=dict_viewitems_spec,
)
return result, "new_expression", "Call to 'viewitems' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictViewitems)
class ExpressionAttributeLookupFixedViewkeys(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'viewkeys' of an object.
Typically code like: source.viewkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_VIEWKEYS"
attribute_name = "viewkeys"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictViewkeys(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'viewkeys' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="viewkeys",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="viewkeys"
)
attribute_classes["viewkeys"] = ExpressionAttributeLookupFixedViewkeys
from nuitka.specs.BuiltinDictOperationSpecs import dict_viewkeys_spec
class ExpressionAttributeLookupDictViewkeys(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedViewkeys
):
"""Attribute Viewkeys lookup on a dict.
Typically code like: some_dict.viewkeys
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_VIEWKEYS"
attribute_name = "viewkeys"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationViewkeys(source_ref):
from .DictionaryNodes import ExpressionDictOperationViewkeys
return ExpressionDictOperationViewkeys(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationViewkeys,
builtin_spec=dict_viewkeys_spec,
)
return result, "new_expression", "Call to 'viewkeys' of dictionary recognized."
attribute_typed_classes.add(ExpressionAttributeLookupDictViewkeys)
class ExpressionAttributeLookupFixedViewvalues(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'viewvalues' of an object.
Typically code like: source.viewvalues
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_VIEWVALUES"
attribute_name = "viewvalues"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if str is bytes and subnode_expression.hasShapeDictionaryExact():
result = ExpressionAttributeLookupDictViewvalues(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'viewvalues' on dict shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="viewvalues",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="viewvalues"
)
attribute_classes["viewvalues"] = ExpressionAttributeLookupFixedViewvalues
from nuitka.specs.BuiltinDictOperationSpecs import dict_viewvalues_spec
class ExpressionAttributeLookupDictViewvalues(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedViewvalues
):
"""Attribute Viewvalues lookup on a dict.
Typically code like: some_dict.viewvalues
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_DICT_VIEWVALUES"
attribute_name = "viewvalues"
def computeExpression(self, trace_collection):
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
def wrapExpressionDictOperationViewvalues(source_ref):
from .DictionaryNodes import ExpressionDictOperationViewvalues
return ExpressionDictOperationViewvalues(
dict_arg=self.subnode_expression, source_ref=source_ref
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
result = extractBuiltinArgs(
node=call_node,
builtin_class=wrapExpressionDictOperationViewvalues,
builtin_spec=dict_viewvalues_spec,
)
return (
result,
"new_expression",
"Call to 'viewvalues' of dictionary recognized.",
)
attribute_typed_classes.add(ExpressionAttributeLookupDictViewvalues)
class ExpressionAttributeLookupFixedZfill(ExpressionAttributeLookupFixedBase):
"""Looking up an attribute value 'zfill' of an object.
Typically code like: source.zfill
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_FIXED_ZFILL"
attribute_name = "zfill"
def computeExpression(self, trace_collection):
subnode_expression = self.subnode_expression
if subnode_expression.hasShapeStrExact():
result = ExpressionAttributeLookupStrZfill(
expression=subnode_expression, source_ref=self.source_ref
)
return (
result,
"new_expression",
"Attribute lookup 'zfill' on str shape resolved.",
)
return subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name="zfill",
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name="zfill"
)
attribute_classes["zfill"] = ExpressionAttributeLookupFixedZfill
class ExpressionAttributeLookupStrZfill(
SideEffectsFromChildrenMixin, ExpressionAttributeLookupFixedZfill
):
"""Attribute Zfill lookup on a str.
Typically code like: some_str.zfill
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_STR_ZFILL"
attribute_name = "zfill"
def computeExpression(self, trace_collection):
return self, None, None
# No computeExpressionCall as str operation ExpressionStrOperationZfill is not yet implemented
attribute_typed_classes.add(ExpressionAttributeLookupStrZfill)
| 31.780423
| 105
| 0.689983
|
131668a619ff05fe9716089c047193de98ccfc87
| 303
|
py
|
Python
|
python/ray/ml/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 1
|
2019-06-19T02:23:43.000Z
|
2019-06-19T02:23:43.000Z
|
python/ray/ml/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 73
|
2021-09-25T07:11:39.000Z
|
2022-03-26T07:10:59.000Z
|
python/ray/ml/__init__.py
|
willfrey/ray
|
288a81b42ef0186ab4db33b30191614a7bdb69f6
|
[
"Apache-2.0"
] | 1
|
2019-09-24T16:24:49.000Z
|
2019-09-24T16:24:49.000Z
|
from ray.ml.checkpoint import Checkpoint
from ray.ml.config import RunConfig, ScalingConfig
from ray.ml.preprocessor import Preprocessor
from ray.ml.utils.datasets import train_test_split
__all__ = [
"Checkpoint",
"Preprocessor",
"RunConfig",
"ScalingConfig",
"train_test_split",
]
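# Hypothetical usage sketch (only the names re-exported above; signatures are
# those of the underlying ray.ml modules):
#
#     from ray.ml import Checkpoint, RunConfig, ScalingConfig, train_test_split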
| 23.307692
| 50
| 0.752475
|
4efb264d134e2bb6f62ac3ebc67f34abd4ed84d0
| 413
|
py
|
Python
|
plot/test.py
|
MCCCSunny/vnpy-edited
|
c9ef56d4b809bac1f083c213ca37f74a1f80b65e
|
[
"MIT"
] | null | null | null |
plot/test.py
|
MCCCSunny/vnpy-edited
|
c9ef56d4b809bac1f083c213ca37f74a1f80b65e
|
[
"MIT"
] | null | null | null |
plot/test.py
|
MCCCSunny/vnpy-edited
|
c9ef56d4b809bac1f083c213ca37f74a1f80b65e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 10:20:35 2017
@author: WCP
"""
import sys
sys.path.append('C:\\Users\\WCP\\Desktop\\vnpy1.7.2_SC\\vnpy-master\\examples\\VnTrader\\temp')
sys.path.append('C:\\Users\\WCP\\Desktop\\vnpy1.7.2_SC\\vnpy-master')
import vnpy.trader.vtObject
import shelve
f=shelve.open('C:\\Users\\WCP\\Desktop\\vnpy1.7.2_SC\\vnpy-master\\examples\\VnTrader\\temp\\ContractData.vt')
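# (The vnpy.trader.vtObject import above is presumably needed so that shelve can
# unpickle the contract objects stored in ContractData.vt.)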
| 29.5
| 110
| 0.702179
|
1877886c45ccb6a98c2ae46418bc5800a29bce5d
| 430
|
py
|
Python
|
tavastiaevents/urls.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
tavastiaevents/urls.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
tavastiaevents/urls.py
|
hamk-uas/TavastiaEventsOld
|
b808a1418ee89ba1e774c814364e5b55ea4f9a2c
|
[
"MIT"
] | null | null | null |
from django.core.urlresolvers import reverse
from django.conf.urls import url, include
from django.views.generic import View
from .views import message_event
from django.views.generic import TemplateView
app_name = 'tavastiaevents'
urlpatterns = [
url(r'report/', message_event, name='message-event'),
url(r'documentation/$', TemplateView.as_view(template_name="rest_framework/api_info.html"), name='documentation')
]
| 28.666667
| 117
| 0.783721
|
0ec53ea5068181444a7deb7ec2d5a9e3f08b6bce
| 4,642
|
py
|
Python
|
csv/build_spm.py
|
miurahr/libgeotiff
|
d9c877b6a5f7aac46905e1665f95d8a0469140fa
|
[
"MIT"
] | null | null | null |
csv/build_spm.py
|
miurahr/libgeotiff
|
d9c877b6a5f7aac46905e1665f95d8a0469140fa
|
[
"MIT"
] | null | null | null |
csv/build_spm.py
|
miurahr/libgeotiff
|
d9c877b6a5f7aac46905e1665f95d8a0469140fa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#******************************************************************************
# $Id$
#
# Project: GDAL
# Purpose: Build stateplane.csv, relating USGS state plane numbers with
# EPSG coordinate system, and some supporting info fields.
# Author: Frank Warmerdam, warmerdam@pobox.com
#******************************************************************************
# Copyright (c) 2002, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
#
# $Log$
# Revision 1.1 2002/11/28 16:11:47 warmerda
# New
#
#
import string
import csv_tools
##############################################################################
# read_stateplane_txt()
def read_stateplane_txt( filename ):
spt = csv_tools.CSVTable()
spt.add_field( "ID" )
spt.add_field( "STATE" )
spt.add_field( "ZONE" )
spt.add_field( "PROJ_METHOD" )
spt.add_field( "DATUM" )
spt.add_field( "USGS_CODE" )
spt.add_field( "EPSG_PCS_CODE" )
src_lines = open(filename).readlines()
for line in src_lines:
rec = {}
rec['STATE'] = string.strip(line[0:16])
rec['ZONE'] = string.strip(line[16:39])
rec['PROJ_METHOD'] = string.strip(line[39:40])
rec['DATUM'] = string.strip(line[48:65])
rec['USGS_CODE'] = string.strip(line[65:])
rec['EPSG_PCS_CODE'] = ''
if rec['DATUM'] == 'NAD27':
rec['ID'] = str(int(rec['USGS_CODE']) + 10000)
else:
rec['ID'] = rec['USGS_CODE']
spt.add_record( int(rec['ID']), rec )
return spt
##############################################################################
# main()
spt = read_stateplane_txt( '/home/warmerda/gdal/data/stateplane.txt' )
pcs = csv_tools.CSVTable()
pcs.read_from_csv( 'coordinate_reference_system.csv' )
coord_sys_by_name = {}
ids_to_replace = []
pcs_keys = pcs.data.keys()
#pcs_keys = [ 2204, 32036 ]
for pcs_key in pcs_keys:
rec = pcs.get_record( pcs_key )
if rec['COORD_REF_SYS_KIND'] != 'projected':
continue
dep = rec['DEPRECATED']
coc = int(rec['PROJECTION_CONV_CODE'])
if rec['SOURCE_GEOGCRS_CODE'] == '4269':
zone = coc - 10030
id = zone
else:
zone = coc - 10000
id = zone + 10000
if dep == '0':
name = rec['COORD_REF_SYS_NAME']
if coord_sys_by_name.has_key(name):
print 'Yikes, more than one ', name
else:
coord_sys_by_name[name] = pcs_key
if spt.data.has_key(id):
spt_rec = spt.get_record( id )
if len(spt_rec['EPSG_PCS_CODE']) > 0:
raise ValueError, 'already have this State Plane Zone!'
spt_rec['EPSG_PCS_CODE'] = str(pcs_key)
spt.set_record( id, spt_rec )
if dep == '1':
ids_to_replace.append( id )
print 'Found %d deprecated PCS codes to upgrade by name.' % len(ids_to_replace)
for id in ids_to_replace:
spt_rec = spt.get_record( id )
dep_pcs_rec = pcs.get_record( int(spt_rec['EPSG_PCS_CODE']) )
name = dep_pcs_rec['COORD_REF_SYS_NAME']
if coord_sys_by_name.has_key(name):
spt_rec['EPSG_PCS_CODE'] = str(coord_sys_by_name[name])
else:
print 'Unable to find non-deprecated value for ', name
for spt_id in spt.data.keys():
rec = spt.get_record(spt_id)
if len(rec['EPSG_PCS_CODE']) == 0:
print 'Never got match for %s / %s' % (rec['STATE'], rec['ZONE'])
spt.write_to_csv( 'stateplane.csv' )
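# Note: this script targets Python 2 (print statements, dict.has_key(),
# string.strip() and the old "raise ValueError, msg" form) and will not run
# unmodified under Python 3.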
| 32.921986
| 79
| 0.592417
|
0acdbf1bdf3e63582a7971ed05ade77b5d07d2db
| 515
|
py
|
Python
|
app/migrations/versions/0169ae36e249_.py
|
adamw523/simple-flask-app
|
07ba4e796c0f89118dd3beafa3ce46c35f993f66
|
[
"MIT"
] | null | null | null |
app/migrations/versions/0169ae36e249_.py
|
adamw523/simple-flask-app
|
07ba4e796c0f89118dd3beafa3ce46c35f993f66
|
[
"MIT"
] | null | null | null |
app/migrations/versions/0169ae36e249_.py
|
adamw523/simple-flask-app
|
07ba4e796c0f89118dd3beafa3ce46c35f993f66
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 0169ae36e249
Revises: None
Create Date: 2016-10-12 23:40:23.906003
"""
# revision identifiers, used by Alembic.
revision = '0169ae36e249'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'posts',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('title', sa.String),
sa.Column('body', sa.String),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('posts')
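# Usage note (assumption: this revision is managed by Alembic, possibly via
# Flask-Migrate): it would typically be applied with "alembic upgrade head"
# (or "flask db upgrade") and rolled back with "alembic downgrade -1".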
| 19.074074
| 54
| 0.660194
|
b668f39e66b2a5668bd7a2b1eeb26783c06028f2
| 7,444
|
py
|
Python
|
examples/pipelines/osmfish_pipeline.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
examples/pipelines/osmfish_pipeline.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
examples/pipelines/osmfish_pipeline.py
|
haoxusci/starfish
|
d7bd856024c75f2ce41504406f2a663566c3814b
|
[
"MIT"
] | null | null | null |
"""
.. _osmfish_example:
Reproduce published osmFISH results with starfish
=================================================
osmFISH is an image based transcriptomics technique that can spatially resolve tens of RNA
transcripts and their expression levels **in situ**. The protocol and data analysis are described in
this `publication`_. This notebook walks through how to use starfish to process the raw images
from an osmFISH experiment into a spatially resolved gene expression image. We verify that
starfish can accurately reproduce the results from the authors' original Python `pipeline`_
.. _publication: https://www.nature.com/articles/s41592-018-0175-z
.. _pipeline: http://linnarssonlab.org/osmFISH/image_analysis/
"""
from IPython import get_ipython
import matplotlib
import matplotlib.pyplot as plt
# equivalent to %gui qt and %matplotlib inline
ipython = get_ipython()
ipython.magic("gui qt5")
ipython.magic("matplotlib inline")
matplotlib.rcParams["figure.dpi"] = 150
###################################################################################################
# Load Data into starfish from the cloud
# --------------------------------------
# The data from an osmFISH experiment are similar in form to a standard smFISH experiment. For each
# round, each color channel corresponds to presence of a particular gene. Across rounds, the color
# channels index different genes. Here, we analyze one FOV from the first round (r) and first
# channel (c), which consists of 45 z-planes (z). Each image in this image stack is of dimensions
# 2048x2048. The data are taken from mouse somatosensory cortex, and the gene in this channel is
# Adloc.
from starfish import data
from starfish import FieldOfView
experiment = data.osmFISH(use_test_data=True)
imgs = experiment["fov_000"].get_image(FieldOfView.PRIMARY_IMAGES)
print(imgs)
###################################################################################################
# Filter and visualize data
# -------------------------
# First, we remove background signal using a gaussian high-pass filter.
from starfish.image import Filter
filter_ghp = Filter.GaussianHighPass(sigma=(1, 8, 8), is_volume=True)
imgs_ghp = filter_ghp.run(imgs, in_place=False)
###################################################################################################
# Next, we enhance the spots by filtering with a Laplace filter.
filter_laplace = Filter.Laplace(sigma=(0.2, 0.5, 0.5), is_volume=True)
imgs_ghp_laplace = filter_laplace.run(imgs_ghp, in_place=False)
###################################################################################################
# Finally, we take a maximum projection over z, which effectively mitigates effects of out of focus
# z-planes.
from starfish.types import Axes
mp = imgs_ghp_laplace.reduce({Axes.ZPLANE}, func="max")
###################################################################################################
# We can now visualize our data before and after filtering.
import numpy as np
single_plane = imgs.reduce({Axes.ZPLANE}, func="max").xarray.sel({Axes.CH:0}).squeeze()
single_plane_filtered = mp.xarray.sel({Axes.CH: 0}).squeeze()
plt.figure(figsize=(10, 10))
plt.subplot(121)
plt.imshow(single_plane, cmap='gray', clim=list(np.percentile(single_plane.data, [1, 99.9])))
plt.axis('off')
plt.title('Original data, Round:0, Channel: 0')
plt.subplot(122)
plt.imshow(single_plane_filtered, cmap='gray', clim=list(np.percentile(single_plane_filtered.data, [1, 99.9])))
plt.title('Filtered data, Round:0, Channel: 0')
plt.axis('off')
###################################################################################################
# Decode the processed data into spatially resolved gene expression
# -----------------------------------------------------------------
# Decoding in a non-multiplexed image based transcriptomics method is equivalent to simple spot
# finding, since each spot in each color channel and round corresponds to a different gene. To
# find spots in osmFISH data, the authors employ a peak finder that distinguishes local maxima
# from their surroundings whose absolute intensities exceed a threshold value. It tests a number
# of different thresholds, building a curve from the number of peaks detected at each threshold.
# A threshold in the stable region or knee of the curve is selected, and final peaks are called
# with that threshold.
#
# This process is repeated independently for each round and channel. Here we show this process on
# a single round and channel to demonstrate the procedure. See the documentation for a precise
# description of the parameters.
from starfish.spots import DecodeSpots, FindSpots
from starfish.types import TraceBuildingStrategies
lmp = FindSpots.LocalMaxPeakFinder(
min_distance=6,
stringency=0,
min_obj_area=6,
max_obj_area=600,
is_volume=True
)
spots = lmp.run(mp)
decoder = DecodeSpots.PerRoundMaxChannel(codebook=experiment.codebook,
trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL)
decoded_intensities = decoder.run(spots=spots)
###################################################################################################
# Compare to pySMFISH peak calls
# ------------------------------
# The Field of view that we've used for the test data corresponds to Aldoc, imaged in round one, in
# position 33. We've also packaged the results from the osmFISH publication for this target to
# demonstrate that starfish is capable of recovering the same results.
import os
import pandas as pd
import pickle
def load_results(pickle_file):
with open(pickle_file, "rb") as f:
return pickle.load(f)
def get_benchmark_peaks(loaded_results, redo_flag=False):
if not redo_flag:
sp = pd.DataFrame(
{
"y":loaded_results["selected_peaks"][:, 0],
"x":loaded_results["selected_peaks"][:, 1],
"selected_peaks_int": loaded_results["selected_peaks_int"],
}
)
else:
p = peaks(loaded_results)
coords = p[p.thr_array==loaded_results["selected_thr"]].peaks_coords
coords = coords.values[0]
sp = pd.DataFrame({"x": coords[:, 0], "y": coords[:, 1]})
return sp
try:
module_path = __file__
except NameError:
# this is probably being run from jupyter
cwd = "."
else:
cwd = os.path.dirname(module_path)
benchmark_results = load_results(os.path.join(
cwd, "data", "EXP-17-BP3597_hyb1_Aldoc_pos_33.pkl"))
benchmark_peaks = get_benchmark_peaks(benchmark_results, redo_flag=False)
###################################################################################################
# Plot spots detected in the benchmark as blue spots, and overlay spots from starfish as orange x's.
# Starfish detects the same spot positions, but 41 fewer spots in total.
benchmark_spot_count = len(benchmark_peaks)
starfish_spot_count = len(decoded_intensities)
plt.figure(figsize=(10, 10))
plt.plot(benchmark_peaks.x, -benchmark_peaks.y, "o")
plt.plot(decoded_intensities[Axes.X.value], -decoded_intensities[Axes.Y.value], "x")
plt.legend(["Benchmark: {} spots".format(benchmark_spot_count),
"Starfish: {} spots".format(starfish_spot_count)])
plt.title("Starfish x osmFISH Benchmark Comparison")
spot_difference = benchmark_spot_count - starfish_spot_count
print(f"Starfish finds {spot_difference} fewer spots")
| 41.355556
| 111
| 0.650591
|
c1fc6969b12543bced3597e18e724050c7e4b751
| 5,963
|
py
|
Python
|
heat/tests/generic_resource.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/generic_resource.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
heat/tests/generic_resource.py
|
pshchelo/heat
|
6cf94a3ece89d77b839f61292e5f023c3f192c82
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import signal_responder
from heat.engine.resources import stack_user
LOG = logging.getLogger(__name__)
class GenericResource(resource.Resource):
'''
Dummy resource for use in tests
'''
properties_schema = {}
attributes_schema = {'foo': attributes.Schema('A generic attribute'),
'Foo': attributes.Schema('Another generic attribute')}
def handle_create(self):
LOG.warn(_LW('Creating generic resource (Type "%s")'),
self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
LOG.warn(_LW('Updating generic resource (Type "%s")'),
self.type())
def handle_delete(self):
LOG.warn(_LW('Deleting generic resource (Type "%s")'),
self.type())
def _resolve_attribute(self, name):
return self.name
def handle_suspend(self):
LOG.warn(_LW('Suspending generic resource (Type "%s")'),
self.type())
def handle_resume(self):
LOG.warn(_LW('Resuming generic resource (Type "%s")'),
self.type())
class ResWithComplexPropsAndAttrs(GenericResource):
properties_schema = {
'a_string': properties.Schema(properties.Schema.STRING),
'a_list': properties.Schema(properties.Schema.LIST),
'a_map': properties.Schema(properties.Schema.MAP),
'an_int': properties.Schema(properties.Schema.INTEGER)}
attributes_schema = {'list': attributes.Schema('A list'),
'map': attributes.Schema('A map'),
'string': attributes.Schema('A string')}
update_allowed_properties = ('an_int',)
def _resolve_attribute(self, name):
try:
return self.properties["a_%s" % name]
except KeyError:
return None
class ResourceWithProps(GenericResource):
properties_schema = {
'Foo': properties.Schema(properties.Schema.STRING),
'FooInt': properties.Schema(properties.Schema.INTEGER)}
class ResourceWithPropsAndAttrs(ResourceWithProps):
attributes_schema = {'Bar': attributes.Schema('Something.')}
class ResourceWithResourceID(GenericResource):
properties_schema = {'ID': properties.Schema(properties.Schema.STRING)}
def handle_create(self):
super(ResourceWithResourceID, self).handle_create()
self.resource_id_set(self.properties.get('ID'))
def handle_delete(self):
self.mox_resource_id(self.resource_id)
def mox_resource_id(self, resource_id):
pass
class ResourceWithComplexAttributes(GenericResource):
attributes_schema = {
'list': attributes.Schema('A list'),
'flat_dict': attributes.Schema('A flat dictionary'),
'nested_dict': attributes.Schema('A nested dictionary'),
'none': attributes.Schema('A None')
}
list = ['foo', 'bar']
flat_dict = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
nested_dict = {'list': [1, 2, 3],
'string': 'abc',
'dict': {'a': 1, 'b': 2, 'c': 3}}
def _resolve_attribute(self, name):
if name == 'list':
return self.list
if name == 'flat_dict':
return self.flat_dict
if name == 'nested_dict':
return self.nested_dict
if name == 'none':
return None
class ResourceWithRequiredProps(GenericResource):
properties_schema = {'Foo': properties.Schema(properties.Schema.STRING,
required=True)}
class SignalResource(signal_responder.SignalResponder):
properties_schema = {}
attributes_schema = {'AlarmUrl': attributes.Schema('Get a signed webhook')}
def handle_create(self):
super(SignalResource, self).handle_create()
self.resource_id_set(self._get_user_id())
def handle_signal(self, details=None):
LOG.warn(_LW('Signaled resource (Type "%(type)s") %(details)s'),
{'type': self.type(), 'details': details})
def _resolve_attribute(self, name):
if name == 'AlarmUrl' and self.resource_id is not None:
return six.text_type(self._get_signed_url())
class StackUserResource(stack_user.StackUser):
properties_schema = {}
attributes_schema = {}
def handle_create(self):
super(StackUserResource, self).handle_create()
self.resource_id_set(self._get_user_id())
class ResourceWithCustomConstraint(GenericResource):
properties_schema = {
'Foo': properties.Schema(
properties.Schema.STRING,
constraints=[constraints.CustomConstraint('neutron.network')])}
class ResourceWithAttributeType(GenericResource):
attributes_schema = {
'attr1': attributes.Schema('A generic attribute',
type=attributes.Schema.STRING),
'attr2': attributes.Schema('Another generic attribute',
type=attributes.Schema.MAP)
}
def _resolve_attribute(self, name):
if name == 'attr1':
return "valid_sting"
elif name == 'attr2':
return "invalid_type"
| 33.127778
| 79
| 0.64263
|
11e6f832124eb57fb7590c9b0a950c0d25329e70
| 5,011
|
py
|
Python
|
networking_cisco/apps/saf/server/services/firewall/native/drivers/phy_asa.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | 1
|
2019-01-19T09:12:49.000Z
|
2019-01-19T09:12:49.000Z
|
networking_cisco/apps/saf/server/services/firewall/native/drivers/phy_asa.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | null | null | null |
networking_cisco/apps/saf/server/services/firewall/native/drivers/phy_asa.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from networking_cisco.apps.saf.common import dfa_logger as logging
from networking_cisco.apps.saf.server.services.firewall.native import (
fabric_setup_base as FP)
from networking_cisco.apps.saf.server.services.firewall.native.drivers import (
asa_rest as asa)
from networking_cisco.apps.saf.server.services.firewall.native.drivers import (
base)
LOG = logging.getLogger(__name__)
class PhyAsa(base.BaseDriver, FP.FabricApi):
"""Physical ASA Driver. """
def __init__(self):
LOG.info("Initializing physical ASA")
super(PhyAsa, self).__init__()
def initialize(self, cfg_dict):
self.mgmt_ip_addr = cfg_dict.get('mgmt_ip_addr').strip()
self.user = cfg_dict.get('user').strip()
self.pwd = cfg_dict.get('pwd').strip()
self.interface_in = cfg_dict.get('interface_in').strip()
self.interface_out = cfg_dict.get('interface_out').strip()
LOG.info("ASA with mgmt %s getting initialized",
self.mgmt_ip_addr)
self.asa5585 = asa.Asa5585(self.mgmt_ip_addr, self.user, self.pwd)
def populate_event_que(self, que_obj):
LOG.info("Populate Event for PhyAsa")
def populate_dcnm_obj(self, dcnm_obj):
LOG.info("Populate Event for DCNM obj")
def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Network Create Notification. """
LOG.info("Nwk Create Notif PhyAsa")
def network_delete_notif(self, tenant_id, tenant_name, network_id):
"""Network Delete Notification. """
LOG.info("Nwk Delete Notif PhyAsa")
def is_device_virtual(self):
return False
def get_name(self):
return 'phy_asa'
def get_max_quota(self):
return self.asa5585.get_quota()
def create_fw(self, tenant_id, data):
LOG.info("In creating phy ASA FW data is %s", data)
tenant_name = data.get('tenant_name')
in_ip_dict = self.get_in_ip_addr(tenant_id)
in_gw = in_ip_dict.get('gateway')
in_sec_gw = in_ip_dict.get('sec_gateway')
in_serv_node = self.get_in_srvc_node_ip_addr(tenant_id)
out_ip_dict = self.get_out_ip_addr(tenant_id)
out_ip_gw = out_ip_dict.get('gateway')
out_sec_gw = out_ip_dict.get('sec_gateway')
out_serv_node = self.get_out_srvc_node_ip_addr(tenant_id)
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
kw = {'params': {'tenant_name': tenant_name,
'in_vlan': in_vlan, 'out_vlan': out_vlan,
'in_ip': in_serv_node, 'in_mask': '255.255.255.0',
'in_gw': in_gw, 'in_sec_gw': in_sec_gw,
'out_ip': out_serv_node, 'out_mask': '255.255.255.0',
'out_gw': out_ip_gw, 'out_sec_gw': out_sec_gw,
'intf_in': self.interface_in,
'intf_out': self.interface_out}}
status = self.asa5585.setup(**kw)
if status is False:
LOG.error("Physical FW instance creation failure for "
"tenant %s", tenant_name)
return False
status = self.asa5585.apply_policy(data)
if status is False:
LOG.error("Applying FW policy failure for tenant %s",
tenant_name)
return status
def delete_fw(self, tenant_id, data):
LOG.info("In Delete fw data is %s", data)
tenant_name = data.get('tenant_name')
in_serv_node = self.get_in_srvc_node_ip_addr(tenant_id)
out_serv_node = self.get_out_srvc_node_ip_addr(tenant_id)
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
kw = dict(params=dict(tenant_name=tenant_name,
in_vlan=in_vlan, out_vlan=out_vlan,
in_ip=in_serv_node, in_mask='255.255.255.0',
out_ip=out_serv_node, out_mask='255.255.255.0',
intf_in=self.interface_in,
intf_out=self.interface_out))
status = self.asa5585.cleanup(**kw)
return status
def modify_fw(self, tenant_id, data):
LOG.info("In Modify fw data is %s", data)
return self.asa5585.apply_policy(data)
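# --- Illustrative configuration (editor's sketch, not part of the driver) ---
# PhyAsa.initialize() above reads exactly these keys from its cfg_dict; the
# values below are placeholders, and the call is left commented out because
# the driver talks to a real ASA appliance.
#
#   driver = PhyAsa()
#   driver.initialize({'mgmt_ip_addr': '10.0.0.5', 'user': 'admin',
#                      'pwd': 'secret', 'interface_in': 'Te0/6',
#                      'interface_out': 'Te0/7'})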
| 40.41129
| 79
| 0.637398
|
e2b961a46a3ccd9750c1063b97dca5c8bb9cae52
| 25,035
|
py
|
Python
|
TrainingExtensions/tensorflow/test/python/test_cross_layer_equalization.py
|
Rohan-Chaudhury/aimet
|
1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
|
[
"BSD-3-Clause"
] | 945
|
2020-04-30T02:23:55.000Z
|
2022-03-31T08:44:32.000Z
|
TrainingExtensions/tensorflow/test/python/test_cross_layer_equalization.py
|
Rohan-Chaudhury/aimet
|
1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
|
[
"BSD-3-Clause"
] | 563
|
2020-05-01T03:07:22.000Z
|
2022-03-30T05:35:58.000Z
|
TrainingExtensions/tensorflow/test/python/test_cross_layer_equalization.py
|
Rohan-Chaudhury/aimet
|
1c38cac8cc0fd32dca40ce5e39940805d29f7a4a
|
[
"BSD-3-Clause"
] | 186
|
2020-04-30T00:55:26.000Z
|
2022-03-30T09:54:51.000Z
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" This file contains unit tests for testing cross layer scaling feature of CLE """
import unittest
import numpy as np
import os
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.logging.WARN)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import aimet_tensorflow.utils.graph_saver
from aimet_tensorflow.cross_layer_equalization import CrossLayerScaling, GraphSearchUtils, equalize_model, \
fold_all_batch_norms, HighBiasFold
from aimet_tensorflow.utils.op.conv import WeightTensorUtils, BiasUtils
class TestCrossLayerEqualization(unittest.TestCase):
""" Test methods for Cross layer equalization """
@staticmethod
def _custom_two_conv_layer_model():
"""
Builds a custom model with two conv layers
:return:
"""
tf.compat.v1.reset_default_graph()
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
x = tf.nn.relu(x, name='ReluInTheMiddle')
x = tf.keras.layers.Conv2D(32, (3, 3))(x)
x = tf.nn.relu(x, name='Relu')
return x
@staticmethod
def _custom_three_layer_model_keras():
"""
Builds a custom model with three conv layers
:return:
"""
tf.compat.v1.reset_default_graph()
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
x = tf.nn.relu(x, name='ReluInTheMiddle')
x = tf.keras.layers.Conv2D(32, (3, 3))(x)
x = tf.keras.layers.ReLU(name='AnotherRelu')(x)
x = tf.keras.layers.Conv2D(32, (3, 3))(x)
x = tf.nn.relu(x, name='Relu')
return x
@staticmethod
def _custom_three_layer_model_keras_prelu():
"""
Builds a custom model with three conv layers
:return:
"""
tf.compat.v1.reset_default_graph()
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
x = tf.nn.relu(x, name='ReluInTheMiddle')
x = tf.keras.layers.Conv2D(32, (3, 3))(x)
x = tf.keras.layers.PReLU(name='prelu')(x)
x = tf.keras.layers.Conv2D(32, (3, 3))(x)
x = tf.nn.relu(x, name='Relu')
return x
def test_find_layer_groups_to_scale_custom_model_with_candidate_layers(self):
""" Test find_layer_groups_to_scale() on a custom model """
_ = TestCrossLayerEqualization._custom_two_conv_layer_model()
tf.set_random_seed(0)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
start_op = "inputs"
graph_util = GraphSearchUtils(tf.compat.v1.get_default_graph(), start_op, 'Relu')
_ , layer_groups = graph_util.find_layer_groups_to_scale()
self.assertEqual(1, len(layer_groups))
def test_find_layers_groups_tp_scale_custom_model_without_candidate_layers(self):
""" Test find_layer_groups_to_scale() on a model without potential layers for scaling """
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
_ = tf.nn.relu(bn_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
graph_util = GraphSearchUtils(tf.compat.v1.get_default_graph(), "inputs", 'Relu')
_ , layer_groups = graph_util.find_layer_groups_to_scale()
self.assertEqual(0, len(layer_groups))
sess.close()
def test_update_weight_tensor_for_op(self):
""" Test update_weight_tensor_for_op() on custom conv op """
        # build a small custom model with a single conv layer
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
_ = tf.nn.relu(conv_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
initial_data = WeightTensorUtils.get_tensor_as_numpy_data(sess, conv_op)
wt_data = initial_data + 2
        # update the weights of the conv2d/Conv2D op built above
WeightTensorUtils.update_tensor_for_op(sess, conv_op, wt_data)
new_sess = aimet_tensorflow.utils.graph_saver.save_and_load_graph('./temp_conv_wt_updated', sess)
# check for if reroute was successful
# read op from conv op should be same as one defined by new variable type
conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
new_wt_data = WeightTensorUtils.get_tensor_as_numpy_data(new_sess, conv_op)
assert not np.allclose(initial_data, new_wt_data)
sess.close()
def test_scale_cls_set_with_conv_layers_custom_model(self):
"""
Test scale_cls_set_with_conv_layers() on a custom model
"""
tf.set_random_seed(0)
_ = TestCrossLayerEqualization._custom_two_conv_layer_model()
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
graph_util = GraphSearchUtils(tf.compat.v1.get_default_graph(), "inputs", 'Relu')
_ , layer_groups_as_tf_ops = graph_util.find_layer_groups_to_scale()
scaling_factors = CrossLayerScaling.scale_cls_set_with_conv_layers(sess, layer_groups_as_tf_ops[0])
self.assertEqual(32, len(scaling_factors))
range_conv1_after_scaling = np.amax(np.abs(WeightTensorUtils.get_tensor_as_numpy_data(
sess, layer_groups_as_tf_ops[0][0])), axis=(2, 0, 1))
range_conv2_after_scaling = np.amax(np.abs(WeightTensorUtils.get_tensor_as_numpy_data(
sess, layer_groups_as_tf_ops[0][1])), axis=(3, 0, 1))
assert np.allclose(range_conv1_after_scaling, range_conv2_after_scaling)
sess.close()
def test_scale_cls_set_with_depthwise_conv_layer_custom_model(self):
"""
Test test_scale_cls_set_with_depthwise_layers() on a custom model
"""
tf.set_random_seed(0)
tf.compat.v1.reset_default_graph()
inputs = tf.keras.Input(shape=(10, 10, 3,))
x = tf.keras.layers.Conv2D(10, (1, 1))(inputs)
y = tf.keras.layers.DepthwiseConv2D((3, 3), padding='valid',depth_multiplier=1, strides=(1,1), use_bias=False)(x)
z = tf.keras.layers.Conv2D(10, (1, 1))(y)
_ = tf.nn.relu(z)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session(graph = tf.compat.v1.get_default_graph())
sess.run(init)
graph_util = GraphSearchUtils(tf.compat.v1.get_default_graph(), "input_1", 'Relu')
_ , layer_groups_as_tf_ops = graph_util.find_layer_groups_to_scale()
scaling_matrix12, scaling_matrix23 = CrossLayerScaling.scale_cls_set_with_depthwise_layers(
sess, layer_groups_as_tf_ops[0])
self.assertEqual(10, len(scaling_matrix12))
self.assertEqual(10, len(scaling_matrix23))
sess.close()
def test_scale_model_custom(self):
""" Test scale_model on a custom model """
tf.set_random_seed(0)
_ = TestCrossLayerEqualization._custom_two_conv_layer_model()
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
new_sess, scaling_factors = CrossLayerScaling.scale_model(sess, "inputs", 'Relu')
# scaling factors for number of groups selected for scaling returned
self.assertEqual(1, len(scaling_factors))
self.assertTrue(scaling_factors[0].cls_pair_info_list[0].relu_activation_between_layers)
sess.close()
def test_scale_three_layer_model(self):
""" Test scale_model on a custom 3-layer model """
_ = TestCrossLayerEqualization._custom_three_layer_model_keras()
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
new_sess, scaling_factors = CrossLayerScaling.scale_model(sess, "inputs", 'Relu')
# scaling factors for number of groups selected for scaling returned
self.assertEqual(2, len(scaling_factors))
self.assertTrue(scaling_factors[0].cls_pair_info_list[0].relu_activation_between_layers)
self.assertTrue(scaling_factors[1].cls_pair_info_list[0].relu_activation_between_layers)
sess.close()
def test_scale_three_layer_model_with_prelu(self):
""" Test scale_model on a custom 3-layer model with prelu """
tf.set_random_seed(0)
_ = TestCrossLayerEqualization._custom_three_layer_model_keras_prelu()
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
new_sess, scaling_factors = CrossLayerScaling.scale_model(sess, "inputs", 'Relu')
# scaling factors for number of groups selected for scaling returned
self.assertEqual(2, len(scaling_factors))
self.assertTrue(scaling_factors[0].cls_pair_info_list[0].relu_activation_between_layers)
self.assertTrue(scaling_factors[1].cls_pair_info_list[0].relu_activation_between_layers)
sess.close()
def test_relu6_replaced_with_relu(self):
"""
        Test replacing Relu6 with Relu
"""
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
_ = tf.nn.relu6(conv_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
bias_add = sess.graph.get_operation_by_name('conv2d/BiasAdd')
self.assertEqual('Relu6', bias_add.outputs[0].consumers()[0].type)
        # replace the Relu6 activation with Relu
start_op = "input_1"
graph_util = GraphSearchUtils(sess.graph, start_op, 'Relu6')
after_relu_replace_sess = graph_util.find_and_replace_relu6_with_relu(sess)
updated_bias_add = after_relu_replace_sess.graph.get_operation_by_name('conv2d/BiasAdd')
self.assertEqual('Relu', updated_bias_add.outputs[0].consumers()[0].type)
sess.close()
def test_high_bias_fold_two_bn_folded_convs(self):
"""
Test high bias fold with a custom model with two BN folded convs
"""
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
relu_1= tf.nn.relu(bn_op)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(conv2_op)
relu_2 = tf.nn.relu(bn_op_2)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
np.random.seed(0)
conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
start_op_names = conv_op.inputs[0].op.name
output_op_names = 'Relu_1'
# fold batchnorm layers
after_bn_fold_sess, folded_pairs = fold_all_batch_norms(sess, start_op_names, output_op_names)
# replace any ReLU6 layers with ReLU
graph_util = GraphSearchUtils(after_bn_fold_sess.graph, start_op_names, output_op_names)
after_relu_replace_sess = graph_util.find_and_replace_relu6_with_relu(after_bn_fold_sess)
# perform cross-layer scaling on applicable layer sets
after_cls_sess, cls_set_info_list = CrossLayerScaling.scale_model(after_relu_replace_sess, start_op_names,
output_op_names)
# we want to validate that after high bias fold, bias for conv is >= bias before high bias fold.
conv_op = after_cls_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
before_high_bias_fold_bias_data = BiasUtils.get_bias_as_numpy_data(after_cls_sess, conv_op)
# perform high-bias fold
after_hbf_sess = HighBiasFold.bias_fold(after_cls_sess, folded_pairs, cls_set_info_list)
# read updated bias value
conv_op = after_hbf_sess.graph.get_operation_by_name('conv2d_1/Conv2D')
high_bias_folded_bias_data = BiasUtils.get_bias_as_numpy_data(after_hbf_sess, conv_op)
for i in range(len(before_high_bias_fold_bias_data)):
# folded bias should be greater than previous bias
self.assertTrue(high_bias_folded_bias_data[i] >= before_high_bias_fold_bias_data[i])
sess.close()
def test_bias_add_custom_model(self):
""" test update bias when no bias present """
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
relu2= tf.nn.relu(conv2_op)
add = tf.keras.layers.add([conv_op, relu2])
relu= tf.nn.relu(add)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
shape = WeightTensorUtils.get_tensor_shape(conv_op.op)
np.random.seed(0)
bias_data = np.random.rand(shape[3])
assert BiasUtils.is_bias_none(conv_op.op)
BiasUtils.update_bias_for_op(sess, conv_op.op, bias_data)
n_sess = aimet_tensorflow.utils.graph_saver.save_and_load_graph('./test_update', sess)
conv_op_updated = n_sess.graph.get_operation_by_name(conv_op.op.name)
assert not BiasUtils.is_bias_none(conv_op_updated)
updated_bias = BiasUtils.get_bias_as_numpy_data(n_sess, conv_op_updated)
self.assertTrue(np.allclose(updated_bias, bias_data))
sess.close()
def test_cls_layer_select_conv_with_identity(self):
"""
        Test cross layer scaling layer selection when convs have identity nodes in between.
        This was observed with the TF Slim MobileNetV2 model.
"""
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
conv1_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
relu_op = tf.nn.relu(conv1_op)
identity = tf.identity(relu_op)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(identity)
relu2_op = tf.nn.relu(conv2_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
start_op = "inputs"
output_op = 'Relu_1'
graph_search = GraphSearchUtils(sess.graph, start_op, output_op)
_ , layer_groups_as_tf_ops = graph_search.find_layer_groups_to_scale()
assert len(layer_groups_as_tf_ops) == 1
sess.close()
def test_high_bias_fold_custom_model(self):
"""
Test high bias fold with a custom model
"""
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
relu_1= tf.nn.relu(conv_op)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(relu_1)
bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(conv2_op)
conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op_2)
relu_2 = tf.nn.relu(conv3_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
np.random.seed(0)
conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
start_op_names = conv_op.inputs[0].op.name
output_op_names = relu_2.op.name
# fold batchnorm layers
after_bn_fold_sess, folded_pairs = fold_all_batch_norms(sess, start_op_names, output_op_names)
# replace any ReLU6 layers with ReLU
graph_util = GraphSearchUtils(after_bn_fold_sess.graph, start_op_names, output_op_names)
after_relu_replace_sess = graph_util.find_and_replace_relu6_with_relu(after_bn_fold_sess)
# perform cross-layer scaling on applicable layer sets
after_cls_sess, cls_set_info_list = CrossLayerScaling.scale_model(after_relu_replace_sess, start_op_names,
output_op_names)
# we want to validate that after high bias fold, bias for conv is >= bias before high bias fold.
conv_op = after_cls_sess.graph.get_operation_by_name('conv2d_2/Conv2D')
before_high_bias_fold_bias_data = BiasUtils.get_bias_as_numpy_data(after_cls_sess, conv_op)
# perform high-bias fold
after_hbf_sess = HighBiasFold.bias_fold(after_cls_sess, folded_pairs, cls_set_info_list)
# read updated bias value
conv_op = after_hbf_sess.graph.get_operation_by_name('conv2d_2/Conv2D')
high_bias_folded_bias_data = BiasUtils.get_bias_as_numpy_data(after_hbf_sess, conv_op)
for i in range(len(before_high_bias_fold_bias_data)):
# folded bias should be greater than previous bias
self.assertTrue(high_bias_folded_bias_data[i] >= before_high_bias_fold_bias_data[i])
sess.close()
def test_equalize_model_multi_input(self):
"""
Test bn fold with multiple input nodes
"""
tf.compat.v1.reset_default_graph()
tf.set_random_seed(0)
input1 = tf.keras.Input(name='input1', shape=(10, 10, 3))
input2 = tf.keras.Input(name='input2', shape=(12, 12, 3))
x1 = tf.keras.layers.Conv2D(8, (1, 1), name='conv1a',
kernel_initializer=tf.random_uniform_initializer(-1, 1),
bias_initializer='random_uniform')(input1)
x2 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1b',
kernel_initializer=tf.random_uniform_initializer(-1, 1),
bias_initializer='random_uniform')(x1)
x3 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1c',
kernel_initializer=tf.random_uniform_initializer(-1, 1),
bias_initializer='random_uniform')(input2)
x4 = tf.keras.layers.Conv2D(8, (3, 3), name='conv1d',
kernel_initializer=tf.random_uniform_initializer(-1, 1),
bias_initializer='random_uniform')(x3)
x = tf.keras.layers.add([x2, x4])
conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(x)
bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv2_op)
_ = tf.nn.relu(bn_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)
conv_1b_before_equalize = sess.graph.get_operation_by_name('conv1b/Conv2D')
conv_1b_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(sess, conv_1b_before_equalize)
conv_1d_before_equalize = sess.graph.get_operation_by_name('conv1d/Conv2D')
conv_1d_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(sess, conv_1d_before_equalize)
new_sess = equalize_model(sess, ["input1", "input2"], 'Relu')
conv_1b_after_equalize = new_sess.graph.get_operation_by_name('conv1b/Conv2D')
conv_1b_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(new_sess, conv_1b_after_equalize)
conv_1d_after_equalize = new_sess.graph.get_operation_by_name('conv1d/Conv2D')
conv_1d_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(new_sess, conv_1d_after_equalize)
for i in range(len(conv_1b_bias_data_after_fold)):
self.assertTrue(conv_1b_bias_data_after_fold[i] <= conv_1b_bias_data_before_fold[i])
for i in range(len(conv_1d_bias_data_after_fold)):
self.assertTrue(conv_1d_bias_data_after_fold[i] <= conv_1d_bias_data_before_fold[i])
sess.close()
def test_equalize_with_custom_model_no_bias(self):
"""
Test equalize with a custom model with conv without bias param
"""
tf.compat.v1.reset_default_graph()
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
with sess.as_default():
inputs = tf.keras.Input(shape=(32, 32, 3,))
conv_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(inputs)
bn_op = tf.keras.layers.BatchNormalization(fused=True)(conv_op)
relu_1= tf.nn.relu(bn_op)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3), use_bias=False)(relu_1)
bn_op_2 = tf.keras.layers.BatchNormalization(fused=True)(conv2_op, training=False)
relu_2 = tf.nn.relu(bn_op_2)
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
old_conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
self.assertTrue(BiasUtils.is_bias_none(old_conv_op))
conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
new_sess = equalize_model(sess, conv_op.inputs[0].op.name, 'Relu_1')
new_conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
bias = BiasUtils.get_bias_as_numpy_data(new_sess, new_conv_op)
self.assertFalse(BiasUtils.is_bias_none(new_conv_op))
sess.close()
def test_equalize_fold_forward(self):
"""
Test equalize on a model with a forward bn fold
"""
tf.compat.v1.reset_default_graph()
inputs = tf.keras.Input(shape=(32, 32, 3,), name="inputs")
conv_op = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
r_op = tf.nn.relu(conv_op)
bn_op = tf.keras.layers.BatchNormalization(fused=True)(r_op)
conv2_op = tf.keras.layers.Conv2D(32, (3, 3))(bn_op)
conv3_op = tf.keras.layers.Conv2D(32, (3, 3))(conv2_op)
_ = tf.nn.relu(conv3_op)
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session(graph = tf.compat.v1.get_default_graph())
sess.run(init)
old_conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
conv_bias_data_before_fold = BiasUtils.get_bias_as_numpy_data(sess, old_conv_op)
conv_op = sess.graph.get_operation_by_name('conv2d/Conv2D')
new_sess = equalize_model(sess, conv_op.inputs[0].op.name, 'Relu_1')
new_conv_op = new_sess.graph.get_operation_by_name('conv2d/Conv2D')
self.assertFalse(BiasUtils.is_bias_none(new_conv_op))
conv_bias_data_after_fold = BiasUtils.get_bias_as_numpy_data(new_sess, new_conv_op)
for i in range(len(conv_bias_data_before_fold)):
self.assertTrue(conv_bias_data_before_fold[i] <= conv_bias_data_after_fold[i])
sess.close()
| 43.767483
| 121
| 0.669383
|
a3e649c36610eb6bf06a40cbc29a123ad064f0b4
| 21,968
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/observation_component.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/observation_component.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/observation_component.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
BooleanType,
IntegerType,
DataType,
TimestampType,
)
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class Observation_ComponentSchema:
"""
Measurements and simple assertions made about a patient, device or other
subject.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Measurements and simple assertions made about a patient, device or other
subject.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
code: Describes what was observed. Sometimes this is called the observation "code".
valueQuantity: The information determined as a result of making the observation, if the
information has a simple value.
valueCodeableConcept: The information determined as a result of making the observation, if the
information has a simple value.
valueString: The information determined as a result of making the observation, if the
information has a simple value.
valueBoolean: The information determined as a result of making the observation, if the
information has a simple value.
valueInteger: The information determined as a result of making the observation, if the
information has a simple value.
valueRange: The information determined as a result of making the observation, if the
information has a simple value.
valueRatio: The information determined as a result of making the observation, if the
information has a simple value.
valueSampledData: The information determined as a result of making the observation, if the
information has a simple value.
valueTime: The information determined as a result of making the observation, if the
information has a simple value.
valueDateTime: The information determined as a result of making the observation, if the
information has a simple value.
valuePeriod: The information determined as a result of making the observation, if the
information has a simple value.
dataAbsentReason: Provides a reason why the expected value in the element
Observation.component.value[x] is missing.
interpretation: A categorical assessment of an observation value. For example, high, low,
normal.
referenceRange: Guidance on how to interpret the value by comparison to a normal or
recommended range.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.r4.complex_types.range import RangeSchema
from spark_fhir_schemas.r4.complex_types.ratio import RatioSchema
from spark_fhir_schemas.r4.complex_types.sampleddata import SampledDataSchema
from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
from spark_fhir_schemas.r4.complex_types.observation_referencerange import (
Observation_ReferenceRangeSchema,
)
if (
max_recursion_limit
and nesting_list.count("Observation_Component") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Observation_Component"]
my_parent_path = (
parent_path + ".observation_component"
if parent_path
else "observation_component"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Describes what was observed. Sometimes this is called the observation "code".
StructField(
"code",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valueQuantity",
QuantitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valueCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField("valueString", StringType(), True),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField("valueBoolean", BooleanType(), True),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField("valueInteger", IntegerType(), True),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valueRange",
RangeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valueRatio",
RatioSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valueSampledData",
SampledDataSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField("valueTime", StringType(), True),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField("valueDateTime", TimestampType(), True),
# The information determined as a result of making the observation, if the
# information has a simple value.
StructField(
"valuePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Provides a reason why the expected value in the element
# Observation.component.value[x] is missing.
StructField(
"dataAbsentReason",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# A categorical assessment of an observation value. For example, high, low,
# normal.
StructField(
"interpretation",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Guidance on how to interpret the value by comparison to a normal or
# recommended range.
StructField(
"referenceRange",
ArrayType(
Observation_ReferenceRangeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
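# --- Illustrative usage (editor's sketch, not part of the generated schema) ---
# The generated StructType is typically handed to a Spark reader; the session
# setup and the file path below are placeholders.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   schema = Observation_ComponentSchema.get_schema()
#   df = spark.read.schema(schema).json("observation_components.json")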
| 49.589165
| 104
| 0.559177
|
28594a0b7b1e5f0237dd7c52af6b6b97c95688c7
| 1,010
|
py
|
Python
|
setup.py
|
adw62/viridian_workflow
|
5215697e432e2504978186b121ab2b398ece02d7
|
[
"MIT"
] | null | null | null |
setup.py
|
adw62/viridian_workflow
|
5215697e432e2504978186b121ab2b398ece02d7
|
[
"MIT"
] | null | null | null |
setup.py
|
adw62/viridian_workflow
|
5215697e432e2504978186b121ab2b398ece02d7
|
[
"MIT"
] | null | null | null |
import glob
from setuptools import setup, find_packages
from Cython.Build import cythonize
with open("requirements.txt") as f:
install_requires = [x.rstrip() for x in f]
setup(
name="viridian_workflow",
version="0.3.4",
description="FIXME",
packages=find_packages(),
package_data={"viridian_workflow": ["amplicon_scheme_data/*"]},
author="Jeff Knaggs,Martin Hunt",
author_email="FIXME",
url="https://github.com/iqbal-lab-org/viridian_workflow",
test_suite="nose.collector",
tests_require=["pytest"],
entry_points={
"console_scripts": ["viridian_workflow = viridian_workflow.__main__:main"]
},
install_requires=install_requires,
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: MIT License",
],
ext_modules=cythonize("./viridian_workflow/self_qc.pyx")
)
| 31.5625
| 82
| 0.672277
|
a4eded3812123bdb44bc2f004afe4f6aa7191de5
| 422
|
py
|
Python
|
abuseipdb/komand_abuseipdb/util/helper.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | 1
|
2020-03-18T09:14:55.000Z
|
2020-03-18T09:14:55.000Z
|
abuseipdb/komand_abuseipdb/util/helper.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | 1
|
2021-02-23T23:57:37.000Z
|
2021-02-23T23:57:37.000Z
|
abuseipdb/komand_abuseipdb/util/helper.py
|
OSSSP/insightconnect-plugins
|
846758dab745170cf1a8c146211a8bea9592e8ff
|
[
"MIT"
] | null | null | null |
from komand.exceptions import PluginException
def get_json(response):
json_ = response.json()
if "errors" in json_:
details = ''
        if len(json_['errors']) > 0 and 'detail' in json_['errors'][0]:
            details = json_['errors'][0]['detail']
raise PluginException(cause='Received an error response from AbuseIPDB.',
assistance=details)
return json_
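# Illustrative sketch (not part of the plugin): a fake response object showing the
# kind of error payload get_json() guards against. The payload values are made up
# for the example; only the {"errors": [{"detail": ...}]} shape matters here.
if __name__ == "__main__":
    class _FakeErrorResponse:
        def json(self):
            return {"errors": [{"detail": "Daily rate limit exceeded.", "status": 429}]}
    try:
        get_json(_FakeErrorResponse())
    except PluginException as error:
        # Expected: the "detail" string is surfaced as the exception's assistance text.
        print(error)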
| 28.133333
| 81
| 0.601896
|
d143c71acf4eea61af584ab605953eeaf9867e52
| 1,434
|
py
|
Python
|
zephyrus_sc2_parser/game/perception_action_cycle.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | null | null | null |
zephyrus_sc2_parser/game/perception_action_cycle.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | null | null | null |
zephyrus_sc2_parser/game/perception_action_cycle.py
|
dareonion/zephyrus-sc2-parser
|
0e31ac86e4546466eba44f37ed13ab2880fe5d39
|
[
"MIT"
] | null | null | null |
class PerceptionActionCycle:
def __init__(self, initial_camera_position, initial_gameloop):
self.initial_camera_position = initial_camera_position
self.initial_gameloop = initial_gameloop
self.final_camera_position = None
self.final_gameloop = None
self.actions = []
self.camera_moves = []
self.min_duration = 4 # 4 game loops (~2sec) minimum
self.min_camera_move = 6 # 6 camera units (x or y) minimum
def check_position(self, new_position):
"""
Compares the initial camera position of the PAC
to the current camera position.
If the current position differs by more than 6
units, a boolean (False) is returned and the current PAC ends.
"""
x_diff = abs(new_position[0] - self.initial_camera_position[0])
y_diff = abs(new_position[1] - self.initial_camera_position[1])
total_diff = (x_diff**2) + (y_diff**2)
if total_diff > self.min_camera_move**2:
return False
return True
def check_duration(self, new_gameloop):
"""
Compares the initial gameloop the PAC
started on to the current gameloop.
If the difference is greater than 4 units,
the PAC is valid and a boolean (True) is returned.
"""
if new_gameloop - self.initial_gameloop > self.min_duration:
return True
return False
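# Illustrative usage sketch (not part of the original module): it drives the two
# checks above with a few made-up (camera_position, gameloop) samples to show when
# a PAC would end and whether it counts as valid.
if __name__ == "__main__":
    pac = PerceptionActionCycle((50.0, 50.0), 100)
    for position, gameloop in [((52.0, 51.0), 102), ((60.0, 58.0), 110)]:
        if not pac.check_position(position):
            # The camera moved more than min_camera_move units, so this PAC ends here.
            print("PAC ended at gameloop", gameloop, "valid:", pac.check_duration(gameloop))
            break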
| 36.769231
| 71
| 0.644351
|
551229b82b58a26e8bb736115f25f59f56499229
| 5,659
|
py
|
Python
|
tests/test_provider_sql_ts.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 9
|
2021-04-30T13:04:31.000Z
|
2022-01-11T14:11:53.000Z
|
tests/test_provider_sql_ts.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 7
|
2021-03-19T07:31:22.000Z
|
2021-03-26T12:31:45.000Z
|
tests/test_provider_sql_ts.py
|
flzara/shaystack
|
6bf815f25f3a5d64494ec1c4a34a7b23ea0ad4ce
|
[
"BSD-2-Clause"
] | 5
|
2021-04-29T11:51:04.000Z
|
2022-02-22T21:10:19.000Z
|
import datetime
import logging
import os
from typing import cast
from unittest.mock import patch
import pytz
from nose.plugins.attrib import attr
# noinspection PyProtectedMember
from shaystack import Ref, Grid, Quantity, MARKER, REMOVE, Coordinate, NA, parse_date_range, XStr
from shaystack.providers import get_provider
from shaystack.providers.sql import Provider as SQLProvider
from shaystack.providers.timestream import Provider as DBTSProvider
# Set HAYSTACK_DB variable, before running the tests to validate with another database
# HAYSTACK_DB = 'postgresql://postgres:password@172.17.0.2:5432/postgres#haystack'
HAYSTACK_DB = os.environ.get("_HAYSTACK_DB", 'sqlite3:///:memory:#haystack')
HAYSTACK_TS = os.environ.get("_HAYSTACK_TS", 'timestream://HaystackDemo/?mem_ttl=8766&mag_ttl=400#haystack')
FAKE_NOW = datetime.datetime(2020, 10, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
log = logging.getLogger("sql_ts.Provider")
@attr('aws')
def test_create_db():
envs = {'HAYSTACK_DB': HAYSTACK_DB,
'HAYSTACK_TS': HAYSTACK_TS,
'AWS_PROFILE': os.environ['AWS_PROFILE'],
'AWS_REGION': os.environ['AWS_REGION']
}
with cast(DBTSProvider, get_provider("shaystack.providers.timestream", envs)) as provider:
provider.create_db()
@attr('aws')
@patch.object(SQLProvider, 'get_customer_id')
@patch.object(DBTSProvider, 'get_customer_id')
def test_import_ts_grid_in_db_and_his_read(mock1, mock2):
mock1.return_value = "customer"
mock2.return_value = "customer"
envs = {'HAYSTACK_DB': HAYSTACK_DB,
'HAYSTACK_TS': HAYSTACK_TS,
'AWS_PROFILE': os.environ['AWS_PROFILE'],
'AWS_REGION': os.environ['AWS_REGION']
}
with cast(DBTSProvider, get_provider("shaystack.providers.timestream", envs)) as provider:
values = [
(XStr("hex", "deadbeef"), "Str"),
("100", "Str"),
(100.0, "Number"), (Quantity(1, "m"), "Number"), (100, "Number"),
(True, "Bool"), (False, "Bool"),
(MARKER, "Marker"), (None, "Marker"),
(REMOVE, "Remove"), (None, "Remove"),
(NA, "NA"), (None, "NA"),
(Ref("abc"), "Ref"),
(datetime.datetime.utcnow().replace(microsecond=0), "DateTime"),
(datetime.date.today(), "Date"),
(datetime.datetime.utcnow().time(), "Time"),
(datetime.time(16, 58, 57, 994), "Time"),
(Coordinate(100.0, 200.0), "Coord"),
]
# Check TS with all types
entity_id = Ref("abc")
for val, kind in values:
# Clean DB for the specific kind
provider.purge_db()
provider.create_db()
            # Insert an entity for the TS, with an attribute "kind"
grid = Grid(columns=["id", "kind"])
grid.append({"id": entity_id, "kind": kind}) # Without "kind", the default is "Number" or "float"
version = datetime.datetime.now(tz=pytz.UTC)
provider.update_grid(diff_grid=grid, version=version, customer_id="customer")
            # WARNING: Timestream accepts only datetimes within the memory retention period,
            # not before and not after.
log.debug("Test %s", type(val))
grid = Grid(columns=["ts", "val"])
# You must use utcnow() and a retention
grid.append({"ts": datetime.datetime.utcnow(), "val": val})
provider._import_ts_in_db(grid, entity_id, "customer", FAKE_NOW)
grid_ts = provider.his_read(entity_id, parse_date_range("today", provider.get_tz()), None)
assert grid_ts[0]['val'] == val, f"with kind={kind} and val={val}"
@attr('aws')
@patch.object(SQLProvider, 'get_customer_id')
@patch.object(DBTSProvider, 'get_customer_id')
def test_import_ts_grid_in_db_with_a_lot_of_records(mock1, mock2):
mock1.return_value = "customer"
mock2.return_value = "customer"
envs = {'HAYSTACK_DB': HAYSTACK_DB,
'HAYSTACK_TS': HAYSTACK_TS,
'AWS_PROFILE': os.environ['AWS_PROFILE'],
'AWS_REGION': os.environ['AWS_REGION']
}
with cast(DBTSProvider, get_provider("shaystack.providers.timestream", envs)) as provider:
# Check TS with all types
entity_id = Ref("abc")
        # Insert an entity for the TS, with an attribute "kind"
provider.purge_db()
grid = Grid(columns=["id", "kind"])
grid.append({"id": entity_id, "kind": "Number"}) # Without "kind", the default is "Number" or "float"
version = datetime.datetime.now(tz=pytz.UTC)
provider.update_grid(diff_grid=grid, version=version, customer_id="customer")
        # WARNING: Timestream accepts only datetimes within the memory retention period,
        # not before and not after.
        # It's not possible to extend the memory retention temporarily to inject an old value.
provider.purge_ts()
provider.create_db()
grid = Grid(columns=["ts", "val"])
# You must use utcnow() and a retention
for i in range(0, 200):
grid.append({"ts": datetime.datetime.utcnow().replace(microsecond=i * 1000), "val": i})
provider._import_ts_in_db(grid, entity_id, "customer", FAKE_NOW)
@attr('aws')
def test_about():
envs = {'HAYSTACK_DB': HAYSTACK_DB,
'HAYSTACK_TS': HAYSTACK_TS,
'AWS_PROFILE': os.environ['AWS_PROFILE'],
'AWS_REGION': os.environ['AWS_REGION']
}
with get_provider("shaystack.providers.timestream", envs) as provider:
result = provider.about("http://localhost")
assert result[0]['moduleName'] == 'SQLProvider'
| 40.71223
| 110
| 0.63209
|
6dd561598e4fc44fed4cd764acf9e5ef26618c40
| 3,243
|
py
|
Python
|
LEGOxGBA/gba_input.py
|
CoderBeanLiang/OhMyGenius
|
e0874490cae44c6c2a619f3d84dd313716000889
|
[
"CC0-1.0"
] | null | null | null |
LEGOxGBA/gba_input.py
|
CoderBeanLiang/OhMyGenius
|
e0874490cae44c6c2a619f3d84dd313716000889
|
[
"CC0-1.0"
] | null | null | null |
LEGOxGBA/gba_input.py
|
CoderBeanLiang/OhMyGenius
|
e0874490cae44c6c2a619f3d84dd313716000889
|
[
"CC0-1.0"
] | null | null | null |
import RPi.GPIO as GPIO
import uinput
import time
# Define GPIO BCM code
# Should be related to your own GPIO wires
btnLeft = 24
btnRight = 23
btnUp = 25
btnDown = 18
btnA = 26
btnB = 21
btnL = 22
btnR = 20
btnStart = 17
btnSelect = 27
# Button callback, param channel is BCM code defined above
# GPIO state 'Low' means the button is pressed because my wires are in PUD_UP mode
def button_trigger(channel):
state = not GPIO.input(channel)
if channel == btnLeft:
device.emit(uinput.BTN_DPAD_LEFT, state)
elif channel == btnRight:
device.emit(uinput.BTN_DPAD_RIGHT, state)
elif channel == btnUp:
device.emit(uinput.BTN_DPAD_UP, state)
elif channel == btnDown:
device.emit(uinput.BTN_DPAD_DOWN, state)
elif channel == btnA:
device.emit(uinput.BTN_A, state)
elif channel == btnB:
device.emit(uinput.BTN_B, state)
elif channel == btnL:
device.emit(uinput.BTN_TL, state)
elif channel == btnR:
device.emit(uinput.BTN_TR, state)
elif channel == btnStart:
device.emit(uinput.BTN_START, state)
elif channel == btnSelect:
device.emit(uinput.BTN_SELECT, state)
# See print out to check your wires
if state == 1:
print("Pressed:{:>2}".format(channel))
else:
print("Release:{:>2}".format(channel))
# Set mode
GPIO.setmode(GPIO.BCM)
# Disable warnings
# GPIO.setwarnings(False)
# Init GPIO, PUD_UP means 'High' by default; a button press should input 'Low'
GPIO.setup(btnLeft, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnRight, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnUp, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnDown, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnA, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnB, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnL, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnR, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnStart, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(btnSelect, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Add call back
GPIO.add_event_detect(btnLeft, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnRight, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnUp, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnDown, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnA, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnB, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnL, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnR, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnStart, GPIO.BOTH, callback=button_trigger)
GPIO.add_event_detect(btnSelect, GPIO.BOTH, callback=button_trigger)
# uinput init
# Simply using BTN_0 ~ BTN_9 may be OK, because each button's
# real function only depends on the input configs of RetroPie
events = (
uinput.BTN_A,
uinput.BTN_B,
uinput.BTN_TL,
uinput.BTN_TR,
uinput.BTN_START,
uinput.BTN_SELECT,
uinput.BTN_DPAD_LEFT,
uinput.BTN_DPAD_RIGHT,
uinput.BTN_DPAD_UP,
uinput.BTN_DPAD_DOWN,
)
# Open uinput device
with uinput.Device(events) as device:
print("GBA_INPUT DEVICE STARTED!")
while True:
        # Don't know if this sleep is needed
time.sleep(0.01)
| 31.794118
| 72
| 0.728955
|
0b50548f3eab0f05b1ed36ab5db0bee7c6b0e063
| 5,430
|
py
|
Python
|
setup/categories/classifier_setup.py
|
aber-wgr/OD-test
|
1f0836dd7a1c5ede34caca1a3492e9e7c3023538
|
[
"MIT"
] | 61
|
2018-09-14T02:48:01.000Z
|
2022-02-14T09:13:45.000Z
|
setup/categories/classifier_setup.py
|
ashafaei/OD-test
|
8252aace84e2ae1ab95067876985f62a1060aad6
|
[
"MIT"
] | 3
|
2019-07-31T09:59:46.000Z
|
2020-04-16T21:55:16.000Z
|
setup/categories/classifier_setup.py
|
aber-wgr/OD-test
|
1f0836dd7a1c5ede34caca1a3492e9e7c3023538
|
[
"MIT"
] | 12
|
2018-09-25T10:36:39.000Z
|
2022-03-28T18:09:00.000Z
|
from __future__ import print_function
import os
from termcolor import colored
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import models as Models
import global_vars as Global
from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig
from utils.logger import Logger
from datasets import MirroredDataset
def get_classifier_config(args, model, dataset):
print("Preparing training D1 for %s"%(dataset.name))
# 80%, 20% for local train+test
train_ds, valid_ds = dataset.split_dataset(0.8)
if dataset.name in Global.mirror_augment:
print(colored("Mirror augmenting %s"%dataset.name, 'green'))
new_train_ds = train_ds + MirroredDataset(train_ds)
train_ds = new_train_ds
# Initialize the multi-threaded loaders.
train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
valid_loader = DataLoader(valid_ds, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)
all_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)
# Set up the criterion
criterion = nn.NLLLoss().to(args.device)
# Set up the model
model = model.to(args.device)
# Set up the config
config = IterativeTrainerConfig()
config.name = 'classifier_%s_%s'%(dataset.name, model.__class__.__name__)
config.train_loader = train_loader
config.valid_loader = valid_loader
config.phases = {
'train': {'dataset' : train_loader, 'backward': True},
'test': {'dataset' : valid_loader, 'backward': False},
'all': {'dataset' : all_loader, 'backward': False},
}
config.criterion = criterion
config.classification = True
config.stochastic_gradient = True
config.visualize = not args.no_visualize
config.model = model
config.logger = Logger()
config.optim = optim.Adam(model.parameters(), lr=1e-3)
config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(config.optim, patience=10, threshold=1e-2, min_lr=1e-6, factor=0.1, verbose=True)
config.max_epoch = 120
if hasattr(model, 'train_config'):
model_train_config = model.train_config()
        for key, value in model_train_config.items():
print('Overriding config.%s'%key)
config.__setattr__(key, value)
return config
def train_classifier(args, model, dataset):
config = get_classifier_config(args, model, dataset)
home_path = Models.get_ref_model_path(args, config.model.__class__.__name__, dataset.name, model_setup=True, suffix_str='base')
hbest_path = os.path.join(home_path, 'model.best.pth')
if not os.path.isdir(home_path):
os.makedirs(home_path)
trainer = IterativeTrainer(config, args)
if not os.path.isfile(hbest_path+".done"):
print(colored('Training from scratch', 'green'))
best_accuracy = -1
for epoch in range(1, config.max_epoch+1):
# Track the learning rates.
lrs = [float(param_group['lr']) for param_group in config.optim.param_groups]
config.logger.log('LRs', lrs, epoch)
config.logger.get_measure('LRs').legend = ['LR%d'%i for i in range(len(lrs))]
# One epoch of train and test.
trainer.run_epoch(epoch, phase='train')
trainer.run_epoch(epoch, phase='test')
train_loss = config.logger.get_measure('train_loss').mean_epoch()
config.scheduler.step(train_loss)
if config.visualize:
# Show the average losses for all the phases in one figure.
config.logger.visualize_average_keys('.*_loss', 'Average Loss', trainer.visdom)
config.logger.visualize_average_keys('.*_accuracy', 'Average Accuracy', trainer.visdom)
config.logger.visualize_average('LRs', trainer.visdom)
test_average_acc = config.logger.get_measure('test_accuracy').mean_epoch()
# Save the logger for future reference.
torch.save(config.logger.measures, os.path.join(home_path, 'logger.pth'))
# Saving a checkpoint. Enable if needed!
# if args.save and epoch % 10 == 0:
# print('Saving a %s at iter %s'%(colored('snapshot', 'yellow'), colored('%d'%epoch, 'yellow')))
# torch.save(config.model.state_dict(), os.path.join(home_path, 'model.%d.pth'%epoch))
if args.save and best_accuracy < test_average_acc:
print('Updating the on file model with %s'%(colored('%.4f'%test_average_acc, 'red')))
best_accuracy = test_average_acc
torch.save(config.model.state_dict(), hbest_path)
torch.save({'finished':True}, hbest_path + ".done")
if config.visualize:
trainer.visdom.save([trainer.visdom.env])
else:
print("Skipping %s"%(colored(home_path, 'yellow')))
print("Loading the best model.")
config.model.load_state_dict(torch.load(hbest_path))
config.model.eval()
trainer.run_epoch(0, phase='all')
test_average_acc = config.logger.get_measure('all_accuracy').mean_epoch(epoch=0)
print("All average accuracy %s"%colored('%.4f%%'%(test_average_acc*100), 'red'))
| 41.450382
| 141
| 0.660589
|
661d666d96a8f21b888817e4ba1fcc3f6a192afd
| 13,758
|
py
|
Python
|
FWCore/Integration/test/testRunMergeTEST1_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
FWCore/Integration/test/testRunMergeTEST1_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
FWCore/Integration/test/testRunMergeTEST1_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
# This is the same as testRunMergeTEST except the noEventSort
# option is set in the PoolSource, which changes the order
# events are processed. Within a LuminosityBlock, they are
# in entry order instead of event number order. The RunLumiEventAnalyzer
# module checks this.
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.MessageLogger.cerr.threshold = 'ERROR'
import FWCore.Framework.test.cmsExceptionsFatalOption_cff
process.options = cms.untracked.PSet(
fileMode = cms.untracked.string('FULLMERGE'),
Rethrow = FWCore.Framework.test.cmsExceptionsFatalOption_cff.Rethrow
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'file:testRunMerge.root',
'file:testRunMerge.root'
),
secondaryFileNames = cms.untracked.vstring(
'file:testRunMerge1.root',
'file:testRunMerge2.root',
'file:testRunMerge3.root',
'file:testRunMerge4.root',
'file:testRunMerge5.root'
),
noEventSort = cms.untracked.bool(True)
, duplicateCheckMode = cms.untracked.string('checkEachRealDataFile')
)
process.out = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('file:testRunMergeRecombined1.root')
)
process.test = cms.EDAnalyzer("TestMergeResults",
# These values below are just arbitrary and meaningless
# We are checking to see that the value we get out matches what
# was put in.
# expected values listed below come in sets of three
# value expected in Thing
# value expected in ThingWithMerge
# value expected in ThingWithIsEqual
# This set of 3 is repeated below at each point it might change
# The Prod suffix refers to objects from the process named PROD
# The New suffix refers to objects created in the most recent process
# When the sequence of parameter values is exhausted it stops checking
# 0's are just placeholders, if the value is a "0" the check is not made
# and it indicates the product does not exist at that point.
# *'s indicate lines where the checks are actually run by the test module.
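    # For instance (reading the first starred triple under expectedBeginRunProd
    # below): "10001, 20004, 10003" at "* begin run 1" means Thing is expected to
    # be 10001, ThingWithMerge 20004 and ThingWithIsEqual 10003 at that check point.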
expectedBeginRunProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
10001, 20004, 10003, # * begin run 1
10001, 30006, 10003, # * events
10001, 30006, 10003, # end run 1
10001, 10002, 10003, # * begin run 2
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 2
10001, 20004, 10003, # * begin run 11
10001, 20004, 10003, # * events
10001, 20004, 10003, # begin file 2
10001, 20004, 10003, # end run 11
10001, 20004, 10003, # * begin run 1
10001, 30006, 10003, # * events
10001, 30006, 10003, # end run 1
10001, 10002, 10003, # * begin run 2
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 2
10001, 20004, 10003, # * begin run 11
10001, 20004, 10003, # * events
10001, 20004, 10003 # end run 11
),
expectedEndRunProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
100001, 200004, 100003, # begin run 1
100001, 300006, 100003, # * events
100001, 300006, 100003, # * end run 1
100001, 100002, 100003, # begin run 2
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 2
100001, 200004, 100003, # begin run 11
100001, 200004, 100003, # * events
100001, 200004, 100003, # begin file 2
100001, 200004, 100003, # * end run 11
100001, 200004, 100003, # begin run 1
100001, 300006, 100003, # * events
100001, 300006, 100003, # * end run 1
100001, 100002, 100003, # begin run 2
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 2
100001, 200004, 100003, # * begin run 11
100001, 200004, 100003, # * events
100001, 200004, 100003 # * end run 11
),
expectedBeginLumiProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
101, 204, 103, # * begin run 1 lumi 1
101, 306, 103, # * events
101, 306, 103, # end run 1 lumi 1
101, 102, 103, # * begin run 2 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 2 lumi 1
101, 102, 103, # * begin run 11 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 11 lumi 1
101, 102, 103, # * begin run 11 lumi 2
101, 102, 103, # * events
101, 102, 103, # begin file 2
101, 102, 103, # end run 11 lumi 2
101, 204, 103, # * begin run 1 lumi 1
101, 306, 103, # * events
101, 306, 103, # end run 1 lumi 1
101, 102, 103, # * begin run 2 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 2 lumi 1
101, 102, 103, # * begin run 11 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 11 lumi 1
101, 102, 103, # * begin run 11 lumi 2
101, 102, 103, # * events
101, 102, 103 # end run 11 lumi 2
),
expectedEndLumiProd = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
1001, 2004, 1003, # begin run 1 lumi 1
1001, 3006, 1003, # * events
1001, 3006, 1003, # * end run 1 lumi 1
1001, 1002, 1003, # begin run 2 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 2 lumi 1
1001, 1002, 1003, # begin run 11 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 11 lumi 1
1001, 1002, 1003, # begin run 11 lumi 2
1001, 1002, 1003, # * events
1001, 1002, 1003, # begin file 2
1001, 1002, 1003, # * end run 11 lumi 2
1001, 2004, 1003, # begin run 1 lumi 1
1001, 3006, 1003, # * events
1001, 3006, 1003, # * end run 1 lumi 1
1001, 1002, 1003, # begin run 2 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 2 lumi 1
1001, 1002, 1003, # begin run 11 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 11 lumi 1
1001, 1002, 1003, # begin run 11 lumi 2
1001, 1002, 1003, # * events
1001, 1002, 1003 # * end run 11 lumi 2
),
expectedBeginRunNew = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
10001, 10002, 10003, # * begin run 1
10001, 20004, 10003, # * events
10001, 20004, 10003, # end run 1
10001, 10002, 10003, # * begin run 2
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 2
10001, 10002, 10003, # * begin run 11
10001, 10002, 10003, # * events
10001, 10002, 10003, # begin file 2
10001, 10002, 10003, # end run 11
10001, 10002, 10003, # * begin run 1
10001, 20004, 10003, # * events
10001, 20004, 10003, # end run 1
10001, 10002, 10003, # * begin run 2
10001, 10002, 10003, # * events
10001, 10002, 10003, # end run 2
10001, 10002, 10003, # * begin run 11
10001, 10002, 10003, # * events
10001, 10002, 10003 # end run 11
),
expectedEndRunNew = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
100001, 100002, 100003, # begin run 1
100001, 200004, 100003, # * events
100001, 200004, 100003, # * end run 1
100001, 100002, 100003, # begin run 2
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 2
100001, 100002, 100003, # begin run 11
100001, 100002, 100003, # * events
100001, 100002, 100003, # begin file 2
100001, 100002, 100003, # * end run 11
100001, 100002, 100003, # begin run 1
100001, 200004, 100003, # * events
100001, 200004, 100003, # * end run 1
100001, 100002, 100003, # begin run 2
100001, 100002, 100003, # * events
100001, 100002, 100003, # * end run 2
100001, 100002, 100003, # begin run 11
100001, 100002, 100003, # * events
100001, 100002, 100003 # * end run 11
),
expectedBeginLumiNew = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
101, 102, 103, # * begin run 1 lumi 1
101, 204, 103, # * events
101, 204, 103, # end run 1 lumi 1
101, 102, 103, # * begin run 2 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 2 lumi 1
101, 102, 103, # * begin run 11 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 11 lumi 1
101, 102, 103, # * begin run 11 lumi 2
101, 102, 103, # * events
101, 102, 103, # begin file 2
101, 102, 103, # end run 11 lumi 2
101, 102, 103, # * begin run 1 lumi 1
101, 204, 103, # * events
101, 204, 103, # end run 1 lumi 1
101, 102, 103, # * begin run 2 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 2 lumi 1
101, 102, 103, # * begin run 11 lumi 1
101, 102, 103, # * events
101, 102, 103, # end run 11 lumi 1
101, 102, 103, # * begin run 11 lumi 2
101, 102, 103, # * events
101, 102, 103 # end run 11 lumi 2
),
expectedEndLumiNew = cms.untracked.vint32(
0, 0, 0, # start
0, 0, 0, # begin file 1
1001, 1002, 1003, # begin run 1 lumi 1
1001, 2004, 1003, # * events
1001, 2004, 1003, # * end run 1 lumi 1
1001, 1002, 1003, # begin run 2 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 2 lumi 1
1001, 1002, 1003, # begin run 11 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 11 lumi 1
1001, 1002, 1003, # begin run 11 lumi 2
1001, 1002, 1003, # * events
1001, 1002, 1003, # begin file 2
1001, 1002, 1003, # * end run 11 lumi 2
1001, 1002, 1003, # begin run 1 lumi 1
1001, 2004, 1003, # * events
1001, 2004, 1003, # * end run 1 lumi 1
1001, 1002, 1003, # begin run 2 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 2 lumi 1
1001, 1002, 1003, # begin run 11 lumi 1
1001, 1002, 1003, # * events
1001, 1002, 1003, # * end run 11 lumi 1
1001, 1002, 1003, # begin run 11 lumi 2
1001, 1002, 1003, # * events
1001, 1002, 1003 # * end run 11 lumi 2
),
expectedDroppedEvent = cms.untracked.vint32(13, 10003, 100003, 103, 1003),
verbose = cms.untracked.bool(False),
expectedParents = cms.untracked.vstring(
'm1', 'm1', 'm1', 'm1', 'm1',
'm1', 'm1', 'm1', 'm1', 'm1',
'm2', 'm2', 'm2', 'm2', 'm2',
'm3', 'm3', 'm3', 'm3', 'm3',
'm3', 'm3', 'm3', 'm3', 'm3',
'm2', 'm2', 'm2', 'm2', 'm2',
'm1', 'm1',
'm1', 'm1', 'm1', 'm1', 'm1',
'm1', 'm1', 'm1', 'm1', 'm1',
'm2', 'm2', 'm2', 'm2', 'm2',
'm3', 'm3', 'm3', 'm3', 'm3',
'm3', 'm3', 'm3', 'm3', 'm3',
'm2', 'm2', 'm2', 'm2', 'm2',
'm1', 'm1'
)
)
process.test2 = cms.EDAnalyzer('RunLumiEventAnalyzer',
verbose = cms.untracked.bool(True),
expectedRunLumiEvents = cms.untracked.vuint32(
1, 0, 0,
1, 1, 0,
1, 1, 11,
1, 1, 12,
1, 1, 13,
1, 1, 14,
1, 1, 15,
1, 1, 16,
1, 1, 17,
1, 1, 18,
1, 1, 19,
1, 1, 20,
1, 1, 21,
1, 1, 22,
1, 1, 23,
1, 1, 24,
1, 1, 25,
1, 1, 1,
1, 1, 2,
1, 1, 3,
1, 1, 4,
1, 1, 5,
1, 1, 6,
1, 1, 7,
1, 1, 8,
1, 1, 9,
1, 1, 10,
1, 1, 0,
1, 0, 0,
2, 0, 0,
2, 1, 0,
2, 1, 1,
2, 1, 2,
2, 1, 3,
2, 1, 4,
2, 1, 5,
2, 1, 0,
2, 0, 0,
11, 0, 0,
11, 1, 0,
11, 1, 1,
11, 1, 0,
11, 2, 0,
11, 2, 1,
11, 2, 0,
11, 0, 0,
1, 0, 0,
1, 1, 0,
1, 1, 11,
1, 1, 12,
1, 1, 13,
1, 1, 14,
1, 1, 15,
1, 1, 16,
1, 1, 17,
1, 1, 18,
1, 1, 19,
1, 1, 20,
1, 1, 21,
1, 1, 22,
1, 1, 23,
1, 1, 24,
1, 1, 25,
1, 1, 1,
1, 1, 2,
1, 1, 3,
1, 1, 4,
1, 1, 5,
1, 1, 6,
1, 1, 7,
1, 1, 8,
1, 1, 9,
1, 1, 10,
1, 1, 0,
1, 0, 0,
2, 0, 0,
2, 1, 0,
2, 1, 1,
2, 1, 2,
2, 1, 3,
2, 1, 4,
2, 1, 5,
2, 1, 0,
2, 0, 0
)
)
process.path1 = cms.Path(process.test + process.test2)
process.endpath1 = cms.EndPath(process.out)
| 35.186701
| 80
| 0.496366
|
8630d53384ca87932be69f6f8ad4462f46f41a5a
| 4,575
|
py
|
Python
|
tournament_test.py
|
bschmoker/fireside
|
70c8d9b8c6c3bdc0b437ec68be0aa99f904e7598
|
[
"MIT"
] | 1
|
2015-11-05T20:15:34.000Z
|
2015-11-05T20:15:34.000Z
|
tournament_test.py
|
bschmoker/fireside
|
70c8d9b8c6c3bdc0b437ec68be0aa99f904e7598
|
[
"MIT"
] | 3
|
2015-08-30T19:55:11.000Z
|
2015-08-30T19:56:20.000Z
|
tournament_test.py
|
bschmoker/fireside
|
70c8d9b8c6c3bdc0b437ec68be0aa99f904e7598
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
def testDeleteMatches():
deleteMatches()
print "1. Old matches can be deleted."
def testDelete():
deleteMatches()
deletePlayers()
print "2. Player records can be deleted."
def testCount():
deleteMatches()
deletePlayers()
c = countPlayers()
if c == '0':
raise TypeError(
"countPlayers() should return numeric zero, not string '0'.")
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "3. After deleting, countPlayers() returns zero."
def testRegister():
deleteMatches()
deletePlayers()
registerPlayer("Jeff Goldblum")
c = countPlayers()
if c != 1:
raise ValueError(
"After one player registers, countPlayers() should be 1.")
print "4. After registering a player, countPlayers() returns 1."
def testRegisterCountDelete():
deleteMatches()
deletePlayers()
registerPlayer("Jeff Goldblum")
registerPlayer("Dweezil Zappa")
registerPlayer("Josh Brolin")
registerPlayer("Atlanta Hope")
c = countPlayers()
if c != 4:
raise ValueError(
"After registering four players, countPlayers should be 4.")
deletePlayers()
c = countPlayers()
if c != 0:
raise ValueError("After deleting, countPlayers should return zero.")
print "5. Players can be registered and deleted."
def testStandingsBeforeMatches():
deleteMatches()
deletePlayers()
registerPlayer("Jeff Goldblum")
registerPlayer("Biff Rackchest")
standings = playerStandings()
if len(standings) < 2:
raise ValueError("Players should appear in playerStandings even before "
"they have played any matches.")
elif len(standings) > 2:
raise ValueError("Only registered players should appear in standings.")
if len(standings[0]) != 4:
raise ValueError("Each playerStandings row should have four columns.")
[(id1, name1, wins1, matches1), (id2, name2, wins2, matches2)] = standings
if matches1 != 0 or matches2 != 0 or wins1 != 0 or wins2 != 0:
raise ValueError(
"Newly registered players should have no matches or wins.")
    if set([name1, name2]) != set(["Jeff Goldblum", "Biff Rackchest"]):
raise ValueError("Registered players' names should appear in standings, "
"even if they have no matches played.")
print "6. Newly registered players appear in the standings with no matches."
def testReportMatches():
deleteMatches()
deletePlayers()
registerPlayer("Jeff Goldblum")
registerPlayer("Jeff Goldblum")
registerPlayer("Jeff Goldblum")
registerPlayer("Jeff Goldblum")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
standings = playerStandings()
for (i, n, w, m) in standings:
if m != 1:
raise ValueError("Each player should have one match recorded.")
if i in (id1, id3) and w != 1:
raise ValueError("Each match winner should have one win recorded.")
elif i in (id2, id4) and w != 0:
raise ValueError("Each match loser should have zero wins recorded.")
print "7. After a match, players have updated standings."
def testPairings():
deleteMatches()
deletePlayers()
registerPlayer("Thrall")
registerPlayer("Valeera")
registerPlayer("Jaina Proudmoore")
registerPlayer("Goldfarmer #888")
standings = playerStandings()
[id1, id2, id3, id4] = [row[0] for row in standings]
reportMatch(id1, id2)
reportMatch(id3, id4)
pairings = swissPairings()
if len(pairings) != 2:
raise ValueError(
"For four players, swissPairings should return two pairs.")
[(pid1, pname1, pid2, pname2), (pid3, pname3, pid4, pname4)] = pairings
correct_pairs = set([frozenset([id1, id3]), frozenset([id2, id4])])
actual_pairs = set([frozenset([pid1, pid2]), frozenset([pid3, pid4])])
if correct_pairs != actual_pairs:
raise ValueError(
"After one match, players with one win should be paired.")
print "8. After one match, players with one win are paired."
if __name__ == '__main__':
testDeleteMatches()
testDelete()
testCount()
testRegister()
testRegisterCountDelete()
testStandingsBeforeMatches()
testReportMatches()
testPairings()
print "Success! All tests pass!"
| 32.678571
| 81
| 0.65071
|
81c2effb53159158d85b3cc8df7a0fbb3acf75d3
| 2,498
|
py
|
Python
|
Python/Tic-Tac-Toe/Tic-tac-toe.py
|
AyushBilkhiwal/Hacktoberfest2020_
|
5f8155b0b8a950643c10b58b1c5ea3cbf007384f
|
[
"MIT"
] | null | null | null |
Python/Tic-Tac-Toe/Tic-tac-toe.py
|
AyushBilkhiwal/Hacktoberfest2020_
|
5f8155b0b8a950643c10b58b1c5ea3cbf007384f
|
[
"MIT"
] | null | null | null |
Python/Tic-Tac-Toe/Tic-tac-toe.py
|
AyushBilkhiwal/Hacktoberfest2020_
|
5f8155b0b8a950643c10b58b1c5ea3cbf007384f
|
[
"MIT"
] | null | null | null |
"""
@author: Arpit Somani
"""
# Import the required modules
import numpy
# Creating the platform, where we play the game.
board = numpy.array([['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']])
# We have 2 symbols, as it's a 2 player game
p1s = 'X'
p2s = 'O'
# Checking for a winning line in the rows
def check_rows(symbol):
for r in range(3):
count = 0
for c in range(3):
if board[r][c] == symbol:
count = count + 1
if count == 3:
print(symbol, "Won")
return True
return False
# Checking for a winning line in the columns
def check_cols(symbol):
for c in range(3):
count = 0
for r in range(3):
if board[r][c] == symbol:
count = count + 1
if count == 3:
print(symbol, "Won")
return True
return False
# Checking for a winning line in the diagonals
def check_diagonals(symbol):
if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[1][1] == symbol:
print(symbol, "Won")
return True
if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[1][1] == symbol:
print(symbol, "Won")
return True
return False
# A player gets into the win situation when a straight line is formed at a row, column or diagonal position.
def won(symbol):
return check_rows(symbol) or check_cols(symbol) or check_diagonals(symbol)
# Placing of a player's symbol at the desired empty position
def place(symbol):
print(numpy.matrix(board))
while 1:
row = int(input('Enter row: 1 or 2 or 3: '))
col = int(input('Enter col: 1 or 2 or 3: '))
        if row in range(1, 4) and col in range(1, 4) and board[row - 1][col - 1] == '_':
break
else:
            print('Invalid input. Please enter again!')
board[row - 1][col - 1] = symbol
# The play function: player 1 starts the game, and turns alternate between the 2 players
# until a winner arises.
def play():
for turn in range(9):
if turn % 2 == 0:
print("X's turn")
place(p1s)
if won(p1s):
break
else:
print("O's turn")
place(p2s)
if won(p2s):
break
if not (won(p1s)) and not (won(p2s)):
print("Draw!")
# Calling play function
play()
| 24.490196
| 116
| 0.527622
|
36510440ae397fde7f1b1b56e2c94615e39f81df
| 7,604
|
py
|
Python
|
readthedocs/core/views/serve.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 1
|
2020-03-12T14:24:48.000Z
|
2020-03-12T14:24:48.000Z
|
readthedocs/core/views/serve.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 1
|
2018-12-24T04:01:31.000Z
|
2018-12-24T04:01:31.000Z
|
readthedocs/core/views/serve.py
|
chirathr/readthedocs.org
|
4f1a5dc07fd9d55d4284fdb22deae735932b2ec9
|
[
"MIT"
] | 6
|
2019-02-13T16:08:41.000Z
|
2020-03-12T14:17:14.000Z
|
# -*- coding: utf-8 -*-
"""
Doc serving from Python.
In production there are two modes,
* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)
* Serving from private symlinks in Python (readthedocs.com only)
In development, we have two modes:
* Serving from public symlinks in Python
* Serving from private symlinks in Python
This means we should only serve from public symlinks in dev,
and generally default to serving from private symlinks in Python only.
Privacy
-------
These views will take into account the version privacy level.
Settings
--------
PYTHON_MEDIA (False) - Set this to True to serve docs & media from Python
SERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import logging
import mimetypes
import os
from functools import wraps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.views.static import serve
from readthedocs.builds.models import Version
from readthedocs.core.permissions import AdminPermission
from readthedocs.core.resolver import resolve, resolve_path
from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
from readthedocs.projects import constants
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__)
def map_subproject_slug(view_func):
"""
A decorator that maps a ``subproject_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa
if subproject is None and subproject_slug:
# Try to fetch by subproject alias first, otherwise we might end up
# redirected to an unrelated project.
try:
# Depends on a project passed into kwargs
rel = ProjectRelationship.objects.get(
parent=kwargs['project'],
alias=subproject_slug,
)
subproject = rel.child
except (ProjectRelationship.DoesNotExist, KeyError):
subproject = get_object_or_404(Project, slug=subproject_slug)
return view_func(request, subproject=subproject, *args, **kwargs)
return inner_view
def map_project_slug(view_func):
"""
A decorator that maps a ``project_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa
if project is None:
if not project_slug:
project_slug = request.slug
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
raise Http404('Project does not exist.')
return view_func(request, project=project, *args, **kwargs)
return inner_view
@map_project_slug
@map_subproject_slug
def redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument
"""Handle / -> /en/latest/ directs on subdomains."""
return HttpResponseRedirect(resolve(subproject or project))
@map_project_slug
@map_subproject_slug
def redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa
"""Redirect /page/file.html to /en/latest/file.html."""
return HttpResponseRedirect(
resolve(subproject or project, filename=filename))
def _serve_401(request, project):
res = render(request, '401.html')
res.status_code = 401
log.debug('Unauthorized access to {0} documentation'.format(project.slug))
return res
def _serve_file(request, filename, basepath):
# Serve the file from the proper location
if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):
# Serve from Python
return serve(request, filename, basepath)
# Serve from Nginx
content_type, encoding = mimetypes.guess_type(
os.path.join(basepath, filename))
content_type = content_type or 'application/octet-stream'
response = HttpResponse(content_type=content_type)
if encoding:
response['Content-Encoding'] = encoding
try:
response['X-Accel-Redirect'] = os.path.join(
basepath[len(settings.SITE_ROOT):],
filename,
)
except UnicodeEncodeError:
raise Http404
return response
@map_project_slug
@map_subproject_slug
def serve_docs(
request, project, subproject, lang_slug=None, version_slug=None,
filename=''):
"""Exists to map existing proj, lang, version, filename views to the file format."""
if not version_slug:
version_slug = project.get_default_version()
try:
version = project.versions.public(request.user).get(slug=version_slug)
except Version.DoesNotExist:
# Properly raise a 404 if the version doesn't exist (or is inactive) and
# a 401 if it does
if project.versions.filter(slug=version_slug, active=True).exists():
return _serve_401(request, project)
raise Http404('Version does not exist.')
filename = resolve_path(
subproject or project, # Resolve the subproject if it exists
version_slug=version_slug,
language=lang_slug,
filename=filename,
subdomain=True, # subdomain will make it a "full" path without a URL prefix
)
if (version.privacy_level == constants.PRIVATE and
not AdminPermission.is_member(user=request.user, obj=project)):
return _serve_401(request, project)
return _serve_symlink_docs(
request,
filename=filename,
project=project,
privacy_level=version.privacy_level,
)
@map_project_slug
def _serve_symlink_docs(request, project, privacy_level, filename=''):
"""Serve a file by symlink, or a 404 if not found."""
# Handle indexes
if filename == '' or filename[-1] == '/':
filename += 'index.html'
# This breaks path joining, by ignoring the root when given an "absolute" path
if filename[0] == '/':
filename = filename[1:]
log.info('Serving %s for %s', filename, project)
files_tried = []
serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])
if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa
public_symlink = PublicSymlink(project)
basepath = public_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
files_tried.append(os.path.join(basepath, filename))
if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa
# Handle private
private_symlink = PrivateSymlink(project)
basepath = private_symlink.project_root
if os.path.exists(os.path.join(basepath, filename)):
return _serve_file(request, filename, basepath)
files_tried.append(os.path.join(basepath, filename))
raise Http404(
'File not found. Tried these files: %s' % ','.join(files_tried))
| 34.880734
| 123
| 0.692136
|
4067c5c33136a3077df6e5eb986575b53a6927fd
| 316
|
py
|
Python
|
ThirdParty/Blizzard/Storm/Current/src/GenDef.py
|
alanoooaao/YDWE
|
fa9c6dc24d01f78919b5c8b2c69252291536424a
|
[
"Apache-2.0"
] | 5
|
2019-01-22T02:35:35.000Z
|
2022-02-28T02:50:03.000Z
|
ThirdParty/Blizzard/Storm/Current/src/GenDef.py
|
shawwwn/YDWE
|
b83ffe041d9623409d9ffd951988e2b482d9cfc3
|
[
"Apache-2.0"
] | 8
|
2016-10-19T00:04:05.000Z
|
2016-11-14T10:58:14.000Z
|
ThirdParty/Blizzard/Storm/Current/src/GenDef.py
|
shawwwn/YDWE
|
b83ffe041d9623409d9ffd951988e2b482d9cfc3
|
[
"Apache-2.0"
] | 2
|
2016-11-14T11:39:37.000Z
|
2019-09-06T00:21:15.000Z
|
# coding: utf-8
# Python 3 scripts
import os, sys, re
rg = re.compile(r"\s*//\s+(\d+)\s+(\w+)")
print('LIBRARY "Storm"')
print('EXPORTS')
with open("../include/BlizzardStorm.h", "r") as f:
for line in f:
mat = rg.match(line)
if mat:
print('\t{0}\t\t\t@{1}\tNONAME'.format(mat.group(2), mat.group(1)))
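# Illustrative self-check (the ordinal and name below are made up, not real Storm
# exports): it shows what the regex above captures from a typical header comment.
if __name__ == "__main__":
    sample = "    //  123  SFileExampleFunc"
    mat = rg.match(sample)
    assert mat and mat.group(1) == "123" and mat.group(2) == "SFileExampleFunc"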
| 19.75
| 70
| 0.591772
|
af869d43f1291b05d190187cf1fb1a717a769211
| 7,021
|
py
|
Python
|
src/packages/database/database_manager.py
|
nathanctech/pipsqueak3
|
f5c038fcecb0f54a6a71f6000c13e683ae72920f
|
[
"BSD-3-Clause"
] | null | null | null |
src/packages/database/database_manager.py
|
nathanctech/pipsqueak3
|
f5c038fcecb0f54a6a71f6000c13e683ae72920f
|
[
"BSD-3-Clause"
] | null | null | null |
src/packages/database/database_manager.py
|
nathanctech/pipsqueak3
|
f5c038fcecb0f54a6a71f6000c13e683ae72920f
|
[
"BSD-3-Clause"
] | null | null | null |
"""
database_manager.py - allows connections to SQL databases.
Provides postgreSQL connectivity for mechasqueak3.
Copyright (c) 2018 The Fuel Rat Mischief,
All rights reserved.
Licensed under the BSD 3-Clause License.
See LICENSE.md
"""
import typing
import psycopg2
from loguru import logger
from psycopg2 import sql, pool
from src.config import CONFIG_MARKER
class DatabaseManager:
"""
    Database Manager class intended to be inherited by classes that require database
    connectivity. Currently, only PostgreSQL 9.5+ is supported.
ODBC drivers are not required on Windows.
Usage:
>>> DatabaseManager(dbhost='DatabaseServer.org',
... dbport=5432,
... dbname='DatabaseName',
... dbuser='DatabaseUserName',
... dbpassword='UserPassword') # doctest: +SKIP
All arguments are optional. If omitted, config values will be pulled from config file.
Instantiation of the DBM is not intended to be done per method, but rather once as a
    class property, with the DatabaseManager.query() method used to perform queries.
Connections are managed by a SimpleConnectionPool, keeping a minimum of 5 and a maximum
of 10 connections, able to dynamically open/close ports as needed.
Performing A Query:
.query() does not accept a direct string. You must use a psycopg2 composed SQL (sql.SQL)
object, with appropriate substitutions.
DO NOT USE STRING CONCATENATION OR APPEND VALUES. THIS IS BAD PRACTICE, AND AN INJECTION
RISK!
>>> query = sql.SQL(
... "SELECT FROM public.table WHERE table.name=%s AND table.lang=%s AND table.something=%s")
>>> dbm.query(query, ('tuple','of','values'))# doctest: +SKIP
"""
_config: typing.ClassVar[typing.Dict] = {}
@classmethod
@CONFIG_MARKER
def rehash_handler(cls, data: typing.Dict):
"""
Apply new configuration data
Args:
data (typing.Dict): new configuration data to apply.
"""
cls._config = data
@classmethod
@CONFIG_MARKER
def validate_config(cls, data: typing.Dict):
"""
Validate database portion of the configuration file
Args:
data(typing.Dict): configuration object
"""
module_config = data['database']
# Require all values to be set
        for key, setting in module_config.items():
            if not setting:
                raise ValueError(f"[database]{key} is required for instantiation but was empty")
# Host
if not isinstance(module_config['host'], str):
raise ValueError("[database]host must be a string.")
# Port
if not isinstance(module_config['port'], int):
raise ValueError("[database]port must be an integer.")
# Database Name
if not isinstance(module_config['dbname'], str):
raise ValueError("[database]database name must be a string.")
# Database Username
if not isinstance(module_config['username'], str):
raise ValueError("[database]database username must be a string.")
# Database Password
if not isinstance(module_config['password'], str):
raise ValueError("[database]database password must be a string")
def __init__(self,
dbhost=None,
dbport=None,
dbname=None,
dbuser=None,
dbpassword=None
):
if not hasattr(self, "_initialized"):
self._initialized = True
# Utilize function arguments if they are provided,
# otherwise retrieve from config file and use those values.
self._dbhost = dbhost if dbhost is not None else self._config['database'].get('host')
assert self._dbhost
            self._dbport = dbport if dbport is not None else self._config['database'].get('port')
assert self._dbport
self._dbname = dbname if dbname is not None else self._config['database'].get('dbname')
assert self._dbname
self._dbuser = dbuser if dbuser is not None else self._config['database'].get(
'username')
assert self._dbuser
self._dbpass = dbpassword if dbpassword is not None else \
self._config['database'].get('password')
assert self._dbpass
# Create Database Connections Pool
try:
self._dbpool = psycopg2.pool.SimpleConnectionPool(5, 10, host=self._dbhost,
port=self._dbport,
dbname=self._dbname,
user=self._dbuser,
password=self._dbpass)
except psycopg2.DatabaseError as error:
logger.exception("Unable to connect to database!")
raise error
async def query(self,
query: sql.SQL,
values: typing.Union[typing.Tuple, typing.Dict]) -> typing.List:
"""
Send a query to the connected database. Pulls a connection from the pool and creates
a cursor, executing the composed query with the values.
Requires a composed SQL object (See psycopg2 docs)
Args:
query: composed SQL query object
values: tuple or dict of values for query
Returns:
List of rows matching query. May return an empty list if there are no matching rows.
"""
# Verify composed SQL object
if not isinstance(query, sql.SQL):
raise TypeError("Expected composed SQL object for query.")
# Verify value is tuple or dict.
if not isinstance(values, (dict, tuple)):
            raise TypeError("Expected tuple or dict for query values.")
# Pull a connection from the pool, and create a cursor from it.
with self._dbpool.getconn() as connection:
# If we could set these at connection time, we would,
# but they must be set outside the pool.
connection.autocommit = True
connection.set_client_encoding('utf-8')
# Create cursor, and execute the query.
with connection.cursor() as cursor:
cursor.execute(query, values)
# Check if cursor.description is NONE - meaning no results returned.
if cursor.description:
result = cursor.fetchall()
else:
# Return a blank tuple if there are no results, since we are
# forcing this to a list.
result = ()
# Release connection back to the pool.
self._dbpool.putconn(connection)
return list(result)
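# Illustrative usage sketch (not part of the original module). The table and column
# names are made up for the example; it only shows the calling pattern described in
# the class docstring and is never executed at import time.
async def _example_query(dbm: DatabaseManager) -> typing.List:
    # Build a composed SQL object with %s placeholders and pass the values separately.
    query = sql.SQL("SELECT name FROM public.example_table WHERE lang=%s")
    return await dbm.query(query, ("en",))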
| 36.952632
| 100
| 0.590514
|
8eab7851f5a8f96a4ecc4a5c6af7777d198e7a32
| 3,616
|
py
|
Python
|
deep-rl/lib/python2.7/site-packages/OpenGL/arrays/numbers.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/arrays/numbers.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/arrays/numbers.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
"""Numbers passed as array handling code for PyOpenGL
"""
REGISTRY_NAME = 'numbers'
from OpenGL.raw.GL import _types
from OpenGL.raw.GL.VERSION import GL_1_1
from OpenGL.arrays import formathandler
import ctypes
from OpenGL._bytes import long, integer_types
class NumberHandler( formathandler.FormatHandler ):
"""Allows the user to pass a bald Python float,int, etceteras as an array-of-1"""
HANDLED_TYPES = integer_types + (
float,
_types.GLdouble,
_types.GLfloat,
_types.GLint,
_types.GLshort,
_types.GLuint,
_types.GLulong,
_types.GLushort,
_types.GLclampf,
_types.GLclampd,
)
def from_param( self, value, typeCode=None ):
"""If it's a ctypes value, pass on, otherwise do asArray"""
try:
return ctypes.byref(value)
except TypeError as err:
err.args += (' If you have ERROR_ON_COPY enabled, remember to pass in an array to array-requiring functions.', )
raise
dataPointer = from_param
def zeros( self, dims, typeCode=None ):
"""Currently don't allow Number as output types!"""
        raise NotImplementedError( """Number data-type not allowed as an output array format""" )
def ones( self, dims, typeCode=None ):
"""Currently don't allow Number as output types!"""
        raise NotImplementedError( """Number data-type not allowed as an output array format""" )
def arrayToGLType( self, value ):
"""Given a value, guess OpenGL type of the corresponding pointer"""
if value.__class__ in TARGET_TYPES:
return TARGET_TYPES[ value.__class__ ]
else:
guess = DEFAULT_TYPES.get( value.__class__ )
if guess is not None:
return guess[1]
raise TypeError( """Can't guess array data-type for %r types"""%(type(value)))
def arraySize( self, value, typeCode = None ):
"""Given a data-value, calculate ravelled size for the array"""
return 1
def asArray( self, value, typeCode=None ):
"""Convert given value to an array value of given typeCode"""
if value.__class__ in TARGET_TYPES:
return value
targetType = CONSTANT_TO_TYPE.get( typeCode )
if targetType is not None:
return targetType( value )
raise TypeError( """Don't know how to convert %r to an array type"""%(
typeCode,
))
def unitSize( self, value, typeCode=None ):
"""Determine unit size of an array (if possible)"""
return 1 # there's only 1 possible value in the set...
def registerEquivalent( self, typ, base ):
"""Register a sub-class for handling as the base-type"""
global TARGET_TYPE_TUPLE
for source in (DEFAULT_TYPES, TARGET_TYPES, BYTE_SIZES):
if base in source:
source[typ] = source[base]
if base in TARGET_TYPES:
TARGET_TYPE_TUPLE = TARGET_TYPE_TUPLE + (base,)
DEFAULT_TYPES = {
float: (_types.GLdouble,GL_1_1.GL_DOUBLE),
int: (_types.GLint,GL_1_1.GL_INT),
long: (_types.GLint,GL_1_1.GL_INT),
}
TARGET_TYPES = dict([
(getattr( _types,n),c)
for (n,c) in _types.ARRAY_TYPE_TO_CONSTANT
])
TARGET_TYPE_TUPLE = tuple([
getattr(_types,n)
for (n,c) in _types.ARRAY_TYPE_TO_CONSTANT
])
CONSTANT_TO_TYPE = dict([
(c,getattr( _types, n))
for (n,c) in _types.ARRAY_TYPE_TO_CONSTANT
])
BYTE_SIZES = dict([
( c, ctypes.sizeof( getattr( _types, n) ) )
for (n,c) in _types.ARRAY_TYPE_TO_CONSTANT
])
try:
del n,c
except NameError as err:
pass
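# Illustrative check (not part of the original module), exercising the handler
# defined above: a bald number is always treated as an array of one element.
# asArray(0.5, GL_1_1.GL_DOUBLE) would similarly wrap the float in the double type
# registered in CONSTANT_TO_TYPE, assuming GL_DOUBLE appears in
# _types.ARRAY_TYPE_TO_CONSTANT.
if __name__ == "__main__":
    handler = NumberHandler()
    assert handler.arraySize(0.5) == 1
    assert handler.unitSize(0.5) == 1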
| 36.16
| 124
| 0.636892
|
754cefe05860bbb3fed4cc2f8d5f2e7d02435eb2
| 21,275
|
py
|
Python
|
cogs/moderation.py
|
cringshotadeer/snakebot
|
e24730dbf49ebdd2dadb7912d07e94a88e988491
|
[
"MIT"
] | null | null | null |
cogs/moderation.py
|
cringshotadeer/snakebot
|
e24730dbf49ebdd2dadb7912d07e94a88e988491
|
[
"MIT"
] | null | null | null |
cogs/moderation.py
|
cringshotadeer/snakebot
|
e24730dbf49ebdd2dadb7912d07e94a88e988491
|
[
"MIT"
] | null | null | null |
import asyncio
from discord.ext import commands, menus
import discord
import orjson
class HistoryMenu(menus.ListPageSource):
def __init__(self, data):
super().__init__(data, per_page=10)
async def format_page(self, menu, entries):
embed = discord.Embed(color=discord.Color.blurple())
for date, message in entries:
if not message:
continue
embed.add_field(name=f"<t:{date}:R>", value=message)
return embed
class moderation(commands.Cog):
"""For commands related to moderation."""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
self.DB = bot.DB
self.loop = bot.loop
@commands.command()
async def inactive(self, ctx, days: int = 7):
"""Gets how many people can be pruned.
days: int
"""
inactive = await ctx.guild.estimate_pruned_members(days=days)
await ctx.send(
embed=discord.Embed(
color=discord.Color.blurple(),
description=f"```{inactive} members inactive for {days} days```",
)
)
async def _end_poll(self, guild, message):
"""Ends a poll and sends the results."""
polls = self.DB.main.get(b"polls")
if not polls:
return
message_id = str(message.id)
polls = orjson.loads(polls)
if guild not in polls:
return
if message_id not in polls[guild]:
return
winner = max(
polls[guild][message_id], key=lambda x: polls[guild][message_id][x]["count"]
)
await message.reply(f"Winner of the poll was {winner}")
polls[guild].pop(message_id)
self.DB.main.put(b"polls", orjson.dumps(polls))
@commands.command()
@commands.has_permissions(kick_members=True)
async def poll(self, ctx, title, *options):
"""Starts a poll.
title: str
options: tuple
"""
embed = discord.Embed(color=discord.Color.blurple())
if len(options) > 20:
embed.description = "```You can have a maximum of 20 options```"
return await ctx.send(embed=embed)
if len(options) < 2:
embed.description = "```You need at least 2 options```"
return await ctx.send(embed=embed)
polls = self.DB.main.get(b"polls")
if not polls:
polls = {}
else:
polls = orjson.loads(polls)
guild = str(ctx.guild.id)
if guild not in polls:
polls[guild] = {}
polls[guild]["temp"] = {}
embed.description = ""
for number, option in enumerate(options):
emoji = chr(127462 + number)
polls[guild]["temp"][emoji] = {
"name": option,
"count": 0,
}
embed.description += f"{emoji}: {option}\n"
embed.title = title
message = await ctx.send(embed=embed)
polls[guild][str(message.id)] = polls[guild].pop("temp")
for i in range(len(options)):
await message.add_reaction(chr(127462 + i))
self.DB.main.put(b"polls", orjson.dumps(polls))
        self.loop.call_later(21600, asyncio.create_task, self._end_poll(guild, message))
@commands.command()
@commands.has_permissions(kick_members=True)
async def end_poll(self, ctx, message_id):
"""Ends a poll based off its message id."""
polls = self.DB.main.get(b"polls")
if not polls:
return
polls = orjson.loads(polls)
if str(ctx.guild.id) not in polls:
return await ctx.send(
embed=discord.Embed(
                    color=discord.Color.blurple(), description="No polls found"
)
)
if message_id not in polls[str(ctx.guild.id)]:
return await ctx.send(
embed=discord.Embed(
                    color=discord.Color.blurple(), description="Poll not found"
)
)
winner = max(
polls[str(ctx.guild.id)][message_id],
key=lambda x: polls[str(ctx.guild.id)][message_id][x]["count"],
)
await ctx.reply(f"Winner of the poll was {winner}")
polls[str(ctx.guild.id)].pop(message_id)
self.DB.main.put(b"polls", orjson.dumps(polls))
@commands.command(name="mute")
@commands.has_permissions(kick_members=True)
async def mute_member(self, ctx, member: discord.Member, *, reason=None):
"""Mutes a member.
        member: discord.Member
reason: str
"""
role = discord.utils.get(ctx.guild.roles, name="Muted")
embed = discord.Embed(color=discord.Color.blurple())
if role in member.roles:
await member.remove_roles(role)
embed.description = f"```Unmuted {member.display_name}```"
return await ctx.send(embed=embed)
member_id = f"{ctx.guild.id}-{member.id}".encode()
infractions = self.DB.infractions.get(member_id)
if not infractions:
infractions = {
"count": 0,
"bans": [],
"kicks": [],
"mutes": [],
"warnings": [],
}
else:
infractions = orjson.loads(infractions)
infractions["count"] += 1
infractions["mutes"].append(reason)
if not role:
reactions = ["✅", "❎"]
def check(reaction: discord.Reaction, user: discord.User) -> bool:
return (
user.id == ctx.author.id
and reaction.message.channel == ctx.channel
and reaction.emoji in reactions
)
embed.description = "```No muted role found react to add Muted role.```"
message = await ctx.send(embed=embed)
for reaction in reactions:
await message.add_reaction(reaction)
reaction, user = await ctx.bot.wait_for(
"reaction_add", timeout=60.0, check=check
)
if reaction.emoji == "✅":
role = await ctx.guild.create_role(
name="Muted", color=discord.Color.dark_red()
)
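                # Deny sending and connecting in every category so the role actually mutes.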
for categories in ctx.guild.categories:
await categories.set_permissions(
role, send_messages=False, connect=False
)
else:
return
await member.add_roles(role)
embed = discord.Embed(
color=discord.Color.dark_red(),
description="{} has been muted. They have {} total infractions.".format(
member.mention, infractions["count"]
),
)
await ctx.send(embed=embed)
self.DB.infractions.put(member_id, orjson.dumps(infractions))
@commands.command()
@commands.has_permissions(manage_nicknames=True)
async def nick(self, ctx, member: discord.Member, *, nickname):
"""Changes a members nickname.
member: discord.Member
nickname: str
"""
await member.edit(nick=nickname)
await ctx.send(f"Changed {member.display_name}'s nickname to {nickname}'")
@commands.command(name="warn")
@commands.has_permissions(manage_messages=True)
async def warn_member(self, ctx, member: discord.Member, *, reason=None):
"""Warns a member and keeps track of how many warnings a member has.
        member: discord.Member
reason: str
"""
member_id = f"{ctx.guild.id}-{member.id}".encode()
infractions = self.DB.infractions.get(member_id)
if not infractions:
infractions = {
"count": 0,
"bans": [],
"kicks": [],
"mutes": [],
"warnings": [],
}
else:
infractions = orjson.loads(infractions)
infractions["count"] += 1
infractions["warnings"].append(reason)
embed = discord.Embed(
color=discord.Color.dark_red(),
description="{} has been warned. They have {} total infractions.".format(
member.mention, infractions["count"]
),
)
await ctx.send(embed=embed)
self.DB.infractions.put(member_id, orjson.dumps(infractions))
@commands.command()
@commands.has_permissions(manage_messages=True)
async def warnings(self, ctx, member: discord.Member):
"""Shows the warnings a member has.
        member: discord.Member
"""
member_id = f"{ctx.guild.id}-{member.id}".encode()
infractions = self.DB.infractions.get(member_id)
embed = discord.Embed(color=discord.Color.blurple())
if not infractions:
embed.description = "```Member has no infractions```"
return await ctx.send(embed=embed)
infractions = orjson.loads(infractions)
embed.description = "```{} Has {} warnings\n\n{}```".format(
member.display_name,
len(infractions["warnings"]),
"\n".join(infractions["warnings"]),
)
await ctx.send(embed=embed)
async def end_date(self, duration):
"""Converts a duration to an end date.
duration: str
How much to add onto the current date e.g 5d 10h 25m 5s
"""
seconds = 0
times = {"s": 1, "m": 60, "h": 3600, "d": 86400}
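        # e.g. "3d 5h 10m" -> 3*86400 + 5*3600 + 10*60 seconds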
try:
for time in duration.split():
seconds += int(time[:-1]) * times[time[-1]]
except ValueError:
return None
return seconds
async def ban(self, ctx, member: discord.Member, duration=None, *, reason=None):
"""Bans a member.
member: discord.Member
The member to ban.
duration: str
How long to ban the member for.
reason: str
The reason for banning the member.
"""
embed = discord.Embed(color=discord.Color.dark_red())
if ctx.author.top_role <= member.top_role and ctx.guild.owner != ctx.author:
embed.description = "```You can't ban someone higher or equal to you```"
return await ctx.send(embed=embed)
if duration:
seconds = await self.end_date(duration)
if not seconds:
embed.description = "```Invalid duration. Example: '3d 5h 10m'```"
return await ctx.send(embed=embed)
self.loop.call_later(seconds, asyncio.create_task, ctx.guild.unban(member))
embed.title = f"Banned {member.display_name} for {seconds}s"
else:
embed.title = f"Banned {member.display_name}"
await member.ban(reason=reason)
member_id = f"{ctx.guild.id}-{member.id}".encode()
infractions = self.DB.infractions.get(member_id)
if not infractions:
infractions = {
"count": 0,
"bans": [],
"kicks": [],
"mutes": [],
"warnings": [],
}
else:
infractions = orjson.loads(infractions)
infractions["count"] += 1
infractions["bans"].append(reason)
embed.description = f"```They had {infractions['count']} total infractions.```"
self.DB.infractions.put(member_id, orjson.dumps(infractions))
await ctx.send(embed=embed)
@commands.command(name="ban")
@commands.has_permissions(ban_members=True)
async def ban_member(self, ctx, member: discord.Member, *, reason=None):
"""Bans a member.
Usage:
.ban @Singularity#8953 He was rude
member: discord.Member
The member to ban.
reason: str
The reason for banning the member.
"""
await self.ban(ctx=ctx, member=member, reason=reason)
@commands.command(name="tempban")
@commands.has_permissions(ban_members=True)
async def temp_ban_member(
self, ctx, member: discord.Member, duration=None, *, reason=None
):
"""Temporarily bans a member.
Usage:
.ban @Singularity#8953 "3d 5h 10m" He was rude
You need the quotes for the duration or it will only get the first argument
member: discord.Member
The member to ban.
duration: str
How long to ban the member for.
reason: str
The reason for banning the member.
"""
await self.ban(ctx=ctx, member=member, duration=duration, reason=reason)
@commands.command()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, name):
"""Unbans a member based off their name.
name: str
"""
embed = discord.Embed(color=discord.Color.blurple())
for entry in await ctx.guild.bans():
if name == entry.user.name:
embed.description = "```User Unbanned.```"
await ctx.guild.unban(entry.user)
return await ctx.send(embed=embed)
embed.description = f"```User {name} Not Found.```"
await ctx.send(embed=embed)
@commands.command(name="kick")
@commands.has_permissions(kick_members=True)
async def kick_member(self, ctx, member: discord.Member, *, reason=None):
"""Kicks a member.
member: discord.Member
The member to kick can be an id, @ or name.
reason: str
"""
if ctx.author.top_role <= member.top_role and ctx.guild.owner != ctx.author:
return await ctx.send(
embed=discord.Embed(
color=discord.Color.blurple(),
description="```You can't kick someone higher or equal to you```",
)
)
await member.kick()
member_id = f"{ctx.guild.id}-{member.id}".encode()
infractions = self.DB.infractions.get(member_id)
if not infractions:
infractions = {
"count": 0,
"bans": [],
"kicks": [],
"mutes": [],
"warnings": [],
}
else:
infractions = orjson.loads(infractions)
infractions["count"] += 1
infractions["kicks"].append(reason)
embed = discord.Embed(
color=discord.Color.dark_red(),
title=f"{member.display_name} has been kicked",
description=f"```They had {infractions['count']} total infractions.```",
)
await ctx.send(embed=embed)
self.DB.infractions.put(member_id, orjson.dumps(infractions))
@commands.command()
@commands.has_permissions(manage_roles=True)
async def role(self, ctx, member: discord.Member, role: discord.Role):
"""Gives member a role.
role: discord.Role
The name of the role.
member: discord.Member
The member to give the role.
"""
embed = discord.Embed(color=discord.Color.blurple())
if (
ctx.author != member
and ctx.author.top_role <= member.top_role
and ctx.guild.owner != ctx.author
):
embed.description = (
"```You can't change the roles of someone higher than you.```"
)
return await ctx.send(embed=embed)
if (
ctx.author == member
and ctx.author.top_role <= role
and ctx.guild.owner != ctx.author
):
embed.description = (
"```You can't give yourself a role higher than your highest role.```"
)
return await ctx.send(embed=embed)
if role in member.roles:
await member.remove_roles(role)
embed.description = f"```Removed the role {role} from {member}```"
return await ctx.send(embed=embed)
await member.add_roles(role)
embed.description = f"```Gave {member} the role {role}```"
return await ctx.send(embed=embed)
@commands.group()
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
async def purge(self, ctx):
"""Purges messages.
num: int
The number of messages to delete.
"""
if not ctx.invoked_subcommand:
try:
await ctx.channel.purge(limit=int(ctx.subcommand_passed) + 1)
            except (TypeError, ValueError):
embed = discord.Embed(
color=discord.Color.blurple(),
description=f"```Usage: {ctx.prefix}purge [amount]```",
)
await ctx.send(embed=embed)
@purge.command()
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
async def till(self, ctx, message_id: int):
"""Clear messages in a channel until the given message_id. Given ID is not deleted."""
try:
message = await ctx.fetch_message(message_id)
except discord.errors.NotFound:
return await ctx.send(
embed=discord.Embed(
color=discord.Color.blurple(),
description="```Message could not be found in this channel```",
)
)
await ctx.channel.purge(after=message)
@purge.command()
@commands.has_permissions(manage_messages=True)
@commands.guild_only()
async def user(self, ctx, user: discord.User, num_messages: int = 100):
"""Clear all messagges of <User> withing the last [n=100] messages.
user: discord.User
The user to purge the messages of.
num_messages: int
The number of messages to check.
"""
def check(msg):
return msg.author.id == user.id
await ctx.channel.purge(limit=num_messages, check=check, before=None)
@purge.command()
@commands.has_permissions(manage_channels=True)
@commands.guild_only()
async def channel(self, ctx, channel: discord.TextChannel = None):
"""Purges a channel by cloning and then deleting it.
channel: discord.TextChannel
"""
channel = channel or ctx.channel
await channel.clone()
await channel.delete()
@commands.group()
@commands.has_permissions(manage_messages=True)
async def history(self, ctx):
"""Shows the edited message or deleted message history of a member."""
if not ctx.invoked_subcommand:
embed = discord.Embed(
color=discord.Color.blurple(),
description=f"```Usage: {ctx.prefix}history [deleted/edited]```",
)
await ctx.send(embed=embed)
@history.command(aliases=["d"])
@commands.has_permissions(manage_messages=True)
async def deleted(self, ctx, member: discord.Member = None):
"""Shows a members most recent deleted message history.
member: discord.Member
The user to get the history of.
"""
member = member or ctx.author
member_id = f"{ctx.guild.id}-{member.id}".encode()
deleted = self.DB.deleted.get(member_id)
embed = discord.Embed(color=discord.Color.blurple())
if not deleted:
embed.description = "```No deleted messages found```"
return await ctx.send(embed=embed)
deleted = orjson.loads(deleted)
messages = []
        for date in reversed(deleted):
            messages.append((date, deleted[date].replace("`", "`\u200b")))
pages = menus.MenuPages(
source=HistoryMenu(messages),
clear_reactions_after=True,
delete_message_after=True,
)
await pages.start(ctx)
@history.command(aliases=["e"])
@commands.has_permissions(manage_messages=True)
async def edited(self, ctx, member: discord.Member = None, amount: int = 10):
"""Shows a users most recent edit message history.
member: discord.Member
The user to get the edit history of.
amount: int
The amount of messages to get.
"""
member = member or ctx.author
member_id = f"{ctx.guild.id}-{member.id}".encode()
edited = self.DB.edited.get(member_id)
embed = discord.Embed(color=discord.Color.blurple())
if not edited:
embed.description = "```No edited messages found```"
return await ctx.send(embed=embed)
edited = orjson.loads(edited)
messages = []
for index, date in enumerate(reversed(edited)):
if index == amount:
break
before = edited[date][0].replace("`", "`\u200b")
after = edited[date][1].replace("`", "`\u200b")
messages.append((date, f"{before} >>> {after}"))
pages = menus.MenuPages(
source=HistoryMenu(messages),
clear_reactions_after=True,
delete_message_after=True,
)
await pages.start(ctx)
def setup(bot: commands.Bot) -> None:
"""Starts moderation cog."""
bot.add_cog(moderation(bot))
| 32.38204
| 94
| 0.560423
|
3c548306372fe5f5cee470b6dc9c08324371c74f
| 11,338
|
py
|
Python
|
train.py
|
ky207/yolov4-tiny
|
204e5a07fb6dbbf11ab3165f57253ffcc5328df3
|
[
"MIT"
] | 1
|
2022-02-21T01:34:56.000Z
|
2022-02-21T01:34:56.000Z
|
train.py
|
ky207/yolov4-tiny
|
204e5a07fb6dbbf11ab3165f57253ffcc5328df3
|
[
"MIT"
] | null | null | null |
train.py
|
ky207/yolov4-tiny
|
204e5a07fb6dbbf11ab3165f57253ffcc5328df3
|
[
"MIT"
] | null | null | null |
#-------------------------------------#
#   Train the model on the dataset
#-------------------------------------#
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from nets.yolo import YoloBody
from nets.yolo_training import YOLOLoss, weights_init
from utils.callbacks import LossHistory
from utils.dataloader import YoloDataset, yolo_dataset_collate
from utils.utils import get_anchors, get_classes
from utils.utils_fit import fit_one_epoch
'''
When training your own object detection model, pay attention to the following points:
1. Before training, check carefully that your data matches the required format. This repo expects the VOC format: input images plus label files.
   Input images are .jpg files of any size; they are resized automatically before being fed to the network.
   Grayscale images are converted to RGB automatically, no manual change is needed.
   If your images are not .jpg, batch-convert them to .jpg before training.
   Labels are .xml files describing the objects to detect; each label file corresponds to one input image.
2. Trained weights are saved in the logs folder once per epoch. Weights are only written at the end of an epoch, not after a few steps, so keep the difference between epoch and step in mind.
   The code does not keep only the lowest-loss checkpoint, so with the default settings you end up with 100 weight files; delete the ones you do not need if disk space is tight.
   Saving fewer or more checkpoints is neither better nor worse in itself; keeping all of them simply leaves you the most freedom to choose.
3. The loss value is used to judge convergence. What matters is the trend: the validation loss should keep decreasing. If it stops changing, the model has essentially converged.
   The absolute value of the loss has no particular meaning; large or small only reflects how the loss is computed, and it does not have to approach 0. If you want a nicer-looking number, you can divide by 10000 inside the loss function.
   Losses recorded during training are saved under the logs folder in loss_%Y_%m_%d_%H_%M_%S.
4. Hyper-parameter tuning matters. No set of parameters is universally best; the provided values are ones verified to train correctly, so starting from them is recommended.
   The parameters are not absolute: for example, the learning rate can grow with the batch size, and very deep networks should not use too large a learning rate.
   These are rules of thumb; you will have to read up and experiment yourself.
'''
if __name__ == "__main__":
#-------------------------------#
    #   Whether to use CUDA
    #   Set this to False if you have no GPU
#-------------------------------#
Cuda = True
#--------------------------------------------------------#
    #   Be sure to modify classes_path before training so it matches your own dataset
#--------------------------------------------------------#
classes_path = 'model_data/voc_classes.txt'
#---------------------------------------------------------------------#
    #   anchors_path is the txt file holding the anchor boxes; normally not modified.
    #   anchors_mask helps the code pick the anchors for each detection scale; normally not modified.
#---------------------------------------------------------------------#
anchors_path = 'model_data/yolo_anchors.txt'
anchors_mask = [[3, 4, 5], [1, 2, 3]]
#----------------------------------------------------------------------------------------------------------------------------#
    #   See the README for how to download the weight file (it can be fetched from a network drive). The pretrained weights are generic across datasets, because the features they encode are generic.
    #   The most important part of the pretrained weights is the backbone (feature-extraction) part, which is used to extract features.
    #   Pretrained weights are necessary in 99% of cases; without them the backbone weights are too random, feature extraction is poor, and the training results suffer.
    #
    #   If training was interrupted, you can set model_path to a weight file in the logs folder to reload the partially trained weights.
    #   Also adjust the freeze-stage or unfreeze-stage parameters below to keep the epoch numbering consistent.
    #
    #   When model_path = '' no weights are loaded for the whole model.
    #
    #   The weights of the whole model are used here, so they are loaded in train.py.
    #   To train from scratch, set model_path = '' and Freeze_Train = False below; training then starts from scratch with no backbone-freezing phase.
    #   Training from scratch generally performs poorly, because the weights are too random and feature extraction is weak.
    #
    #   Networks are normally not trained from scratch; at least the backbone weights are reused. Some papers manage without pretraining, mainly because their datasets are large and their tuning is excellent.
    #   If you really must train the backbone yourself, look into the ImageNet dataset: first train a classification model whose backbone is shared with this model, then train on top of that.
#----------------------------------------------------------------------------------------------------------------------------#
model_path = 'model_data/yolov4_tiny_weights_coco.pth'
#------------------------------------------------------#
    #   Input shape; it must be a multiple of 32
#------------------------------------------------------#
input_shape = [416, 416]
#-------------------------------#
    #   Type of attention mechanism to use
    #   phi = 0: no attention mechanism
    #   phi = 1: SE
    #   phi = 2: CBAM
    #   phi = 3: ECA
#-------------------------------#
phi = 0
#------------------------------------------------------#
    #   YOLOv4 training tricks
    #   mosaic            mosaic data augmentation, True or False
    #       in practice mosaic augmentation is not very stable, so it defaults to False
    #   Cosine_lr         cosine-annealing learning rate, True or False
    #   label_smoothing   label smoothing, usually below 0.01, e.g. 0.01 or 0.005
#------------------------------------------------------#
mosaic = False
Cosine_lr = False
label_smoothing = 0
#----------------------------------------------------#
    #   Training is split into two phases: a freeze phase and an unfreeze phase.
    #   Running out of GPU memory has nothing to do with dataset size; if you see an out-of-memory error, reduce batch_size.
    #   Because of the BatchNorm layers, batch_size must be at least 2; it cannot be 1.
#----------------------------------------------------#
#----------------------------------------------------#
    #   Freeze-phase training parameters
    #   The backbone is frozen, so the feature-extraction network does not change
    #   Uses less GPU memory; only fine-tunes the network
#----------------------------------------------------#
Init_Epoch = 0
Freeze_Epoch = 50
Freeze_batch_size = 32
Freeze_lr = 1e-3
#----------------------------------------------------#
    #   Unfreeze-phase training parameters
    #   The backbone is no longer frozen, so the feature-extraction network changes
    #   Uses more GPU memory; all network parameters are updated
#----------------------------------------------------#
UnFreeze_Epoch = 100
Unfreeze_batch_size = 16
Unfreeze_lr = 1e-4
#------------------------------------------------------#
    #   Whether to use freeze training; by default the backbone is frozen first and unfrozen afterwards.
#------------------------------------------------------#
Freeze_Train = True
#------------------------------------------------------#
    #   Whether to use multiple worker processes to read data
    #   Enabling this speeds up data loading but uses more memory
    #   On machines with little memory, set this to 2 or 0
#------------------------------------------------------#
num_workers = 4
#----------------------------------------------------#
    #   Get the image paths and labels
#----------------------------------------------------#
train_annotation_path = '2007_train.txt'
val_annotation_path = '2007_val.txt'
#----------------------------------------------------#
    #   Get classes and anchors
#----------------------------------------------------#
class_names, num_classes = get_classes(classes_path)
anchors, num_anchors = get_anchors(anchors_path)
#------------------------------------------------------#
    #   Create the YOLO model
#------------------------------------------------------#
model = YoloBody(anchors_mask, num_classes, phi = phi)
weights_init(model)
if model_path != '':
#------------------------------------------------------#
        #   See the README for the weight file (downloadable from Baidu Netdisk)
#------------------------------------------------------#
print('Load weights {}.'.format(model_path))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path, map_location = device)
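        # Keep only the pretrained weights whose shape matches the current model
        # (the detection head changes with the number of classes).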
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
model_train = model.train()
if Cuda:
model_train = torch.nn.DataParallel(model)
cudnn.benchmark = True
model_train = model_train.cuda()
yolo_loss = YOLOLoss(anchors, num_classes, input_shape, Cuda, anchors_mask, label_smoothing)
loss_history = LossHistory("logs/")
#---------------------------#
    #   Read the txt files that describe the dataset
#---------------------------#
with open(train_annotation_path) as f:
train_lines = f.readlines()
with open(val_annotation_path) as f:
val_lines = f.readlines()
num_train = len(train_lines)
num_val = len(val_lines)
#------------------------------------------------------#
    #   The backbone features are generic, so freeze training speeds up training
    #   and keeps the weights from being damaged early on.
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the number of epochs trained with the backbone frozen
    #   UnFreeze_Epoch is the total number of training epochs
    #   If you hit OOM / run out of GPU memory, reduce the batch size
#------------------------------------------------------#
if True:
batch_size = Freeze_batch_size
lr = Freeze_lr
start_epoch = Init_Epoch
end_epoch = Freeze_Epoch
optimizer = optim.Adam(model_train.parameters(), lr, weight_decay = 5e-4)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.94)
train_dataset = YoloDataset(train_lines, input_shape, num_classes, mosaic=mosaic, train = True)
val_dataset = YoloDataset(val_lines, input_shape, num_classes, mosaic=False, train = False)
gen = DataLoader(train_dataset, shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset , shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
epoch_step = num_train // batch_size
epoch_step_val = num_val // batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("The dataset is too small to train on; please add more data.")
#------------------------------------#
        #   Freeze part of the network for training
#------------------------------------#
if Freeze_Train:
for param in model.backbone.parameters():
param.requires_grad = False
for epoch in range(start_epoch, end_epoch):
fit_one_epoch(model_train, model, yolo_loss, loss_history, optimizer, epoch,
epoch_step, epoch_step_val, gen, gen_val, end_epoch, Cuda)
lr_scheduler.step()
if True:
batch_size = Unfreeze_batch_size
lr = Unfreeze_lr
start_epoch = Freeze_Epoch
end_epoch = UnFreeze_Epoch
optimizer = optim.Adam(model_train.parameters(), lr, weight_decay = 5e-4)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.94)
train_dataset = YoloDataset(train_lines, input_shape, num_classes, mosaic=mosaic, train = True)
val_dataset = YoloDataset(val_lines, input_shape, num_classes, mosaic=False, train = False)
gen = DataLoader(train_dataset, shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset , shuffle = True, batch_size = batch_size, num_workers = num_workers, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
epoch_step = num_train // batch_size
epoch_step_val = num_val // batch_size
if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("The dataset is too small to train on; please add more data.")
#------------------------------------#
        #   Unfreeze the backbone and train the whole network
#------------------------------------#
if Freeze_Train:
for param in model.backbone.parameters():
param.requires_grad = True
for epoch in range(start_epoch, end_epoch):
fit_one_epoch(model_train, model, yolo_loss, loss_history, optimizer, epoch,
epoch_step, epoch_step_val, gen, gen_val, end_epoch, Cuda)
lr_scheduler.step()
| 44.116732
| 138
| 0.500176
|
c63fa04c73b074bd4280094b17d2effa90dd9a8c
| 3,921
|
py
|
Python
|
test/functional/wallet_scriptaddress2.py
|
DrRayThe2nd/litecoin
|
961f9a36d55c52a5fe5f940a205fa9f7b45330b3
|
[
"MIT"
] | null | null | null |
test/functional/wallet_scriptaddress2.py
|
DrRayThe2nd/litecoin
|
961f9a36d55c52a5fe5f940a205fa9f7b45330b3
|
[
"MIT"
] | null | null | null |
test/functional/wallet_scriptaddress2.py
|
DrRayThe2nd/litecoin
|
961f9a36d55c52a5fe5f940a205fa9f7b45330b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Pittscoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.setup_clean_chain = False
self.extra_args = [['-addresstype=legacy'], [], []]
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some blocks
self.nodes[1].generate(101)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 101):
raise AssertionError("Failed to mine 100 blocks")
addr = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")['address']
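        # The new script-address prefix makes regtest P2SH addresses start with 'Q'
        # (old-style ones start with '2', see multisig_addr_old below).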
assert_equal(multisig_addr[0], 'Q')
# Send to a new multisig address
txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
block = self.nodes[1].generate(3)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr in dest_addrs)
# Spend from the new multisig address
addr3 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
# Send to an old multisig address. The api addmultisigaddress
# can only generate a new address so we manually compute
# multisig_addr_old beforehand using an old client.
priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
"cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
"mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
self.nodes[0].importprivkey(priv_keys[0])
self.nodes[0].importprivkey(priv_keys[1])
multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")['address']
assert_equal(multisig_addr_new, 'QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe')
multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
## Let's send to the old address. We can then find it in the
## new address with the new client. So basically the old
## address and the new one are the same thing.
txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
block = self.nodes[1].generate(1)
self.sync_all()
tx = self.nodes[2].getrawtransaction(txid, 1)
dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
tx["vout"][1]['scriptPubKey']['addresses'][0]]
assert(multisig_addr_new in dest_addrs)
assert(multisig_addr_old not in dest_addrs)
# Spend from the new multisig address
addr4 = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
block = self.nodes[0].generate(2)
self.sync_all()
assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
ScriptAddress2Test().main()
| 40.010204
| 104
| 0.650344
|
0b10dbf0f445ba43d342e483e7d79686d75a3486
| 2,742
|
py
|
Python
|
tests/kafkatest/utils/util.py
|
ss10/kafka
|
bc5051565171cf65b4ed7dd4d9ef269d66a1021a
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/utils/util.py
|
ss10/kafka
|
bc5051565171cf65b4ed7dd4d9ef269d66a1021a
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/utils/util.py
|
ss10/kafka
|
bc5051565171cf65b4ed7dd4d9ef269d66a1021a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest import __version__ as __kafkatest_version__
import re
def kafkatest_version():
"""Return string representation of current ducktape version."""
return __kafkatest_version__
def _kafka_jar_versions(proc_string):
"""Use a rough heuristic to find all kafka versions explicitly in the process classpath"""
    versions = re.findall(r"kafka-[a-z]+-([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", proc_string)
    versions.extend(re.findall(r"kafka-([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)", proc_string))
return set(versions)
def is_version(node, version_list, proc_grep_string="kafka"):
"""Heuristic to check that only the specified version appears in the classpath of the process
A useful tool to aid in checking that service version apis are working correctly.
"""
lines = [l for l in node.account.ssh_capture("ps ax | grep %s | grep -v grep" % proc_grep_string)]
assert len(lines) == 1
versions = _kafka_jar_versions(lines[0])
return versions == {str(v) for v in version_list}
def is_int(msg):
"""Method used to check whether the given message is an integer
return int or raises an exception if message is not an integer
"""
try:
return int(msg)
except ValueError:
raise Exception("Unexpected message format (expected an integer). Message: %s" % (msg))
def is_int_with_prefix(msg):
"""
    Method used to check whether the given message is of format 'integer_prefix'.'integer_value'
:param msg: message to validate
:return: msg or raises an exception is a message is of wrong format
"""
try:
parts = msg.split(".")
if len(parts) != 2:
raise Exception("Unexpected message format. Message should be of format: integer "
"prefix dot integer value. Message: %s" % (msg))
int(parts[0])
int(parts[1])
return msg
except ValueError:
raise Exception("Unexpected message format. Message should be of format: integer "
"prefix dot integer value, but one of the two parts (before or after dot) "
"are not integers. Message: %s" % (msg))
| 37.054054
| 102
| 0.677243
|
7417851690f6648356a4781e40d6b4ca4808e91d
| 34,242
|
py
|
Python
|
frappe/core/doctype/user/user.py
|
mojerro/frappe
|
0f434846bed87c02491aad0a3da631df382bfb3c
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/user/user.py
|
mojerro/frappe
|
0f434846bed87c02491aad0a3da631df382bfb3c
|
[
"MIT"
] | null | null | null |
frappe/core/doctype/user/user.py
|
mojerro/frappe
|
0f434846bed87c02491aad0a3da631df382bfb3c
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from bs4 import BeautifulSoup
import frappe
import frappe.share
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import (cint, flt, has_gravatar, escape_html, format_datetime,
now_datetime, get_formatted_email, today)
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password, check_password, get_password_reset_limit
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings, toggle_notifications
from frappe.utils.user import get_system_managers
from frappe.website.utils import is_signup_disabled
from frappe.rate_limiter import rate_limit
from frappe.core.doctype.user_type.user_type import user_linked_with_permission_on_doctype
STANDARD_USERS = ("Guest", "Administrator")
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
frappe.cache().delete_key('users_for_mentions')
frappe.cache().delete_key('enabled_users')
def validate(self):
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_allowed_modules()
self.validate_user_image()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_allowed_modules(self):
if self.module_profile:
module_profile = frappe.get_doc('Module Profile', self.module_profile)
self.set('block_modules', [])
for d in module_profile.get('block_modules'):
self.append('block_modules', {
'module': d.module
})
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
def on_update(self):
# clear new password
self.share_with_self()
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
now=frappe.flags.in_test or frappe.flags.in_install
self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True,
now=now
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name, now=now)
# Set user selected timezone
if self.time_zone:
frappe.defaults.set_default("time_zone", self.time_zone, self.name)
if self.has_value_changed('allow_in_mentions') or self.has_value_changed('user_type'):
frappe.cache().delete_key('users_for_mentions')
if self.has_value_changed('enabled'):
frappe.cache().delete_key('enabled_users')
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# toggle notifications based on the user's status
toggle_notifications(self.name, enable=cint(self.enabled))
def add_system_manager_role(self):
# if adding system manager, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''For the standard users like admin and guest, the user type is fixed.'''
user_type_mapper = {
'Administrator': 'System User',
'Guest': 'Website User'
}
if self.user_type and not frappe.get_cached_value('User Type', self.user_type, 'is_standard'):
if user_type_mapper.get(self.name):
self.user_type = user_type_mapper.get(self.name)
else:
self.set_roles_and_modules_based_on_user_type()
else:
'''Set as System User if any of the given roles has desk_access'''
self.user_type = 'System User' if self.has_desk_access() else 'Website User'
def set_roles_and_modules_based_on_user_type(self):
user_type_doc = frappe.get_cached_doc('User Type', self.user_type)
if user_type_doc.role:
self.roles = []
# Check whether User has linked with the 'Apply User Permission On' doctype or not
if user_linked_with_permission_on_doctype(user_type_doc, self.name):
self.append('roles', {
'role': user_type_doc.role
})
frappe.msgprint(_('Role has been set as per the user type {0}')
.format(self.user_type), alert=True)
user_type_doc.update_modules_in_user(self)
def has_desk_access(self):
'''Return true if any of the set roles has desk access'''
if not self.roles:
return False
return len(frappe.db.sql("""select name
from `tabRole` where desk_access=1
and name in ({0}) limit 1""".format(', '.join(['%s'] * len(self.roles))),
[d.role for d in self.roles]))
def share_with_self(self):
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
def validate_share(self, docshare):
pass
# if docshare.user == self.name:
# if self.user_type=="System User":
# if docshare.share != 1:
# frappe.throw(_("Sorry! User should have complete access to their own record."))
# else:
# frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and cint(self.send_welcome_email):
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
def get_other_system_managers(self):
return frappe.db.sql("""select distinct `user`.`name` from `tabHas Role` as `user_role`, `tabUser` as `user`
where user_role.role='System Manager'
and `user`.docstatus<2
and `user`.enabled=1
and `user_role`.parent = `user`.name
and `user_role`.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
created_by = get_user_fullname(frappe.session['user'])
if created_by == "Guest":
created_by = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'created_by': created_by
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now!=None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.sql("""DELETE FROM `tabToDo` WHERE `owner`=%s""", (self.name,))
frappe.db.sql("""UPDATE `tabToDo` SET `assigned_by`=NULL WHERE `assigned_by`=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
# delete shares
frappe.db.sql("""delete from `tabDocShare` where user=%s""", self.name)
# delete messages
frappe.db.sql("""delete from `tabCommunication`
where communication_type in ('Chat', 'Notification')
and reference_doctype='User'
and (reference_name=%s or owner=%s)""", (self.name, self.name))
# unlink contact
frappe.db.sql("""update `tabContact`
set `user`=null
where `user`=%s""", (self.name))
# delete notification settings
frappe.delete_doc("Notification Settings", self.name, ignore_permissions=True)
if self.get('allow_in_mentions'):
frappe.cache().delete_key('users_for_mentions')
frappe.cache().delete_key('enabled_users')
def before_rename(self, old_name, new_name, merge=False):
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
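		# Rewrite the owner/modified_by columns of every table so they point at the renamed user.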
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.sql("""UPDATE `tabUser`
SET email = %s
WHERE name = %s""", (new_name, new_name))
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
except:
return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@classmethod
def find_by_credentials(cls, user_name: str, password: str, validate_password: bool = True):
"""Find the user by credentials.
This is a login utility that needs to check login related system settings while finding the user.
1. Find user by email ID by default
2. If allow_login_using_mobile_number is set, you can use mobile number while finding the user.
3. If allow_login_using_user_name is set, you can use username while finding the user.
"""
login_with_mobile = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number"))
login_with_username = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name"))
or_filters = [{"name": user_name}]
if login_with_mobile:
or_filters.append({"mobile_no": user_name})
if login_with_username:
or_filters.append({"username": user_name})
users = frappe.db.get_all('User', fields=['name', 'enabled'], or_filters=or_filters, limit=1)
if not users:
return
user = users[0]
user['is_authenticated'] = True
if validate_password:
try:
check_password(user['name'], password, delete_tracker_cache=False)
except frappe.AuthenticationError:
user['is_authenticated'] = False
return user
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
#validate key to avoid key input like ['like', '%'], '', ['in', ['']]
if key and not isinstance(key, str):
frappe.throw(_('Invalid key type'))
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
frappe.local.response.http_status_code = 410
return res['message']
else:
user = res['user']
logout_all_sessions = cint(logout_all_sessions) or frappe.db.get_single_value("System Settings", "logout_on_password_reset")
_update_password(user, new_password, logout_all_sessions=cint(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/app"
else:
return redirect_url if redirect_url else "/"
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=None):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
# score should be greater than 0 and minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.db.sql("""select email_account,email_id
from `tabUser Email`
where awaiting_password = 1
and parent = %(user)s""", {"user":user}, as_dict=1)
if waiting:
return waiting
else:
frappe.db.sql("""update `tabUser Email`
set awaiting_password =0
where parent = %(user)s""",{"user":user})
return False
def ask_pass_update():
	# update the system defaults with the list of users awaiting a password
from frappe.utils import set_default
users = frappe.db.sql("""SELECT DISTINCT(parent) as user FROM `tabUser Email`
WHERE awaiting_password = 1""", as_dict=True)
password_list = [ user.get("user") for user in users ]
set_default("email_user_password", u','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
result = frappe._dict()
if key:
result.user = frappe.db.get_value("User", {"reset_password_key": key})
if not result.user:
result.message = _("The Link specified has either been used before or Invalid")
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
result.user = user
return result
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if is_signup_disabled():
frappe.throw(_('Sign Up is disabled'), title='Not Allowed')
user = frappe.db.get("User", {"email": email})
if user:
if user.enabled:
return 0, _("Already Registered")
else:
return 0, _("Registered but disabled")
else:
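		# Basic signup throttling: refuse public registration once more than 300
		# User records have been created within the last hour.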
if frappe.db.get_creation_count('User', 60) > 300:
frappe.respond_as_web_page(_('Temporarily Disabled'),
_('Too many users signed up recently, so the registration is disabled. Please try back in an hour'),
http_status_code=429)
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": escape_html(full_name),
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
@rate_limit(limit=get_password_reset_limit, seconds = 24*60*60, methods=['POST'])
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(_("Password reset instructions have been sent to your email"))
except frappe.DoesNotExistError:
frappe.clear_messages()
return 'not found'
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond, get_filters_cond
conditions=[]
user_type_condition = "and user_type != 'Website User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
filters.pop('ignore_user_type')
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{fcond} {mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s
""".format(
user_type_condition = user_type_condition,
standard_users=", ".join([frappe.db.escape(u) for u in STANDARD_USERS]),
key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)
),
dict(start=start, page_len=page_len, txt=txt)
)
def get_total_users():
"""Returns total no. of system users"""
return flt(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0])
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
		# don't allow a non-Administrator user to view / edit the Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
if mention.get('data-is-group') == 'true':
try:
user_group = frappe.get_cached_doc('User Group', mention['data-id'])
emails += [d.user for d in user_group.user_group_members]
except frappe.DoesNotExistError:
pass
continue
email = mention['data-id']
emails.append(email)
return emails
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
@frappe.whitelist()
def get_module_profile(module_profile):
module_profile = frappe.get_doc('Module Profile', {'module_profile_name': module_profile})
return module_profile.get('block_modules')
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_name = get_contact_name(user.email)
if not contact_name:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
		# Add phone number if it does not already exist in the contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
		# Add mobile number if it does not already exist in the contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
frappe.only_for("System Manager")
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
	# if the API key is not set, generate one
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
@frappe.whitelist()
def switch_theme(theme):
if theme in ["Dark", "Light"]:
frappe.db.set_value("User", frappe.session.user, "desk_theme", theme)
def get_enabled_users():
def _get_enabled_users():
enabled_users = frappe.get_all("User", filters={"enabled": "1"}, pluck="name")
return enabled_users
return frappe.cache().get_value("enabled_users", _get_enabled_users)
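# A minimal usage sketch of the helpers above (not part of the original file);
# it assumes an initialised Frappe site context, and the email address below is
# a placeholder.
def _example_user_queries():
	print(get_total_users())
	print(get_active_users())
	print(get_system_users(exclude_users=["someone@example.com"], limit=10))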
| 32.487666
| 126
| 0.729017
|
74b28c20ded47fd1a6c9fe812cf2f8b4850a4578
| 3,094
|
py
|
Python
|
test/functional/getblocktemplate_longpoll.py
|
twairgroup/wondercoin
|
c075c2d0c1a4927d9f04d5100106e369a85128e5
|
[
"MIT"
] | 1
|
2021-04-29T09:04:49.000Z
|
2021-04-29T09:04:49.000Z
|
test/functional/getblocktemplate_longpoll.py
|
twairgroup/wondercoin
|
c075c2d0c1a4927d9f04d5100106e369a85128e5
|
[
"MIT"
] | 2
|
2021-06-08T21:50:46.000Z
|
2021-06-09T14:04:30.000Z
|
test/functional/getblocktemplate_longpoll.py
|
twairgroup/wondercoin
|
c075c2d0c1a4927d9f04d5100106e369a85128e5
|
[
"MIT"
] | 1
|
2021-06-09T01:09:47.000Z
|
2021-06-09T01:09:47.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from test_framework.test_framework import WondercoinTestFramework
from test_framework.util import *
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(WondercoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
        # Test 1: test that the longpoll waits if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that thread will exit now that the new block has arrived
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block on our own node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| 42.972222
| 112
| 0.682935
|
23ed797094023866f4c08ecdda4e47d80d676510
| 881
|
py
|
Python
|
test/token_utils.py
|
lumapps/endpoints-management-python
|
0408547e82de2a344ebb6b5600559f82e78c3980
|
[
"Apache-2.0"
] | 22
|
2016-09-01T16:40:34.000Z
|
2020-12-13T14:54:18.000Z
|
test/token_utils.py
|
lumapps/endpoints-management-python
|
0408547e82de2a344ebb6b5600559f82e78c3980
|
[
"Apache-2.0"
] | 69
|
2016-09-01T22:25:16.000Z
|
2020-10-20T07:44:10.000Z
|
test/token_utils.py
|
lumapps/endpoints-management-python
|
0408547e82de2a344ebb6b5600559f82e78c3980
|
[
"Apache-2.0"
] | 20
|
2016-09-01T22:05:07.000Z
|
2022-03-13T02:26:59.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a utility method that generates auth token."""
import json
from jwkest import jws
def generate_auth_token(payload, keys, alg=u"ES256", kid=None):
json_web_signature = jws.JWS(json.dumps(payload), alg=alg, kid=kid)
return json_web_signature.sign_compact(keys=keys)
| 35.24
| 74
| 0.763905
|
0759bc964a76f278f346f96fdfd4d7b43ce0d951
| 1,485
|
py
|
Python
|
bagpipe/exabgp/message/update/attribute/mprnlri.py
|
taheri0/MPLS-over-GRE
|
7d14b819d396b7779b02ea1150bcaf412cb6d36c
|
[
"Apache-2.0"
] | 94
|
2015-01-05T17:15:21.000Z
|
2022-01-09T20:05:16.000Z
|
bagpipe/exabgp/message/update/attribute/mprnlri.py
|
taheri0/MPLS-over-GRE
|
7d14b819d396b7779b02ea1150bcaf412cb6d36c
|
[
"Apache-2.0"
] | 18
|
2015-03-16T16:16:55.000Z
|
2017-03-27T11:22:38.000Z
|
bagpipe/exabgp/message/update/attribute/mprnlri.py
|
taheri0/MPLS-over-GRE
|
7d14b819d396b7779b02ea1150bcaf412cb6d36c
|
[
"Apache-2.0"
] | 36
|
2015-03-13T02:30:15.000Z
|
2021-06-22T10:10:23.000Z
|
# encoding: utf-8
"""
mprnlri.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2012 Exa Networks. All rights reserved.
Modified by Orange - 2014
"""
from bagpipe.exabgp.structure.address import Address, SAFI #,AFI
from bagpipe.exabgp.message.update.attribute import AttributeID,Flag,Attribute
# =================================================================== MP Reachable NLRI (15)
class MPRNLRI (Attribute):
FLAG = Flag.OPTIONAL
ID = AttributeID.MP_REACH_NLRI
MULTIPLE = True
def __init__ (self,routes):
# all the routes must have the same next-hop
self.routes = routes
def pack (self):
next_hop = ''
# EOR do not have any next_hop
if self.routes[0].attributes.has(AttributeID.NEXT_HOP):
# we do not want a next_hop attribute packed (with the _attribute()) but just the next_hop itself
next_hop = self.routes[0].attributes[AttributeID.NEXT_HOP].next_hop.pack()
# FIX: for SAFI 128, the next-hop is encoded like this:
if self.routes[0].nlri.safi == SAFI.mpls_vpn:
next_hop = "\0"*8 + next_hop
routes = ''.join([route.nlri.pack() for route in self.routes])
return self._attribute(
self.routes[0].nlri.afi.pack() + self.routes[0].nlri.safi.pack() +
chr(len(next_hop)) + next_hop +
chr(0) + routes
)
def __len__ (self):
return len(self.pack())
def __str__ (self):
return "MP_REACH_NLRI Family %s %d NLRI(s)" % (Address.__str__(self.routes[0]),len(self.routes))
def __repr__ (self):
return str(self)
| 28.557692
| 100
| 0.674074
|
9bfb219fb7c5ccdab033d4366c8fbe22ffe36b2e
| 4,987
|
py
|
Python
|
scrapy_redis/dupefilter.py
|
vanilla111/CQUPT_Spider
|
c404a398b5f84eba5215511b3eb5a80bcc8563dd
|
[
"MIT"
] | 3
|
2020-01-06T00:29:53.000Z
|
2021-03-23T15:10:51.000Z
|
scrapy_redis/dupefilter.py
|
vanilla111/CQUPT_Spider
|
c404a398b5f84eba5215511b3eb5a80bcc8563dd
|
[
"MIT"
] | null | null | null |
scrapy_redis/dupefilter.py
|
vanilla111/CQUPT_Spider
|
c404a398b5f84eba5215511b3eb5a80bcc8563dd
|
[
"MIT"
] | 2
|
2019-08-01T07:11:41.000Z
|
2020-02-14T04:57:48.000Z
|
import logging
import time
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
from . import defaults
from .connection import get_redis_from_settings
from .bloomfilter import BloomFilter
logger = logging.getLogger(__name__)
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
    This class can also be used with Scrapy's default scheduler.
"""
logger = logger
def __init__(self, server, key, debug, bit, hash_number):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
            Redis key where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
self.bf = BloomFilter(server, self.key, bit, hash_number)
@classmethod
def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
            An RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
        # XXX: This creates a one-time key, needed to support using this
        # class as a standalone dupefilter with Scrapy's default scheduler.
        # If Scrapy passed the spider to the open() method this wouldn't be needed.
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
bit = settings.getint('BLOOMFILTER_BIT', defaults.BLOOMFILTER_BIT)
hash_number = settings.getint('BLOOMFILTER_HASH_NUMBER', defaults.BLOOMFILTER_HASH_NUMBER)
return cls(server, key=key, debug=debug, bit=bit, hash_number=hash_number)
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = self.request_fingerprint(request)
        # Membership test against the Bloom filter; insert the fingerprint only if unseen.
if self.bf.exists(fp):
return True
self.bf.insert(fp)
return False
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
@classmethod
def from_spider(cls, spider):
settings = spider.settings
server = get_redis_from_settings(settings)
dupefilter_key = settings.get("SCHEDULER_DUPEFILTER_KEY", defaults.SCHEDULER_DUPEFILTER_KEY)
key = dupefilter_key % {'spider': spider.name}
debug = settings.getbool('DUPEFILTER_DEBUG')
bit = settings.getint('BLOOMFILTER_BIT', defaults.BLOOMFILTER_BIT)
hash_number = settings.getint('BLOOMFILTER_HASH_NUMBER', defaults.BLOOMFILTER_HASH_NUMBER)
return cls(server, key=key, debug=debug, bit=bit, hash_number=hash_number)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
spider.crawler.stats.inc_value('bloomfilter/filtered', spider=spider)
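# A minimal usage sketch (not part of the original module); the Redis URL, key
# and numeric parameters below are illustrative assumptions.
if __name__ == '__main__':
    import redis
    from scrapy.http import Request
    server = redis.StrictRedis.from_url('redis://localhost:6379/0')
    df = RFPDupeFilter(server, key='dupefilter:example', debug=True, bit=30, hash_number=6)
    print(df.request_seen(Request('https://example.com')))  # False on first sight
    print(df.request_seen(Request('https://example.com')))  # True once recorded
    df.clear()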
| 29.862275
| 100
| 0.614798
|
794747a28490edd4f4f0299e6243b3d70b80e72d
| 12,168
|
py
|
Python
|
script.mrknow.urlresolver/lib/urlresolver9/lib/net.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
script.mrknow.urlresolver/lib/urlresolver9/lib/net.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
script.mrknow.urlresolver/lib/urlresolver9/lib/net.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
'''
common XBMC Module
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import random
import cookielib
import gzip
import re
import StringIO
import urllib
import urllib2
import socket
import time
import kodi
# Set Global timeout - Useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
BR_VERS = [
['%s.0' % i for i in xrange(18, 50)],
['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111',
'40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71',
'46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80', '48.0.2564.116', '49.0.2623.112', '50.0.2661.86'],
['11.0'],
['8.0', '9.0', '10.0', '10.6']]
WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0']
FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko',
'Mozilla/5.0 (compatible; MSIE {br_ver}; {win_ver}{feature}; Trident/6.0)']
def get_ua():
try: last_gen = int(kodi.get_setting('last_ua_create'))
except: last_gen = 0
if not kodi.get_setting('current_ua') or last_gen < (time.time() - (7 * 24 * 60 * 60)):
index = random.randrange(len(RAND_UAS))
versions = {'win_ver': random.choice(WIN_VERS), 'feature': random.choice(FEATURES), 'br_ver': random.choice(BR_VERS[index])}
user_agent = RAND_UAS[index].format(**versions)
# log_utils.log('Creating New User Agent: %s' % (user_agent), log_utils.LOGDEBUG)
kodi.set_setting('current_ua', user_agent)
kodi.set_setting('last_ua_create', str(int(time.time())))
else:
user_agent = kodi.get_setting('current_ua')
return user_agent
class Net:
'''
This class wraps :mod:`urllib2` and provides an easy way to make http
requests while taking care of cookies, proxies, gzip compression and
character encoding.
Example::
from addon.common.net import Net
net = Net()
response = net.http_GET('http://xbmc.org')
print response.content
'''
_cj = cookielib.LWPCookieJar()
_proxy = None
_user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
_http_debug = False
def __init__(self, cookie_file='', proxy='', user_agent='', http_debug=False):
'''
Kwargs:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
user_agent (str): String to use as the User Agent header. If not
supplied the class will use a default user agent (chrome)
http_debug (bool): Set ``True`` to have HTTP header info written to
the XBMC log for all requests.
'''
if cookie_file:
self.set_cookies(cookie_file)
if proxy:
self.set_proxy(proxy)
if user_agent:
self.set_user_agent(user_agent)
self._http_debug = http_debug
self._update_opener()
def set_cookies(self, cookie_file):
'''
Set the cookie file and try to load cookies from it if it exists.
Args:
cookie_file (str): Full path to a file to be used to load and save
cookies to.
'''
try:
self._cj.load(cookie_file, ignore_discard=True)
self._update_opener()
return True
except:
return False
def get_cookies(self):
        '''Returns a dictionary containing all cookie information by domain.'''
return self._cj._cookies
def save_cookies(self, cookie_file):
'''
Saves cookies to a file.
Args:
cookie_file (str): Full path to a file to save cookies to.
'''
self._cj.save(cookie_file, ignore_discard=True)
def set_proxy(self, proxy):
'''
Args:
proxy (str): Proxy setting (eg.
``'http://user:pass@example.com:1234'``)
'''
self._proxy = proxy
self._update_opener()
def get_proxy(self):
'''Returns string containing proxy details.'''
return self._proxy
def set_user_agent(self, user_agent):
'''
Args:
user_agent (str): String to use as the User Agent header.
'''
self._user_agent = user_agent
def get_user_agent(self):
'''Returns user agent string.'''
return self._user_agent
def _update_opener(self):
'''
Builds and installs a new opener to be used by all future calls to
:func:`urllib2.urlopen`.
'''
if self._http_debug:
http = urllib2.HTTPHandler(debuglevel=1)
else:
http = urllib2.HTTPHandler()
if self._proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.ProxyHandler({'http':
self._proxy}),
urllib2.HTTPBasicAuthHandler(),
http)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self._cj),
urllib2.HTTPBasicAuthHandler(),
http)
urllib2.install_opener(opener)
def http_GET(self, url, headers={}, compression=True):
'''
Perform an HTTP GET request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, headers=headers, compression=compression)
def http_POST(self, url, form_data, headers={}, compression=True):
'''
Perform an HTTP POST request.
Args:
url (str): The URL to POST.
form_data (dict): A dictionary of form data to POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
return self._fetch(url, form_data, headers=headers, compression=compression)
def http_HEAD(self, url, headers={}):
'''
Perform an HTTP HEAD request.
Args:
url (str): The URL to GET.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page.
'''
request = urllib2.Request(url)
request.get_method = lambda: 'HEAD'
request.add_header('User-Agent', self._user_agent)
for key in headers:
request.add_header(key, headers[key])
response = urllib2.urlopen(request)
return HttpResponse(response)
def _fetch(self, url, form_data={}, headers={}, compression=True):
'''
Perform an HTTP GET or POST request.
Args:
url (str): The URL to GET or POST.
form_data (dict): A dictionary of form data to POST. If empty, the
request will be a GET, if it contains form data it will be a POST.
Kwargs:
headers (dict): A dictionary describing any headers you would like
to add to the request. (eg. ``{'X-Test': 'testing'}``)
compression (bool): If ``True`` (default), try to use gzip
compression.
Returns:
An :class:`HttpResponse` object containing headers and other
meta-information about the page and the page content.
'''
req = urllib2.Request(url)
if form_data:
            if not isinstance(form_data, basestring):
                form_data = urllib.urlencode(form_data, True)
req = urllib2.Request(url, form_data)
req.add_header('User-Agent', self._user_agent)
for key in headers:
req.add_header(key, headers[key])
if compression:
req.add_header('Accept-Encoding', 'gzip')
req.add_unredirected_header('Host', req.get_host())
response = urllib2.urlopen(req)
return HttpResponse(response)
class HttpResponse:
'''
    This class represents a response from an HTTP request.
The content is examined and every attempt is made to properly encode it to
Unicode.
.. seealso::
:meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
'''
content = ''
    '''Unicode encoded string containing the body of the response.'''
def __init__(self, response):
'''
Args:
response (:class:`mimetools.Message`): The object returned by a call
to :func:`urllib2.urlopen`.
'''
self._response = response
@property
def content(self):
html = self._response.read()
encoding = None
try:
if self._response.headers['content-encoding'].lower() == 'gzip':
html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
except:
pass
try:
content_type = self._response.headers['content-type']
if 'charset=' in content_type:
encoding = content_type.split('charset=')[-1]
except:
pass
r = re.search('<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);\s+charset=(.+?)"', html, re.IGNORECASE)
if r:
encoding = r.group(1)
if encoding is not None:
try: html = html.decode(encoding)
except: pass
return html
def get_headers(self, as_dict=False):
'''Returns headers returned by the server.
If as_dict is True, headers are returned as a dictionary otherwise a list'''
if as_dict:
return dict([(item[0].title(), item[1]) for item in self._response.info().items()])
else:
return self._response.info().headers
def get_url(self):
'''
Return the URL of the resource retrieved, commonly used to determine if
a redirect was followed.
'''
return self._response.geturl()
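# A minimal usage sketch (not part of the original module); the URL, header and
# cookie-file path below are placeholders.
if __name__ == '__main__':
    net = Net(http_debug=False)
    resp = net.http_GET('http://example.com', headers={'X-Test': 'testing'})
    print resp.get_url()
    print resp.get_headers(as_dict=True)
    net.save_cookies('/tmp/example_cookies.lwp')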
| 35.683284
| 187
| 0.585059
|
eb34f473dc1a474cde296645eb64cde5cd6a49af
| 1,801
|
py
|
Python
|
bakpack/main.py
|
pramasoul/bakpack
|
804652e008a2d1bb04a521d6025a4d73ac7511af
|
[
"MIT"
] | null | null | null |
bakpack/main.py
|
pramasoul/bakpack
|
804652e008a2d1bb04a521d6025a4d73ac7511af
|
[
"MIT"
] | null | null | null |
bakpack/main.py
|
pramasoul/bakpack
|
804652e008a2d1bb04a521d6025a4d73ac7511af
|
[
"MIT"
] | null | null | null |
#
from absl import app
from absl import flags
from absl import logging
from collections import defaultdict
import json
from pathlib import Path
from sys import stdout
from .packer import binpack
FLAGS = flags.FLAGS
flags.DEFINE_integer("size", None, "How much a bin can hold", lower_bound=1)
flags.DEFINE_string("output", None, "Output filename (defaults to stdout)")
flags.DEFINE_bool("json", False, "Output only JSON of files with sizes")
def main(argv):
logging.info(f"Bin size is {FLAGS.size}")
logging.info(f"args are {argv}")
pathnames_of_size = defaultdict(list)
sizes = []
for pathname in argv[1:]:
path = Path(pathname)
size = path.stat().st_size
sizes.append(size)
pathnames_of_size[size].append(pathname)
logging.info(f"pathnames_of_size is {pathnames_of_size}")
logging.info(f"sizes: {sizes}")
packed = binpack(FLAGS.size, sizes)
logging.info(f"packed {packed}")
pathed = []
for bin_contents in packed:
paths = []
for size in bin_contents:
paths.append((pathnames_of_size[size].pop(), size))
pathed.append(paths)
with get_outfile() as f:
if FLAGS.json:
json.dump(pathed, f)
return
f.write("# Some overall preamble\n")
for bin_contents in pathed:
f.write(f"# Some preamble to a particular tape\n")
for pathname, size in bin_contents:
f.write(f"# process Path {pathname}\n")
f.write(f"# Some post-process for a particular tape\n")
f.write(f"# Any overall post-process\n")
def get_outfile():
if FLAGS.output is None:
return stdout
return open(FLAGS.output, "w")
def entry():
app.run(main)
if __name__ == "__main__":
app.run(main)
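# A hypothetical invocation of the CLI defined above (the module path, file
# paths and bin size are placeholders, not from the project):
#
#   python -m bakpack.main --size=800000000 --output=plan.txt /backups/a.tar /backups/b.tar
#
# Passing --json emits the same packing as JSON instead of the commented plan.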
| 26.880597
| 76
| 0.642976
|
8f3897135e0873e20e04513d2e5d938e37ce5b7b
| 1,361
|
py
|
Python
|
weather/havesine.py
|
jawahar273/bm
|
0501210077c5435b27d04c96a2f85da453d63689
|
[
"MIT"
] | null | null | null |
weather/havesine.py
|
jawahar273/bm
|
0501210077c5435b27d04c96a2f85da453d63689
|
[
"MIT"
] | 218
|
2018-05-29T00:34:55.000Z
|
2021-06-10T18:40:26.000Z
|
weather/havesine.py
|
jawahar273/bm
|
0501210077c5435b27d04c96a2f85da453d63689
|
[
"MIT"
] | null | null | null |
import math
class Haversine:
"""
    Use the Haversine class to calculate the distance between
    two lon/lat coordinate pairs.
    The output distance is available in kilometers, meters, miles, and feet.
    Example usage: Haversine([lon1,lat1],[lon2,lat2]).feet
    The distance between the user's location and the weather stations is
    calculated before calling the `weather api` to get the weather details.
    Please refer to this
    `link <https://nathanrooy.github.io/posts/2016-09-07/haversine-with-python/>`_
    by its author Nathan A. Rooy to learn more about the formula.
"""
def __init__(self, coord1, coord2):
lon1, lat1 = coord1
lon2, lat2 = coord2
R = 6371000 # radius of Earth in meters
phi_1 = math.radians(lat1)
phi_2 = math.radians(lat2)
delta_phi = math.radians(lat2 - lat1)
delta_lambda = math.radians(lon2 - lon1)
a = (
math.sin(delta_phi / 2.0) ** 2
+ math.cos(phi_1) * math.cos(phi_2) * math.sin(delta_lambda / 2.0) ** 2
)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
self.meters = R * c # output distance in meters
self.km = self.meters / 1000.0 # output distance in kilometers
self.miles = self.meters * 0.000621371 # output distance in miles
self.feet = self.miles * 5280 # output distance in feet
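# A minimal usage sketch (not part of the original module); the coordinates are
# illustrative (lon, lat) pairs, not values used by the project.
if __name__ == "__main__":
    honolulu = (-157.8583, 21.3069)
    hilo = (-155.0868, 19.7241)
    print(round(Haversine(honolulu, hilo).km, 1), "km")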
| 32.404762
| 83
| 0.626745
|
f797f902bc87faffb58c58ad7ac27d0eff5369f4
| 2,863
|
py
|
Python
|
src/first_bot_email.py
|
dvntaka/hawaii_coronavirus_bot
|
091d8f4a1f9a75250973234f0ad82807bbf2e2ae
|
[
"MIT"
] | null | null | null |
src/first_bot_email.py
|
dvntaka/hawaii_coronavirus_bot
|
091d8f4a1f9a75250973234f0ad82807bbf2e2ae
|
[
"MIT"
] | 2
|
2021-05-26T22:24:23.000Z
|
2021-05-26T22:26:38.000Z
|
src/first_bot_email.py
|
dvntaka/hawaii_coronavirus_bot
|
091d8f4a1f9a75250973234f0ad82807bbf2e2ae
|
[
"MIT"
] | null | null | null |
# Created following a tutorial at: https://towardsdatascience.com/how-to-track-coronavirus-with-python-a5320b778c8e
from datetime import date
from urllib.request import Request, urlopen
import pandas as pd
import smtplib
def get_data():
#req = Request('https://www.worldometers.info/coronavirus/#countries', headers={'User-Agent': 'Mozilla/5.0'})
req = Request('https://www.worldometers.info/coronavirus/country/us/', headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
df = pd.read_html(webpage)[0]
# class Coronavirus():
# def _init_(self):
# self.driver = webdriver.Chrome()
#
# def get_data(self):
# self.driver.get('https://www.worldometers.info/coronavirus/country/us/')
#df = pd.read_html('https://www.worldometers.info/coronavirus/#countries')[0]
hi = df.loc[df['USAState'].isin(['Hawaii'])] #df.loc[df['TotalCases'] == 'USA']# df[['TotalCases'] == 'USA']
hi = hi.fillna(0)
total_cases = int(hi.iloc[0]['TotalCases'])
new_cases = int(hi.iloc[0]['NewCases'])
total_deaths = int(hi.iloc[0]['TotalDeaths'])
new_deaths = int(hi.iloc[0]['NewDeaths'])
# categories = []
# categories.append(total_cases)
# categories.append(new_cases)
# categories.append(total_deaths)
# categories.append(new_deaths)
#
# for x in categories:
# if math.isnan(x):
# print('hi')
# x = '0'
# print(categories)
print(new_cases)
# /html/body/div[4]/div[1]/div/div[5]/div[1]/div/table#usa_table_countries_today
# document.querySelector("#main_table_countries_today")
# #main_table_countries_today
# /html/body/div[3]/div[3]/div/div[3]/div[1]/div/table
send_mail(total_cases,new_cases,total_deaths, new_deaths)
def send_mail(total_cases, new_cases, total_deaths, new_deaths):
body = 'Total cases: ' + str(total_cases) + '\
\nNew cases: ' + str(new_cases) + '\
\nTotal deaths: ' + str(total_deaths) + '\
\nNew deaths: ' + str(new_deaths) + '\
\nCheck the link: https://www.worldometers.info/coronavirus/country/us'
FROM = 'pythonbotsarefun@gmail.com'
TO = 'dvntaka@yahoo.com'
SUBJECT = 'HI Coronavirus Stats for ' + str(date.today())
TEXT = body
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, TO, SUBJECT, TEXT)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('pythonbotsarefun@gmail.com', 'aT@8J8f!')
server.sendmail(
FROM,
TO,
message
)
print('Hey Email has been sent!')
server.close()
if __name__=="__main__":
get_data()
#schedule.every().day.at("13:46").do(get_data)
#schedule.every(10).seconds.do(get_data)
# while True:
# schedule.run_pending()
# time.sleep(60)
| 30.457447
| 113
| 0.630807
|
cbb1e0547b46c49dd575eedb41df8747748768a1
| 1,410
|
py
|
Python
|
eks_auth/aws/client.py
|
ElDiabloRojo/eks_auth
|
c87430d82b70698958cd75b710ccda8cfcbbbb96
|
[
"MIT"
] | 1
|
2020-10-13T19:51:18.000Z
|
2020-10-13T19:51:18.000Z
|
eks_auth/aws/client.py
|
ElDiabloRojo/eks_auth
|
c87430d82b70698958cd75b710ccda8cfcbbbb96
|
[
"MIT"
] | 537
|
2020-04-23T21:06:54.000Z
|
2022-03-31T17:42:17.000Z
|
eks_auth/aws/client.py
|
ElDiabloRojo/eks_auth
|
c87430d82b70698958cd75b710ccda8cfcbbbb96
|
[
"MIT"
] | null | null | null |
import boto3
class AWSCLient:
def __init__(self, service, access_key, secret_key, region, session_token=None):
self.service = service
self.access_key = access_key
self.secret_access_key = secret_key
self.session_token = session_token
self.region = region
def primary_client(self):
client = boto3.client(
self.service,
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_access_key
)
return client
def session_token_client(self, region):
client = boto3.client(
self.service,
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_access_key,
aws_session_token=self.session_token,
region_name=region
)
return client
def get_session_token(self, user_name, account_number):
mfa_code = input('Enter the MFA code: ')
client = boto3.client(
self.service,
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_access_key
)
response = client.get_session_token(
DurationSeconds=3600,
SerialNumber='arn:aws:iam::' + account_number + ':mfa/' + user_name,
TokenCode=mfa_code
)
return response
def show_object(self):
print(self.__dict__)
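# A minimal usage sketch (not part of the original module); the credentials,
# user name and account number below are placeholders.
if __name__ == "__main__":
    sts = AWSCLient("sts", "AKIA_EXAMPLE", "SECRET_EXAMPLE", "us-east-1")
    # get_session_token() prompts for an MFA code and returns temporary credentials.
    creds = sts.get_session_token("example-user", "123456789012")["Credentials"]
    eks = AWSCLient(
        "eks",
        creds["AccessKeyId"],
        creds["SecretAccessKey"],
        "us-east-1",
        session_token=creds["SessionToken"],
    )
    print(eks.session_token_client("us-east-1").list_clusters())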
| 29.375
| 84
| 0.623404
|
981da89342f0934b5191b93a28613d9cff40b493
| 1,525
|
py
|
Python
|
source/legacy/muc4_type_samples.py
|
Glaciohound/Schema_Induction
|
116df31a0a042ad4358d093f303b21a051ebdaad
|
[
"MIT"
] | null | null | null |
source/legacy/muc4_type_samples.py
|
Glaciohound/Schema_Induction
|
116df31a0a042ad4358d093f303b21a051ebdaad
|
[
"MIT"
] | null | null | null |
source/legacy/muc4_type_samples.py
|
Glaciohound/Schema_Induction
|
116df31a0a042ad4358d093f303b21a051ebdaad
|
[
"MIT"
] | null | null | null |
import argparse
import json
from components.load_muc4 import load_muc4
from components.muc4_tools import \
extract_relevant_sentences, corpora_to_dict, all_types
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dir", default="data/muc34/TASK/CORPORA", type=str)
parser.add_argument("--output-file", type=str,
default="data/muc34/outputs/muc4-type-samples.txt")
args = parser.parse_args()
return args
def main(args):
dev_corpora, dev_events, tst_corpora, tst_events, proper_nouns = \
load_muc4()
dev_corpora_dict = corpora_to_dict(dev_corpora)
dev_grouped = {
_type: list(filter(
lambda x: x["INCIDENT: TYPE"][0] == _type,
dev_events))
for _type in all_types}
with open(args.output_file, 'w') as f:
for _type, _samples in dev_grouped.items():
f.write(_type)
f.write("\n")
f.write("=" * 30)
f.write("\n")
for _sample in _samples[:30]:
f.write(json.dumps(_sample, indent=4))
f.write("\n")
f.write(json.dumps(
extract_relevant_sentences(
_sample, dev_corpora_dict[_sample["MESSAGE: ID"]]),
indent=4
)[0])
f.write("\n")
f.write("-" * 30)
f.write("\n")
f.write("\n")
if __name__ == "__main__":
args = get_args()
main(args)
| 30.5
| 77
| 0.554754
|
b1a241155f8906edc4b80182a8ad94dc0542d621
| 5,867
|
py
|
Python
|
contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_bundle_integration.py
|
oseemann/pants
|
628c83d5ab2706b0f64d69568c57a718ec7c5e2a
|
[
"Apache-2.0"
] | null | null | null |
contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_bundle_integration.py
|
oseemann/pants
|
628c83d5ab2706b0f64d69568c57a718ec7c5e2a
|
[
"Apache-2.0"
] | null | null | null |
contrib/node/tests/python/pants_test/contrib/node/tasks/test_node_bundle_integration.py
|
oseemann/pants
|
628c83d5ab2706b0f64d69568c57a718ec7c5e2a
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from contextlib import contextmanager
from pants.fs.archive import archiver, archiver_for_path
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class NodeBundleIntegrationTest(PantsRunIntegrationTest):
DIST_DIR = 'dist'
TGZ_SUFFIX = '.tar.gz'
JAR_SUFFIX = '.jar'
PROJECT_DIR = 'contrib/node/examples/src/node/web-component-button'
WEB_COMPONENT_BUTTON_PROJECT = 'web-component-button'
WEB_COMPONENT_BUTTON_PROCESSED_PROJECT = 'web-component-button-processed'
WITH_DEPENDENCY_ARTIFACTS_PROJECT = 'web-component-button-processed-with-dependency-artifacts'
WEB_COMPONENT_BUTTON_BUNDLE = 'web-component-button-bundle'
WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE = 'web-component-button-processed-bundle'
PREINSTALLED_PROJECT_DIR = 'contrib/node/examples/src/node/preinstalled-project'
PREINSTALLED_PROJECT = 'preinstalled-project'
PREINSTALLED_BUNDLE = 'preinstalled-project-bundle'
JVM_PROJECT = 'jsresources'
JVM_WITH_ARTIFACTS_PROJECT = 'jsresources-with-dependency-artifacts'
JVM_PROJECT_DIR = 'contrib/node/examples/src/java/org/pantsbuild/testproject/jsresources'
WEB_COMPONENT_BUTTON_ARTIFACT = os.path.join(
DIST_DIR, WEB_COMPONENT_BUTTON_BUNDLE + TGZ_SUFFIX)
WEB_COMPONENT_BUTTON_PROCESSED_ARTIFACT = os.path.join(
DIST_DIR, WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE + TGZ_SUFFIX)
PREINSTALLED_ARTIFACT = os.path.join(
DIST_DIR, PREINSTALLED_BUNDLE + TGZ_SUFFIX)
JVM_PROJECT_ARTIFACT = os.path.join(DIST_DIR, JVM_PROJECT + JAR_SUFFIX)
JVM_WITH_ARTIFACTS_ARTIFACT = os.path.join(DIST_DIR, JVM_WITH_ARTIFACTS_PROJECT + JAR_SUFFIX)
def test_bundle_node_module(self):
command = [
'bundle',
':'.join([self.PROJECT_DIR, self.WEB_COMPONENT_BUTTON_BUNDLE])]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.WEB_COMPONENT_BUTTON_ARTIFACT) as temp_dir:
actual_set = set(os.listdir(temp_dir))
expected_set = set(['src', 'test', 'node_modules', 'package.json', 'webpack.config.js'])
self.assertTrue(expected_set <= actual_set)
if expected_set < actual_set:
# npm 5 introduced package-lock.json
self.assertEqual(actual_set - expected_set, set(['package-lock.json']))
# Make sure .bin symlinks remains as symlinks.
self.assertTrue(os.path.islink(os.path.join(temp_dir, 'node_modules', '.bin', 'mocha')))
def test_bundle_node_module_processed(self):
command = [
'bundle',
':'.join([self.PROJECT_DIR, self.WEB_COMPONENT_BUTTON_PROCESSED_BUNDLE])]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.WEB_COMPONENT_BUTTON_PROCESSED_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(temp_dir)),
set(['Button.js'])
)
def test_bundle_jvm_binary_with_node_module(self):
command = [
'binary',
':'.join([self.JVM_PROJECT_DIR, self.JVM_PROJECT])
]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.JVM_PROJECT_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(os.path.join(temp_dir, self.WEB_COMPONENT_BUTTON_PROCESSED_PROJECT))),
set(['Button.js'])
)
# Only include node build results, not original node_modules directory
self.assertTrue('node_modules' not in os.listdir(temp_dir))
      # A transitive dependency that is marked as not generating artifacts should not be included.
self.assertTrue('web-build-tool' not in os.listdir(temp_dir))
def test_bundle_jvm_binary_with_node_module_and_dependencies(self):
command = [
'binary',
':'.join([self.JVM_PROJECT_DIR, self.JVM_WITH_ARTIFACTS_PROJECT])
]
pants_run = self.run_pants(command=command)
self.assert_success(pants_run)
with self._extract_archive(self.JVM_WITH_ARTIFACTS_ARTIFACT) as temp_dir:
print (os.listdir(temp_dir))
self.assertEquals(
set(os.listdir(os.path.join(temp_dir, self.WITH_DEPENDENCY_ARTIFACTS_PROJECT))),
set(['Button.js'])
)
# Only include node build results, not original node_modules directory
self.assertTrue('node_modules' not in os.listdir(temp_dir))
      # Transitive dependency artifacts should be included for this target.
self.assertTrue('web-dependency-test' in os.listdir(temp_dir))
def test_bundle_node_preinstalled_module(self):
command = [
'bundle',
':'.join([self.PREINSTALLED_PROJECT_DIR, self.PREINSTALLED_BUNDLE])]
self.assert_success(self.run_pants(command=command))
with self._extract_archive(self.PREINSTALLED_ARTIFACT) as temp_dir:
self.assertEquals(
set(os.listdir(temp_dir)),
set(['src', 'test', 'node_modules', 'package.json'])
)
def test_no_bundle_for_node_module(self):
command = ['bundle', ':'.join([self.PREINSTALLED_PROJECT_DIR, self.PREINSTALLED_PROJECT])]
self.assert_success(self.run_pants(command=command))
self.assertFalse(os.path.exists(self.PREINSTALLED_BUNDLE))
@contextmanager
def _extract_archive(self, archive_path):
with temporary_dir() as temp_dir:
_, extension = os.path.splitext(archive_path)
print (extension)
if extension == '.jar':
extraction_archiver = archiver('zip')
else:
extraction_archiver = archiver_for_path(os.path.basename(archive_path))
extraction_archiver.extract(archive_path, temp_dir)
yield temp_dir
| 39.911565
| 96
| 0.740583
|
3b269112fe4d005db7eb6627217b27c9c690f6b2
| 712
|
py
|
Python
|
telegram_bot/config.py
|
Carmo-sousa/telegram-bot
|
7a2f675bc3b5da65742851c1a2c4438f9e982745
|
[
"MIT"
] | null | null | null |
telegram_bot/config.py
|
Carmo-sousa/telegram-bot
|
7a2f675bc3b5da65742851c1a2c4438f9e982745
|
[
"MIT"
] | null | null | null |
telegram_bot/config.py
|
Carmo-sousa/telegram-bot
|
7a2f675bc3b5da65742851c1a2c4438f9e982745
|
[
"MIT"
] | null | null | null |
import os
import logging
from dotenv import load_dotenv
# Load the project's environment variables.
load_dotenv()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO,
)
class Config:
    # Permission scope for the project.
# https://developers.google.com/sheets/api/guides/authorizing
SCOPES = ["https://www.googleapis.com/auth/spreadsheets"]
    # Spreadsheet ID
SPREADSHEET_ID = os.getenv("SPREADSHEET_ID")
RANGE_NAME = os.getenv("RANGE_NAME")
    # Telegram bot token
# https://t.me/botfather
TOKEN = os.getenv("TOKEN", "1435718138:AAFJkL3MTrSvXPYpcz6cueZHdCQTNnYFRhM")
PORT = os.getenv("PORT", 8443)
pass
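# An example .env file consumed by load_dotenv() above (all values are
# placeholders, not real credentials):
#
#   SPREADSHEET_ID=1AbCdEfGhIjKlMnOpQrStUvWxYz
#   RANGE_NAME=Sheet1!A1:D10
#   TOKEN=<bot token issued by https://t.me/botfather>
#   PORT=8443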
| 24.551724
| 80
| 0.695225
|
c6e1959a2fb2c84fcda3660890feafa3bb1eafda
| 8,298
|
py
|
Python
|
server/attender-mobile/lib/requests/packages/urllib3/connection.py
|
denbedilov/SIStore
|
da8e6f38170959efe756bafe2f83adcf1fbb14a4
|
[
"MIT"
] | 71
|
2015-05-15T08:27:25.000Z
|
2022-01-16T03:45:42.000Z
|
server/attender-mobile/lib/requests/packages/urllib3/connection.py
|
denbedilov/SIStore
|
da8e6f38170959efe756bafe2f83adcf1fbb14a4
|
[
"MIT"
] | 11
|
2015-10-26T22:47:32.000Z
|
2020-10-16T19:13:09.000Z
|
server/attender-mobile/lib/requests/packages/urllib3/connection.py
|
denbedilov/SIStore
|
da8e6f38170959efe756bafe2f83adcf1fbb14a4
|
[
"MIT"
] | 27
|
2015-03-08T22:03:00.000Z
|
2021-07-27T05:19:37.000Z
|
import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .packages import six
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
match_hostname(self.sock.getpeercert(),
self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
| 33.595142
| 99
| 0.639552
|
c1a1d7f8f4c2245347d7fbda04e2f20628a24f10
| 755
|
py
|
Python
|
test_program/test_fetchdata.py
|
YuhaoCheng/IoT-Project
|
26b4473c2366b00ba7606c6782de95fea250296b
|
[
"Apache-2.0"
] | 7
|
2018-05-06T02:34:38.000Z
|
2020-11-07T14:52:50.000Z
|
test_program/test_fetchdata.py
|
YuhaoCheng/IoT-Project
|
26b4473c2366b00ba7606c6782de95fea250296b
|
[
"Apache-2.0"
] | null | null | null |
test_program/test_fetchdata.py
|
YuhaoCheng/IoT-Project
|
26b4473c2366b00ba7606c6782de95fea250296b
|
[
"Apache-2.0"
] | 1
|
2018-03-29T03:26:59.000Z
|
2018-03-29T03:26:59.000Z
|
import pymysql
import json
def handle_request(conn,json_str,day,time):
db = pymysql.connect("172.17.118.89", "root", "123456", "pycom")
    cursor = db.cursor()
sql = """select * from console_device"""
reply = {"Messagetype":'result'}
deviceList = []
gatewayId = json_str['GatewayID']
reply["GatewayID"] = gatewayId
reply["Day"] = day
reply["Time"] = time
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
deviceList.append(row[1])
# return deviceList
except:
        print('No such table')
reply["DeviceList"] = deviceList
reply_str = json.dumps(reply)
conn.send(bytes(reply_str,'utf8'))
conn.close()
| 23.59375
| 68
| 0.602649
|
9ee251504285643e735c14711f277a462fc64e48
| 5,103
|
py
|
Python
|
groups/admin.py
|
FroshOU/manga
|
60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2
|
[
"MIT"
] | 58
|
2019-03-04T09:22:42.000Z
|
2022-02-18T09:11:57.000Z
|
groups/admin.py
|
FroshOU/manga
|
60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2
|
[
"MIT"
] | 21
|
2019-03-07T19:34:53.000Z
|
2021-12-19T12:46:40.000Z
|
groups/admin.py
|
FroshOU/manga
|
60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2
|
[
"MIT"
] | 14
|
2019-06-06T09:53:13.000Z
|
2021-12-17T14:34:13.000Z
|
"""Admin models for the groups app."""
from typing import Optional
from django.contrib import admin
from django.db.models.functions import Lower
# XXX: cannot be resolved under TYPE_CHECKING
from django.forms.models import BaseInlineFormSet, ModelForm
from django.forms.widgets import HiddenInput
# XXX: cannot be resolved under TYPE_CHECKING
from django.http import HttpRequest
from django.utils.html import format_html
from MangAdventure import filters, utils
from .models import Group, Member, Role
class MemberRoleInline(admin.StackedInline):
"""Inline admin model for :class:`~groups.models.Role`."""
model = Role
extra = 1
def get_formset(self, request: 'HttpRequest',
obj: Optional[Role], **kwargs
) -> 'BaseInlineFormSet': # pragma: no cover
formset = super().get_formset(request, obj, **kwargs)
if 'group' in formset.form.base_fields:
formset.form.base_fields['group'].queryset = \
Group.objects.filter(manager_id=request.user.id)
return formset
class MemberAdmin(admin.ModelAdmin):
"""Admin model for :class:`~groups.models.Member`."""
inlines = (MemberRoleInline,)
ordering = (Lower('name'),)
list_display = ('name', '_twitter', 'discord', 'irc', '_reddit')
search_fields = ('name', 'twitter', 'discord', 'irc', 'reddit')
list_filter = (
('roles__group__name', filters.related_filter('group')),
('roles__role', filters.related_filter('role')),
)
def _twitter(self, obj: Member) -> str:
if not obj.twitter:
return ''
return format_html(
'<a href="https://twitter.com/{0}" rel="noopener noreferrer"'
' target="_blank">@{0}</a>', obj.twitter
)
_twitter.short_description = 'twitter'
_twitter.admin_order_field = 'twitter'
def _reddit(self, obj: Member) -> str:
if not obj.reddit:
return ''
return format_html(
'<a href="https://reddit.com/u/{0}" rel="noopener noreferrer"'
' target="_blank">/u/{0}</a>', obj.reddit
)
_reddit.short_description = 'reddit'
_reddit.admin_order_field = 'reddit'
class GroupAdmin(admin.ModelAdmin):
"""Admin model for :class:`~groups.models.Group`."""
exclude = ('id',)
ordering = (Lower('name'),)
list_display = ('image', 'name', '_website', 'manager', 'description')
search_fields = ('name', 'website', 'description')
list_display_links = ('name',)
list_filter = (
('manager', filters.related_filter('manager')),
)
empty_value_display = 'N/A'
def get_form(self, request: 'HttpRequest', obj: Optional[Group]
= None, change: bool = False, **kwargs) -> 'ModelForm':
form = super().get_form(request, obj, change, **kwargs)
if 'manager' in form.base_fields:
form.base_fields['manager'].initial = request.user.id
if not request.user.is_superuser: # pragma: no cover
form.base_fields['manager'].widget = HiddenInput()
return form
def image(self, obj: Group) -> str:
"""
Get the logo of the group as an HTML ``<img>``.
:param obj: A ``Group`` model instance.
:return: An ``<img>`` tag with the group's logo.
"""
return utils.img_tag(obj.logo, 'logo', height=25)
image.short_description = 'logo'
def _website(self, obj: Group) -> str:
if not obj.website:
return ''
return format_html(
'<a href="{0}" rel="noopener noreferrer"'
' target="_blank">{0}</a>', obj.website
)
_website.short_description = 'website'
_website.admin_order_field = 'website'
def has_change_permission(self, request: 'HttpRequest', obj:
Optional[Group] = None) -> bool:
"""
Return ``True`` if editing the object is permitted.
| Superusers can edit any group.
| Scanlators can only edit groups they manage.
:param request: The original request.
:param obj: A ``Group`` model instance.
:return: ``True`` if the user is allowed to edit the group.
"""
if request.user.is_superuser or obj is None:
return True
return obj.manager_id == request.user.id
def has_delete_permission(self, request: 'HttpRequest', obj:
Optional[Group] = None) -> bool:
"""
Return ``True`` if deleting the object is permitted.
| Superusers can delete any group.
| Scanlators can only delete groups they manage.
:param request: The original request.
:param obj: A ``Group`` model instance.
:return: ``True`` if the user is allowed to delete the group.
"""
if request.user.is_superuser or obj is None:
return True
return obj.manager_id == request.user.id
admin.site.register(Group, GroupAdmin)
admin.site.register(Member, MemberAdmin)
__all__ = ['MemberRoleInline', 'MemberAdmin', 'GroupAdmin']
| 33.794702
| 74
| 0.612777
|
5b1f033c6f806d5d2900afee9152b4093e38b7e8
| 28,840
|
py
|
Python
|
Lib/test/lock_tests.py
|
Himanshu-Lakhara/cpython
|
3b20d3454e8082e07dba93617793de5dc9237828
|
[
"PSF-2.0"
] | 15
|
2015-04-14T00:33:13.000Z
|
2021-10-18T01:08:54.000Z
|
Lib/test/lock_tests.py
|
Himanshu-Lakhara/cpython
|
3b20d3454e8082e07dba93617793de5dc9237828
|
[
"PSF-2.0"
] | 1
|
2019-10-06T22:32:10.000Z
|
2020-08-01T15:22:38.000Z
|
Lib/test/lock_tests.py
|
Himanshu-Lakhara/cpython
|
3b20d3454e8082e07dba93617793de5dc9237828
|
[
"PSF-2.0"
] | 3
|
2015-04-23T11:12:32.000Z
|
2021-10-18T01:08:55.000Z
|
"""
Various tests for synchronization primitives.
"""
import sys
import time
from _thread import start_new_thread, TIMEOUT_MAX
import threading
import unittest
import weakref
from test import support
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, f, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.n = n
self.started = []
self.finished = []
self._can_exit = not wait_before_exit
self.wait_thread = support.wait_threads_exit()
self.wait_thread.__enter__()
def task():
tid = threading.get_ident()
self.started.append(tid)
try:
f()
finally:
self.finished.append(tid)
while not self._can_exit:
_wait()
try:
for i in range(n):
start_new_thread(task, ())
except:
self._can_exit = True
raise
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
# Wait for threads exit
self.wait_thread.__exit__(None, None, None)
def do_finish(self):
self._can_exit = True
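# A minimal usage sketch for the Bunch helper above (illustrative values; kept
# as comments so nothing extra runs when the test module is imported):
#
#     evt = threading.Event()
#     b = Bunch(evt.wait, 5)      # five threads all block in evt.wait()
#     b.wait_for_started()
#     evt.set()
#     b.wait_for_finished()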
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = support.threading_setup()
def tearDown(self):
support.threading_cleanup(*self._threads)
support.reap_children()
def assertTimeout(self, actual, expected):
# The waiting and/or time.time() can be imprecise, which
# is why comparing to the expected value would sometimes fail
# (especially under Windows).
self.assertGreaterEqual(actual, expected * 0.6)
# Test nothing insane happened
self.assertLess(actual, expected * 10.0)
class BaseLockTests(BaseTestCase):
"""
Tests for both recursive and non-recursive locks.
"""
def test_constructor(self):
lock = self.locktype()
del lock
def test_repr(self):
lock = self.locktype()
self.assertRegex(repr(lock), "<unlocked .* object (.*)?at .*>")
del lock
def test_locked_repr(self):
lock = self.locktype()
lock.acquire()
self.assertRegex(repr(lock), "<locked .* object (.*)?at .*>")
del lock
def test_acquire_destroy(self):
lock = self.locktype()
lock.acquire()
del lock
def test_acquire_release(self):
lock = self.locktype()
lock.acquire()
lock.release()
del lock
def test_try_acquire(self):
lock = self.locktype()
self.assertTrue(lock.acquire(False))
lock.release()
def test_try_acquire_contended(self):
lock = self.locktype()
lock.acquire()
result = []
def f():
result.append(lock.acquire(False))
Bunch(f, 1).wait_for_finished()
self.assertFalse(result[0])
lock.release()
def test_acquire_contended(self):
lock = self.locktype()
lock.acquire()
N = 5
def f():
lock.acquire()
lock.release()
b = Bunch(f, N)
b.wait_for_started()
_wait()
self.assertEqual(len(b.finished), 0)
lock.release()
b.wait_for_finished()
self.assertEqual(len(b.finished), N)
def test_with(self):
lock = self.locktype()
def f():
lock.acquire()
lock.release()
def _with(err=None):
with lock:
if err is not None:
raise err
_with()
# Check the lock is unacquired
Bunch(f, 1).wait_for_finished()
self.assertRaises(TypeError, _with, TypeError)
# Check the lock is unacquired
Bunch(f, 1).wait_for_finished()
def test_thread_leak(self):
# The lock shouldn't leak a Thread instance when used from a foreign
# (non-threading) thread.
lock = self.locktype()
def f():
lock.acquire()
lock.release()
n = len(threading.enumerate())
        # We run many threads in the hope that existing thread ids won't
# be recycled.
Bunch(f, 15).wait_for_finished()
if len(threading.enumerate()) != n:
# There is a small window during which a Thread instance's
# target function has finished running, but the Thread is still
# alive and registered. Avoid spurious failures by waiting a
# bit more (seen on a buildbot).
time.sleep(0.4)
self.assertEqual(n, len(threading.enumerate()))
def test_timeout(self):
lock = self.locktype()
# Can't set timeout if not blocking
self.assertRaises(ValueError, lock.acquire, 0, 1)
# Invalid timeout values
self.assertRaises(ValueError, lock.acquire, timeout=-100)
self.assertRaises(OverflowError, lock.acquire, timeout=1e100)
self.assertRaises(OverflowError, lock.acquire, timeout=TIMEOUT_MAX + 1)
# TIMEOUT_MAX is ok
lock.acquire(timeout=TIMEOUT_MAX)
lock.release()
t1 = time.time()
self.assertTrue(lock.acquire(timeout=5))
t2 = time.time()
# Just a sanity test that it didn't actually wait for the timeout.
self.assertLess(t2 - t1, 5)
results = []
def f():
t1 = time.time()
results.append(lock.acquire(timeout=0.5))
t2 = time.time()
results.append(t2 - t1)
Bunch(f, 1).wait_for_finished()
self.assertFalse(results[0])
self.assertTimeout(results[1], 0.5)
def test_weakref_exists(self):
lock = self.locktype()
ref = weakref.ref(lock)
self.assertIsNotNone(ref())
def test_weakref_deleted(self):
lock = self.locktype()
ref = weakref.ref(lock)
del lock
self.assertIsNone(ref())
class LockTests(BaseLockTests):
"""
Tests for non-recursive, weak locks
(which can be acquired and released from different threads).
"""
def test_reacquire(self):
# Lock needs to be released before re-acquiring.
lock = self.locktype()
phase = []
def f():
lock.acquire()
phase.append(None)
lock.acquire()
phase.append(None)
with support.wait_threads_exit():
start_new_thread(f, ())
while len(phase) == 0:
_wait()
_wait()
self.assertEqual(len(phase), 1)
lock.release()
while len(phase) == 1:
_wait()
self.assertEqual(len(phase), 2)
def test_different_thread(self):
# Lock can be released from a different thread.
lock = self.locktype()
lock.acquire()
def f():
lock.release()
b = Bunch(f, 1)
b.wait_for_finished()
lock.acquire()
lock.release()
def test_state_after_timeout(self):
# Issue #11618: check that lock is in a proper state after a
# (non-zero) timeout.
lock = self.locktype()
lock.acquire()
self.assertFalse(lock.acquire(timeout=0.01))
lock.release()
self.assertFalse(lock.locked())
self.assertTrue(lock.acquire(blocking=False))
class RLockTests(BaseLockTests):
"""
Tests for recursive locks.
"""
def test_reacquire(self):
lock = self.locktype()
lock.acquire()
lock.acquire()
lock.release()
lock.acquire()
lock.release()
lock.release()
def test_release_unacquired(self):
# Cannot release an unacquired lock
lock = self.locktype()
self.assertRaises(RuntimeError, lock.release)
lock.acquire()
lock.acquire()
lock.release()
lock.acquire()
lock.release()
lock.release()
self.assertRaises(RuntimeError, lock.release)
def test_release_save_unacquired(self):
# Cannot _release_save an unacquired lock
lock = self.locktype()
self.assertRaises(RuntimeError, lock._release_save)
lock.acquire()
lock.acquire()
lock.release()
lock.acquire()
lock.release()
lock.release()
self.assertRaises(RuntimeError, lock._release_save)
def test_different_thread(self):
# Cannot release from a different thread
lock = self.locktype()
def f():
lock.acquire()
b = Bunch(f, 1, True)
try:
self.assertRaises(RuntimeError, lock.release)
finally:
b.do_finish()
b.wait_for_finished()
def test__is_owned(self):
lock = self.locktype()
self.assertFalse(lock._is_owned())
lock.acquire()
self.assertTrue(lock._is_owned())
lock.acquire()
self.assertTrue(lock._is_owned())
result = []
def f():
result.append(lock._is_owned())
Bunch(f, 1).wait_for_finished()
self.assertFalse(result[0])
lock.release()
self.assertTrue(lock._is_owned())
lock.release()
self.assertFalse(lock._is_owned())
class EventTests(BaseTestCase):
"""
Tests for Event objects.
"""
def test_is_set(self):
evt = self.eventtype()
self.assertFalse(evt.is_set())
evt.set()
self.assertTrue(evt.is_set())
evt.set()
self.assertTrue(evt.is_set())
evt.clear()
self.assertFalse(evt.is_set())
evt.clear()
self.assertFalse(evt.is_set())
def _check_notify(self, evt):
# All threads get notified
N = 5
results1 = []
results2 = []
def f():
results1.append(evt.wait())
results2.append(evt.wait())
b = Bunch(f, N)
b.wait_for_started()
_wait()
self.assertEqual(len(results1), 0)
evt.set()
b.wait_for_finished()
self.assertEqual(results1, [True] * N)
self.assertEqual(results2, [True] * N)
def test_notify(self):
evt = self.eventtype()
self._check_notify(evt)
# Another time, after an explicit clear()
evt.set()
evt.clear()
self._check_notify(evt)
def test_timeout(self):
evt = self.eventtype()
results1 = []
results2 = []
N = 5
def f():
results1.append(evt.wait(0.0))
t1 = time.time()
r = evt.wait(0.5)
t2 = time.time()
results2.append((r, t2 - t1))
Bunch(f, N).wait_for_finished()
self.assertEqual(results1, [False] * N)
for r, dt in results2:
self.assertFalse(r)
self.assertTimeout(dt, 0.5)
# The event is set
results1 = []
results2 = []
evt.set()
Bunch(f, N).wait_for_finished()
self.assertEqual(results1, [True] * N)
for r, dt in results2:
self.assertTrue(r)
def test_set_and_clear(self):
# Issue #13502: check that wait() returns true even when the event is
# cleared before the waiting thread is woken up.
evt = self.eventtype()
results = []
N = 5
def f():
results.append(evt.wait(1))
b = Bunch(f, N)
b.wait_for_started()
time.sleep(0.5)
evt.set()
evt.clear()
b.wait_for_finished()
self.assertEqual(results, [True] * N)
def test_reset_internal_locks(self):
# ensure that condition is still using a Lock after reset
evt = self.eventtype()
with evt._cond:
self.assertFalse(evt._cond.acquire(False))
evt._reset_internal_locks()
with evt._cond:
self.assertFalse(evt._cond.acquire(False))
class ConditionTests(BaseTestCase):
"""
Tests for condition variables.
"""
def test_acquire(self):
cond = self.condtype()
        # By default we have an RLock: the condition can be acquired multiple
# times.
cond.acquire()
cond.acquire()
cond.release()
cond.release()
lock = threading.Lock()
cond = self.condtype(lock)
cond.acquire()
self.assertFalse(lock.acquire(False))
cond.release()
self.assertTrue(lock.acquire(False))
self.assertFalse(cond.acquire(False))
lock.release()
with cond:
self.assertFalse(lock.acquire(False))
def test_unacquired_wait(self):
cond = self.condtype()
self.assertRaises(RuntimeError, cond.wait)
def test_unacquired_notify(self):
cond = self.condtype()
self.assertRaises(RuntimeError, cond.notify)
def _check_notify(self, cond):
# Note that this test is sensitive to timing. If the worker threads
# don't execute in a timely fashion, the main thread may think they
        # are further along than they are. The main thread therefore issues
# _wait() statements to try to make sure that it doesn't race ahead
# of the workers.
# Secondly, this test assumes that condition variables are not subject
# to spurious wakeups. The absence of spurious wakeups is an implementation
        # detail of Condition Variables in current CPython, but in general, not
# a guaranteed property of condition variables as a programming
# construct. In particular, it is possible that this can no longer
# be conveniently guaranteed should their implementation ever change.
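        # For contrast, a wakeup-tolerant sketch re-checks a predicate in a
        # loop (``predicate`` is a hypothetical callable); Condition.wait_for()
        # wraps exactly this pattern:
        #
        #     with cond:
        #         while not predicate():
        #             cond.wait()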
N = 5
ready = []
results1 = []
results2 = []
phase_num = 0
def f():
cond.acquire()
ready.append(phase_num)
result = cond.wait()
cond.release()
results1.append((result, phase_num))
cond.acquire()
ready.append(phase_num)
result = cond.wait()
cond.release()
results2.append((result, phase_num))
b = Bunch(f, N)
b.wait_for_started()
# first wait, to ensure all workers settle into cond.wait() before
# we continue. See issues #8799 and #30727.
while len(ready) < 5:
_wait()
ready.clear()
self.assertEqual(results1, [])
# Notify 3 threads at first
cond.acquire()
cond.notify(3)
_wait()
phase_num = 1
cond.release()
while len(results1) < 3:
_wait()
self.assertEqual(results1, [(True, 1)] * 3)
self.assertEqual(results2, [])
# make sure all awaken workers settle into cond.wait()
while len(ready) < 3:
_wait()
# Notify 5 threads: they might be in their first or second wait
cond.acquire()
cond.notify(5)
_wait()
phase_num = 2
cond.release()
while len(results1) + len(results2) < 8:
_wait()
self.assertEqual(results1, [(True, 1)] * 3 + [(True, 2)] * 2)
self.assertEqual(results2, [(True, 2)] * 3)
# make sure all workers settle into cond.wait()
while len(ready) < 5:
_wait()
# Notify all threads: they are all in their second wait
cond.acquire()
cond.notify_all()
_wait()
phase_num = 3
cond.release()
while len(results2) < 5:
_wait()
self.assertEqual(results1, [(True, 1)] * 3 + [(True,2)] * 2)
self.assertEqual(results2, [(True, 2)] * 3 + [(True, 3)] * 2)
b.wait_for_finished()
def test_notify(self):
cond = self.condtype()
self._check_notify(cond)
# A second time, to check internal state is still ok.
self._check_notify(cond)
def test_timeout(self):
cond = self.condtype()
results = []
N = 5
def f():
cond.acquire()
t1 = time.time()
result = cond.wait(0.5)
t2 = time.time()
cond.release()
results.append((t2 - t1, result))
Bunch(f, N).wait_for_finished()
self.assertEqual(len(results), N)
for dt, result in results:
self.assertTimeout(dt, 0.5)
            # Note that conceptually (that's the condition variable protocol)
# a wait() may succeed even if no one notifies us and before any
# timeout occurs. Spurious wakeups can occur.
# This makes it hard to verify the result value.
# In practice, this implementation has no spurious wakeups.
self.assertFalse(result)
def test_waitfor(self):
cond = self.condtype()
state = 0
def f():
with cond:
result = cond.wait_for(lambda : state==4)
self.assertTrue(result)
self.assertEqual(state, 4)
b = Bunch(f, 1)
b.wait_for_started()
for i in range(4):
time.sleep(0.01)
with cond:
state += 1
cond.notify()
b.wait_for_finished()
def test_waitfor_timeout(self):
cond = self.condtype()
state = 0
success = []
def f():
with cond:
dt = time.time()
result = cond.wait_for(lambda : state==4, timeout=0.1)
dt = time.time() - dt
self.assertFalse(result)
self.assertTimeout(dt, 0.1)
success.append(None)
b = Bunch(f, 1)
b.wait_for_started()
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state += 1
cond.notify()
b.wait_for_finished()
self.assertEqual(len(success), 1)
class BaseSemaphoreTests(BaseTestCase):
"""
Common tests for {bounded, unbounded} semaphore objects.
"""
def test_constructor(self):
self.assertRaises(ValueError, self.semtype, value = -1)
self.assertRaises(ValueError, self.semtype, value = -sys.maxsize)
def test_acquire(self):
sem = self.semtype(1)
sem.acquire()
sem.release()
sem = self.semtype(2)
sem.acquire()
sem.acquire()
sem.release()
sem.release()
def test_acquire_destroy(self):
sem = self.semtype()
sem.acquire()
del sem
def test_acquire_contended(self):
sem = self.semtype(7)
sem.acquire()
N = 10
sem_results = []
results1 = []
results2 = []
phase_num = 0
def f():
sem_results.append(sem.acquire())
results1.append(phase_num)
sem_results.append(sem.acquire())
results2.append(phase_num)
b = Bunch(f, 10)
b.wait_for_started()
while len(results1) + len(results2) < 6:
_wait()
self.assertEqual(results1 + results2, [0] * 6)
phase_num = 1
for i in range(7):
sem.release()
while len(results1) + len(results2) < 13:
_wait()
self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7)
phase_num = 2
for i in range(6):
sem.release()
while len(results1) + len(results2) < 19:
_wait()
self.assertEqual(sorted(results1 + results2), [0] * 6 + [1] * 7 + [2] * 6)
# The semaphore is still locked
self.assertFalse(sem.acquire(False))
# Final release, to let the last thread finish
sem.release()
b.wait_for_finished()
self.assertEqual(sem_results, [True] * (6 + 7 + 6 + 1))
def test_try_acquire(self):
sem = self.semtype(2)
self.assertTrue(sem.acquire(False))
self.assertTrue(sem.acquire(False))
self.assertFalse(sem.acquire(False))
sem.release()
self.assertTrue(sem.acquire(False))
def test_try_acquire_contended(self):
sem = self.semtype(4)
sem.acquire()
results = []
def f():
results.append(sem.acquire(False))
results.append(sem.acquire(False))
Bunch(f, 5).wait_for_finished()
# There can be a thread switch between acquiring the semaphore and
# appending the result, therefore results will not necessarily be
# ordered.
        self.assertEqual(sorted(results), [False] * 7 + [True] * 3)
def test_acquire_timeout(self):
sem = self.semtype(2)
self.assertRaises(ValueError, sem.acquire, False, timeout=1.0)
self.assertTrue(sem.acquire(timeout=0.005))
self.assertTrue(sem.acquire(timeout=0.005))
self.assertFalse(sem.acquire(timeout=0.005))
sem.release()
self.assertTrue(sem.acquire(timeout=0.005))
t = time.time()
self.assertFalse(sem.acquire(timeout=0.5))
dt = time.time() - t
self.assertTimeout(dt, 0.5)
def test_default_value(self):
# The default initial value is 1.
sem = self.semtype()
sem.acquire()
def f():
sem.acquire()
sem.release()
b = Bunch(f, 1)
b.wait_for_started()
_wait()
self.assertFalse(b.finished)
sem.release()
b.wait_for_finished()
def test_with(self):
sem = self.semtype(2)
def _with(err=None):
with sem:
self.assertTrue(sem.acquire(False))
sem.release()
with sem:
self.assertFalse(sem.acquire(False))
if err:
raise err
_with()
self.assertTrue(sem.acquire(False))
sem.release()
self.assertRaises(TypeError, _with, TypeError)
self.assertTrue(sem.acquire(False))
sem.release()
class SemaphoreTests(BaseSemaphoreTests):
"""
Tests for unbounded semaphores.
"""
def test_release_unacquired(self):
# Unbounded releases are allowed and increment the semaphore's value
sem = self.semtype(1)
sem.release()
sem.acquire()
sem.acquire()
sem.release()
class BoundedSemaphoreTests(BaseSemaphoreTests):
"""
Tests for bounded semaphores.
"""
def test_release_unacquired(self):
# Cannot go past the initial value
sem = self.semtype()
self.assertRaises(ValueError, sem.release)
sem.acquire()
sem.release()
self.assertRaises(ValueError, sem.release)
class BarrierTests(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 2.0
def setUp(self):
self.barrier = self.barriertype(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
def run_threads(self, f):
b = Bunch(f, self.N-1)
f()
b.wait_for_finished()
def multipass(self, results, n):
m = self.barrier.parties
self.assertEqual(m, self.N)
for i in range(n):
results[0].append(True)
self.assertEqual(len(results[1]), i * m)
self.barrier.wait()
results[1].append(True)
self.assertEqual(len(results[0]), (i + 1) * m)
self.barrier.wait()
self.assertEqual(self.barrier.n_waiting, 0)
self.assertFalse(self.barrier.broken)
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [[],[]]
def f():
self.multipass(results, passes)
self.run_threads(f)
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
results = []
def f():
r = self.barrier.wait()
results.append(r)
self.run_threads(f)
self.assertEqual(sum(results), sum(range(self.N)))
def test_action(self):
"""
Test the 'action' callback
"""
results = []
def action():
results.append(True)
barrier = self.barriertype(self.N, action)
def f():
barrier.wait()
self.assertEqual(len(results), 1)
self.run_threads(f)
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = []
results2 = []
def f():
try:
i = self.barrier.wait()
if i == self.N//2:
raise RuntimeError
self.barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
self.barrier.abort()
pass
self.run_threads(f)
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = []
results2 = []
results3 = []
def f():
i = self.barrier.wait()
if i == self.N//2:
# Wait until the other threads are all in the barrier.
while self.barrier.n_waiting < self.N-1:
time.sleep(0.001)
self.barrier.reset()
else:
try:
self.barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
self.barrier.wait()
results3.append(True)
self.run_threads(f)
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = []
results2 = []
results3 = []
barrier2 = self.barriertype(self.N)
def f():
try:
i = self.barrier.wait()
if i == self.N//2:
raise RuntimeError
self.barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
self.barrier.abort()
pass
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == self.N//2:
self.barrier.reset()
barrier2.wait()
self.barrier.wait()
results3.append(True)
self.run_threads(f)
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
def test_timeout(self):
"""
Test wait(timeout)
"""
def f():
i = self.barrier.wait()
if i == self.N // 2:
# One thread is late!
time.sleep(1.0)
# Default timeout is 2.0, so this is shorter.
self.assertRaises(threading.BrokenBarrierError,
self.barrier.wait, 0.5)
self.run_threads(f)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
# create a barrier with a low default timeout
barrier = self.barriertype(self.N, timeout=0.3)
def f():
i = barrier.wait()
if i == self.N // 2:
# One thread is later than the default timeout of 0.3s.
time.sleep(1.0)
self.assertRaises(threading.BrokenBarrierError, barrier.wait)
self.run_threads(f)
def test_single_thread(self):
b = self.barriertype(1)
b.wait()
b.wait()
| 30.389884
| 84
| 0.551768
|
b8bbc5d9dc0f40879c40227455e339b700ea707c
| 16,386
|
py
|
Python
|
prody/proteins/localpdb.py
|
grandevelia/ProDy
|
7c725640a94c16543423c0756388998cb86a97ae
|
[
"MIT"
] | 1
|
2017-10-01T18:52:08.000Z
|
2017-10-01T18:52:08.000Z
|
prody/proteins/localpdb.py
|
grandevelia/ProDy
|
7c725640a94c16543423c0756388998cb86a97ae
|
[
"MIT"
] | null | null | null |
prody/proteins/localpdb.py
|
grandevelia/ProDy
|
7c725640a94c16543423c0756388998cb86a97ae
|
[
"MIT"
] | 1
|
2021-10-03T06:10:45.000Z
|
2021-10-03T06:10:45.000Z
|
# -*- coding: utf-8 -*-
"""This module defines functions for handling local PDB folders."""
from glob import glob, iglob
from os.path import sep as pathsep
from os.path import abspath, isdir, isfile, join, split, splitext, normpath
from prody import LOGGER, SETTINGS
from prody.utilities import makePath, gunzip, relpath, copyFile, isWritable
from prody.utilities import sympath
from . import wwpdb
from .wwpdb import checkIdentifiers, fetchPDBviaFTP, fetchPDBviaHTTP
__all__ = ['pathPDBFolder', 'pathPDBMirror',
'fetchPDB', 'fetchPDBfromMirror',
'iterPDBFilenames', 'findPDBFiles']
def pathPDBFolder(folder=None, divided=False):
    """Returns or specifies the local PDB folder for storing PDB files downloaded from
`wwPDB <http://www.wwpdb.org/>`_ servers. Files stored in this folder can
be accessed via :func:`.fetchPDB` from any working directory. To release
the current folder, pass an invalid path, e.g. ``folder=''``.
If *divided* is **True**, the divided folder structure of wwPDB servers
will be assumed when reading from and writing to the local folder. For
example, a structure with identifier **1XYZ** will be present as
    :file:`pdblocalfolder/xy/pdb1xyz.pdb.gz`.
If *divided* is **False**, a plain folder structure will be expected and
adopted when saving files. For example, the same structure will be
present as :file:`pdblocalfolder/1xyz.pdb.gz`.
Finally, in either case, lower case letters will be used and compressed
files will be stored."""
if folder is None:
folder = SETTINGS.get('pdb_local_folder')
if folder:
if isdir(folder):
return folder, SETTINGS.get('pdb_local_divided', True)
else:
                LOGGER.warn('PDB local folder {0} is not accessible.'
.format(repr(folder)))
else:
if isdir(folder):
folder = abspath(folder)
LOGGER.info('Local PDB folder is set: {0}'.format(repr(folder)))
if divided:
LOGGER.info('wwPDB divided folder structure will be assumed.')
else:
LOGGER.info('A plain folder structure will be assumed.')
SETTINGS['pdb_local_folder'] = folder
SETTINGS['pdb_local_divided'] = bool(divided)
SETTINGS.save()
else:
current = SETTINGS.pop('pdb_local_folder')
if current:
LOGGER.info('PDB folder {0} is released.'
.format(repr(current)))
SETTINGS.pop('pdb_local_divided')
SETTINGS.save()
else:
raise IOError('{0} is not a valid path.'.format(repr(folder)))
wwpdb.pathPDBFolder = pathPDBFolder
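# A usage sketch for the folder setting above (hypothetical path; kept as
# comments):
#
#     pathPDBFolder('/tmp/pdb')            # adopt a plain (non-divided) layout
#     folder, divided = pathPDBFolder()    # query the current setting
#     pathPDBFolder('')                    # release the folder again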
def pathPDBMirror(path=None, format=None):
    """Returns or specifies the PDB mirror path to be used by :func:`.fetchPDB`.
To release the current mirror, pass an invalid path, e.g. ``path=''``.
If you are keeping a partial mirror, such as PDB files in
:file:`/data/structures/divided/pdb/` folder, specify *format*, which is
``'pdb'`` in this case."""
if path is None:
path = SETTINGS.get('pdb_mirror_path')
format = SETTINGS.get('pdb_mirror_format', None)
if path:
if isdir(path):
if format is None:
return path
else:
return path, format
else:
                LOGGER.warning('PDB mirror path {0} is not accessible.'
.format(repr(path)))
else:
if isdir(path):
path = abspath(path)
LOGGER.info('Local PDB mirror path is set: {0}'
.format(repr(path)))
SETTINGS['pdb_mirror_path'] = path
SETTINGS['pdb_mirror_format'] = format
SETTINGS.save()
else:
current = SETTINGS.pop('pdb_mirror_path')
if current:
LOGGER.info('PDB mirror {0} is released.'
.format(repr(current)))
SETTINGS.save()
else:
raise IOError('{0} is not a valid path.'.format(repr(path)))
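# A usage sketch for the mirror setting above (hypothetical paths; kept as
# comments):
#
#     pathPDBMirror('/data/pdb')           # full local mirror
#     pathPDBMirror('/data/pdb', 'pdb')    # partial mirror holding only PDB files
#     pathPDBMirror('')                    # release the mirror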
def fetchPDBfromMirror(*pdb, **kwargs):
"""Returns path(s) to PDB (default), PDBML, or mmCIF file(s) for specified
*pdb* identifier(s). If a *folder* is specified, files will be copied
    into this folder. If *compressed* is **False**, files will be decompressed.
*format* argument can be used to get `PDBML <http://pdbml.pdb.org/>`_ and
`mmCIF <http://mmcif.pdb.org/>`_ files: ``format='cif'`` will fetch an
mmCIF file, and ``format='xml'`` will fetch a PDBML file. If PDBML header
file is desired, ``noatom=True`` argument will do the job."""
mirror = pathPDBMirror()
if mirror is None:
raise IOError('no mirror path is set')
try:
mirror, mirror_format = mirror
except ValueError:
mirror_format = None
format = str(kwargs.pop('format', 'pdb')).lower()
if kwargs.get('check', True):
identifiers = checkIdentifiers(*pdb)
else:
identifiers = list(pdb)
if format == 'pdb':
ftp_divided = 'data/structures/divided/pdb'
ftp_pdbext = '.ent.gz'
ftp_prefix = 'pdb'
extension = '.pdb'
elif format == 'xml':
if bool(kwargs.pop('noatom', False)):
ftp_divided = 'data/structures/divided/XML-noatom'
ftp_pdbext = '-noatom.xml.gz'
extension = '-noatom.xml'
else:
ftp_divided = 'data/structures/divided/XML'
ftp_pdbext = '.xml.gz'
extension = '.xml'
ftp_prefix = ''
elif format == 'cif':
ftp_divided = 'data/structures/divided/mmCIF'
ftp_pdbext = '.cif.gz'
ftp_prefix = ''
extension = '.cif'
else:
if format:
raise ValueError('{0} is not a recognized format'
.format(repr(format)))
else:
raise ValueError('please specify a valid format')
if mirror_format:
if mirror_format.lower() != format:
raise IOError('mirror contains only ' + mirror_format + ' files')
ftp_divided = ''
else:
ftp_divided = join(*ftp_divided.split('/'))
folder = kwargs.get('folder')
compressed = kwargs.get('compressed', True)
filenames = []
append = filenames.append
success = 0
failure = 0
for pdb in identifiers:
if pdb is None:
append(None)
continue
fn = join(mirror, ftp_divided, pdb[1:3],
ftp_prefix + pdb + ftp_pdbext)
if isfile(fn):
if folder or not compressed:
if compressed:
fn = copyFile(fn, join(folder or '.',
pdb + extension + '.gz'))
else:
fn = gunzip(fn, join(folder or '.', pdb + extension))
append(normpath(fn))
success += 1
else:
append(None)
failure += 1
if len(identifiers) == 1:
fn = filenames[0]
if success:
LOGGER.debug('PDB file is found in the local mirror ({0}).'
.format(sympath(fn)))
return fn
else:
LOGGER.debug('PDB files found in the local mirror ({0} found, '
'{1} missed).'.format(success, failure))
return filenames
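# A fetch sketch, assuming a mirror has been configured and using illustrative
# identifiers (kept as comments):
#
#     fn = fetchPDBfromMirror('1mkp')                       # path to one file
#     fns = fetchPDBfromMirror('1mkp', '1r39', format='cif', folder='.')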
def fetchPDB(*pdb, **kwargs):
"""Returns path(s) to PDB file(s) for specified *pdb* identifier(s). Files
    will be sought in the user-specified *folder* or current working directory, and
then in local PDB folder and mirror, if they are available. If *copy*
is set **True**, files will be copied into *folder*. If *compressed* is
**False**, all files will be decompressed. See :func:`pathPDBFolder` and
:func:`pathPDBMirror` for managing local resources, :func:`.fetchPDBviaFTP`
and :func:`.fetchPDBviaHTTP` for downloading files from PDB servers."""
if len(pdb) == 1 and isinstance(pdb[0], list):
pdb = pdb[0]
if 'format' in kwargs and kwargs.get('format') != 'pdb':
return fetchPDBviaFTP(*pdb, **kwargs)
identifiers = checkIdentifiers(*pdb)
folder = kwargs.get('folder', '.')
compressed = kwargs.get('compressed')
# check *folder* specified by the user, usually pwd ('.')
filedict = findPDBFiles(folder, compressed=compressed)
filenames = []
not_found = []
exists = 0
for i, pdb in enumerate(identifiers):
if pdb is None:
filenames.append(None)
elif pdb in filedict:
filenames.append(filedict[pdb])
exists += 1
else:
filenames.append(None)
not_found.append((i, pdb))
if not not_found:
if len(filenames) == 1:
filenames = filenames[0]
if exists:
LOGGER.debug('PDB file is found in working directory ({0}).'
.format(sympath(filenames)))
return filenames
if not isWritable(folder):
raise IOError('permission to write in {0} is denied, please '
'specify another folder'.format(folder))
if compressed is not None and not compressed:
filedict = findPDBFiles(folder, compressed=True)
not_found, decompress = [], not_found
for i, pdb in decompress:
if pdb in filedict:
fn = filedict[pdb]
filenames[i] = gunzip(fn, splitext(fn)[0])
else:
not_found.append((i, pdb))
if not not_found:
return filenames[0] if len(identifiers) == 1 else filenames
local_folder = pathPDBFolder()
copy = kwargs.setdefault('copy', False)
if local_folder:
local_folder, is_divided = local_folder
temp, not_found = not_found, []
for i, pdb in temp:
if is_divided:
fn = join(local_folder, pdb[1:3], 'pdb' + pdb + '.pdb.gz')
else:
fn = join(local_folder, pdb + '.pdb.gz')
if isfile(fn):
if copy or not compressed and compressed is not None:
if compressed:
                        fn = copyFile(fn, join(folder, pdb + '.pdb.gz'))
else:
fn = gunzip(fn, join(folder, pdb + '.pdb'))
filenames[i] = normpath(fn)
else:
not_found.append((i, pdb))
if not not_found:
if len(identifiers) == 1:
fn = filenames[0]
items = fn.split(pathsep)
if len(items) > 5:
fndisp = pathsep.join(items[:3] + ['...'] + items[-1:])
else:
fndisp = relpath(fn)
LOGGER.debug('PDB file is found in the local folder ({0}).'
.format(fndisp))
return fn
else:
return filenames
if kwargs['copy'] or (compressed is not None and not compressed):
kwargs['folder'] = folder
downloads = [pdb for i, pdb in not_found]
fns = None
try:
fns = fetchPDBfromMirror(*downloads, **kwargs)
except IOError:
pass
else:
if len(downloads) == 1: fns = [fns]
temp, not_found = not_found, []
for i, fn in enumerate(fns):
if fn is None:
not_found.append(temp[i])
else:
i, _ = temp[i]
filenames[i] = fn
if not not_found:
return filenames[0] if len(identifiers) == 1 else filenames
if fns:
downloads = [pdb for i, pdb in not_found]
fns = None
tp = kwargs.pop('tp', None)
if tp is not None:
tp = tp.lower()
if tp == 'http':
try:
fns = fetchPDBviaHTTP(*downloads, check=False, **kwargs)
except Exception as err:
LOGGER.warn('Downloading PDB files via HTTP failed '
'({0}).'.format(str(err)))
elif tp == 'ftp':
try:
fns = fetchPDBviaFTP(*downloads, check=False, **kwargs)
except Exception as err:
LOGGER.warn('Downloading PDB files via FTP failed '
'({0}).'.format(str(err)))
else:
tryHTTP = False
try:
fns = fetchPDBviaFTP(*downloads, check=False, **kwargs)
except Exception as err:
tryHTTP = True
if fns is None or isinstance(fns, list) and None in fns:
tryHTTP = True
elif isinstance(fns, list):
downloads = [not_found[i][1] for i in range(len(fns)) if fns[i] is None]
if len(downloads) > 0:
tryHTTP = True
if tryHTTP:
LOGGER.info('Downloading PDB files via FTP failed, '
'trying HTTP.')
try:
fns = fetchPDBviaHTTP(*downloads, check=False, **kwargs)
except Exception as err:
LOGGER.warn('Downloading PDB files via HTTP also failed '
'({0}).'.format(str(err)))
if len(downloads) == 1: fns = [fns]
if fns:
for i, fn in zip([i for i, pdb in not_found], fns):
filenames[i] = fn
return filenames[0] if len(identifiers) == 1 else filenames
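# A sketch of the typical entry point (illustrative identifiers; kept as
# comments):
#
#     fn = fetchPDB('1mkp', compressed=False)                # decompressed copy
#     fns = fetchPDB(['1mkp', '1r39'], folder='pdbfiles', copy=True)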
def iterPDBFilenames(path=None, sort=False, unique=True, **kwargs):
"""Yield PDB filenames in *path* specified by the user or in local PDB
    mirror (see :func:`.pathPDBMirror`). When *unique* is **True**, only
    one of potentially identical files will be yielded (e.g. :file:`1mkp.pdb`
    and :file:`pdb1mkp.ent.gz`). :file:`.pdb` and :file:`.ent` extensions,
and compressed files are considered."""
from re import compile, IGNORECASE
if path is None or kwargs.get('mirror') is True:
if path is None:
path = pathPDBMirror()
if path is None:
raise ValueError('path must be specified or PDB mirror path '
'must be set')
if sort:
pdbs = glob(join(path, 'data/structures/divided/pdb/',
'*/*.ent.gz'))
pdbs.sort(reverse=kwargs.get('reverse'))
else:
pdbs = iglob(join(path, 'data/structures/divided/pdb/',
'*/*.ent.gz'))
for fn in pdbs:
yield fn
else:
        unique = bool(unique)
if unique:
yielded = set()
compressed = kwargs.get('compressed')
if compressed is None:
            pdbext = compile(r'\.(pdb|ent)(\.gz)?$', IGNORECASE)
        elif compressed:
            pdbext = compile(r'\.(pdb|ent)\.gz$', IGNORECASE)
        else:
            pdbext = compile(r'\.(pdb|ent)$', IGNORECASE)
pdbs = [pdb for pdb in iglob(join(path, '*')) if pdbext.search(pdb)]
if sort:
pdbs.sort(reverse=kwargs.get('reverse'))
for fn in pdbs:
if unique:
pdb = splitext(splitext(split(fn)[1])[0])[0]
if len(pdb) == 7 and pdb.startswith('pdb'):
pdb = pdb[3:]
if pdb in yielded:
continue
else:
yielded.add(pdb)
yield fn
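# An iteration sketch (hypothetical folder; kept as comments):
#
#     for fn in iterPDBFilenames('pdbfiles', sort=True):
#         print(fn)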
def findPDBFiles(path, case=None, **kwargs):
"""Returns a dictionary that maps PDB filenames to file paths. If *case*
is specified (``'u[pper]'`` or ``'l[ower]'``), dictionary keys (filenames)
will be modified accordingly. If a PDB filename has :file:`pdb` prefix,
it will be trimmed, for example ``'1mkp'`` will be mapped to file path
    :file:`./pdb1mkp.pdb.gz`. If a file is present with multiple extensions,
only one of them will be returned. See also :func:`.iterPDBFilenames`."""
case = str(case).lower()
upper = lower = False
if case.startswith('u'):
upper = True
elif case.startswith('l'):
lower = True
pdbs = {}
for fn in iterPDBFilenames(path, sort=True, reverse=True, **kwargs):
fn = normpath(fn)
pdb = splitext(splitext(split(fn)[1])[0])[0]
if len(pdb) == 7 and pdb.startswith('pdb'):
pdb = pdb[3:]
if upper:
pdbs[pdb.upper()] = fn
elif lower:
pdbs[pdb.lower()] = fn
else:
pdbs[pdb] = fn
return pdbs
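# A lookup sketch (hypothetical folder; kept as comments):
#
#     pdbs = findPDBFiles('pdbfiles', case='l')
#     path = pdbs.get('1mkp')      # e.g. 'pdbfiles/pdb1mkp.pdb.gz'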
| 36.332594
| 84
| 0.554864
|
26ccb580be10a6d722cd5312c0ef793f30a2ba61
| 2,462
|
py
|
Python
|
Server_DL/main_dl.py
|
nhattruongpham/APS
|
46166236d34b32457b71d6ef52423bcbf71bc1e8
|
[
"MIT"
] | null | null | null |
Server_DL/main_dl.py
|
nhattruongpham/APS
|
46166236d34b32457b71d6ef52423bcbf71bc1e8
|
[
"MIT"
] | null | null | null |
Server_DL/main_dl.py
|
nhattruongpham/APS
|
46166236d34b32457b71d6ef52423bcbf71bc1e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#-*- coding: utf8 -*-
from getImage import *
from insert_db import *
from mul_update_db_4 import *
from select_db import *
from mul_update_db_6 import *
from getPlate import *
from LCD import *
from MFRC522 import *
from myconnutils import *
from license_plate_recognize import *
import time
import datetime
import RPi.GPIO as GPIO
import cv2
import numpy as np
import os
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
MIFAREReader = MFRC522()
connection = getConnection()
cursor = connection.cursor()
_number = 'ID1'
def main():
_idx = 0
while True:
        (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
        (status, uid) = MIFAREReader.MFRC522_Anticoll()
if (status == MIFAREReader.MI_OK):
start = time.time()
_timeIn = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            _uid = str(uid[0]) + "." + str(uid[1]) + "." + str(uid[2]) + "." + str(uid[3])
lcd_string("ID:" + _uid, LCD_LINE_1)
_idx = _idx + 1
InsertDB("ParkingSlot", "AutoPlate", "UID", _uid, "TimeIN", _timeIn)
Mul_UpdateDB_4("ParkingSlot", "Slot", "INP", _idx, "Number", _number)
_money = SelectDB("Money", "ParkingSlot", "DataSample", "UID", _uid)
lcd_string("Balance: " + str(_money), LCD_LINE_1)
#imgOriginalScene = cv2.imread("2.jpg")
imgOriginalScene = GetImage()
_license_plate, _plate_image_full, _plate_image_low = getPlate_OCR(imgOriginalScene)
cv2.imwrite(os.path.join(os.path.join(os.getcwd(), "result_full/" + _license_plate + "_" + str(_idx) + ".png")), _plate_image_full)
cv2.imwrite(os.path.join(os.path.join(os.getcwd(), "result_low/" + _license_plate + "_" + str(_idx) + ".png")), _plate_image_low)
lcd_string("Plate: " + _license_plate, LCD_LINE_2)
Mul_UpdateDB_6("ParkingSlot", "AutoPlate", "PlateIN", _license_plate, "Balance", _money, "UID", _uid)
_outp = SelectDB("OUTP", "ParkingSlot", "Slot", "Number", _number)
_inp = SelectDB("INP", "ParkingSlot", "Slot", "Number", _number)
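            # Free slots = capacity (10) - cars that entered + cars that left;
            # e.g. 7 entries and 2 exits leave 10 - 7 + 2 = 5 slots.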
            available = 10 - int(_inp) + int(_outp)
lcd_string("Available: " + str(available), LCD_LINE_2)
if (available < 5):
lcd_string("ParkingSlot Full", LCD_LINE_2)
print("Time: ", time.time() - start)
if __name__ == "__main__":
main()
| 38.46875
| 139
| 0.621852
|
3fea0257cb9a18df494daa646fb8fc0da6b8c77d
| 1,008
|
py
|
Python
|
skaben/shape/views.py
|
skaben/server_core
|
46ba0551459790dda75abc9cf0ff147fae6d62e8
|
[
"MIT"
] | null | null | null |
skaben/shape/views.py
|
skaben/server_core
|
46ba0551459790dda75abc9cf0ff147fae6d62e8
|
[
"MIT"
] | 12
|
2020-08-14T12:43:04.000Z
|
2021-09-01T00:22:26.000Z
|
skaben/shape/views.py
|
skaben/server_core
|
46ba0551459790dda75abc9cf0ff147fae6d62e8
|
[
"MIT"
] | null | null | null |
from core.views import DynamicAuthMixin
from rest_framework import status, viewsets
from .models import AccessCode, MenuItem, Permission, WorkMode
from .serializers import (
AccessCodeSerializer,
MenuItemSerializer,
PermissionSerializer,
WorkModeSerializer,
)
class AccessCodeViewSet(viewsets.ModelViewSet, DynamicAuthMixin):
    """ Manage access codes in database """
queryset = AccessCode.objects.all()
serializer_class = AccessCodeSerializer
class PermissionViewSet(viewsets.ModelViewSet, DynamicAuthMixin):
    """ Manage permissions in database """
queryset = Permission.objects.all()
serializer_class = PermissionSerializer
class MenuItemViewSet(viewsets.ModelViewSet, DynamicAuthMixin):
    """ Manage menu items in database """
queryset = MenuItem.objects.all()
serializer_class = MenuItemSerializer
class WorkModeViewSet(viewsets.ModelViewSet, DynamicAuthMixin):
    """ Manage work modes in database """
queryset = WorkMode.objects.all()
serializer_class = WorkModeSerializer
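# A routing sketch for wiring these viewsets (hypothetical urls.py using DRF's
# DefaultRouter; kept as comments):
#
#     from rest_framework.routers import DefaultRouter
#
#     router = DefaultRouter()
#     router.register('accesscode', AccessCodeViewSet)
#     router.register('permission', PermissionViewSet)
#     router.register('menuitem', MenuItemViewSet)
#     router.register('workmode', WorkModeViewSet)
#
#     urlpatterns = router.urls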
| 29.647059
| 65
| 0.765873
|
65a8a0c9529b2e9777d5350a74cbfbac3f65b765
| 27,171
|
py
|
Python
|
tests/test_auto_sharding_bert.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 114
|
2022-03-02T20:38:16.000Z
|
2022-03-31T20:41:50.000Z
|
tests/test_auto_sharding_bert.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 6
|
2022-03-09T22:04:50.000Z
|
2022-03-30T17:53:15.000Z
|
tests/test_auto_sharding_bert.py
|
alpa-projects/alpa
|
2c54de2a8fa8a48c77069f4bad802f4e8fa6d126
|
[
"Apache-2.0"
] | 5
|
2022-03-05T12:04:31.000Z
|
2022-03-31T03:55:42.000Z
|
"""Test auto sharding on transformer layers and bert models."""
import unittest
import jax
import jax.numpy as jnp
import numpy as np
from flax import optim, linen as nn
from alpa import parallelize, ShardParallel, LocalPhysicalDeviceMesh, AutoShardingOption
from alpa.model.bert_model import (BertConfig, FlaxBertLayerCollection,
FlaxBertForMaskedLMModule)
from alpa.util import count_communication_primitives
from test_auto_sharding_mlp import (
assert_all_replicated, assert_close, assert_column_partitioned,
assert_data_parallel_cost, assert_fully_sharded, assert_less_equal,
assert_sharded, assert_replicated_column_partitioned,
assert_replicated_row_partitioned, assert_row_partitioned, is_fully_sharded,
assert_sharding_zero_stage_3)
class AutoShardingAttentionTest(unittest.TestCase):
def setUp(self):
assert len(jax.local_devices()) >= 4
self.physical_mesh = LocalPhysicalDeviceMesh(jax.local_devices()[:4])
self.as_option = AutoShardingOption()
def get_device_mesh(self, shape, mesh_alpha, mesh_beta):
return self.physical_mesh.get_logical_mesh(shape, mesh_alpha, mesh_beta)
def run_bert_layers(self, batch_size, seq_len, num_layers, hidden_size,
num_heads, deterministic, use_remat, device_mesh):
@parallelize(method=ShardParallel(devices=device_mesh,
auto_sharding_option=self.as_option))
def train_step(optimizer, batch, deterministic, apply_fn):
def loss_func(params):
rngs = {"dropout": batch["rng"]}
out = apply_fn(params,
batch["hidden_states"],
batch["attention_mask"],
deterministic,
rngs=rngs)[0]
return jnp.mean((out - batch["label"])**2)
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
hidden_states = jnp.ones((batch_size, seq_len, hidden_size),
dtype=jnp.float32)
attention_mask = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
label = jnp.ones((batch_size, seq_len, hidden_size), dtype=jnp.float32)
model = FlaxBertLayerCollection(
BertConfig(num_hidden_layers=num_layers,
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_attention_heads=num_heads,
gradient_checkpointing=use_remat))
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, hidden_states, attention_mask)
optimizer = optim.Adam(1e-2).create(params)
# JIT compile
optimizer = train_step(
optimizer, {
"hidden_states": hidden_states,
"attention_mask": attention_mask,
"label": label,
"rng": rngkey
}, deterministic, model.apply)
# Get optimized HLO IR
executable = train_step.get_executable(
optimizer, {
"hidden_states": hidden_states,
"attention_mask": attention_mask,
"label": label,
"rng": rngkey
}, deterministic, model.apply)
return (optimizer, executable.get_hlo_text(),
executable.auto_sharding_objective)
def run_bert_mlm(self, batch_size, seq_len, num_layers, hidden_size,
num_heads, vocab_size, deterministic, device_mesh):
@parallelize(method=ShardParallel(devices=device_mesh,
auto_sharding_option=self.as_option))
def train_step(optimizer, batch):
def loss_func(params):
rngs = {"dropout": batch["rng"]}
logits = model.apply(params,
batch["input_ids"],
batch["attention_mask"],
batch["token_type_ids"],
batch["position_ids"],
deterministic=deterministic,
rngs=rngs)[0]
label_mask = jnp.where(batch["labels"] > 0, 1.0, 0.0)
labels = jax.nn.one_hot(batch["labels"], logits.shape[-1])
loss = -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1),
axis=-1)
return (label_mask * loss).sum() / label_mask.sum() * 0.1234
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
input_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
attention_mask = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
token_type_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
position_ids = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
labels = jnp.ones((batch_size, seq_len), dtype=jnp.int32)
model = FlaxBertForMaskedLMModule(
BertConfig(
num_hidden_layers=num_layers,
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_attention_heads=num_heads,
vocab_size=vocab_size,
max_position_embeddings=seq_len,
))
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, input_ids, attention_mask, token_type_ids,
position_ids)
optimizer = optim.Adam(1e-2).create(params)
# JIT compile
optimizer = train_step(
optimizer, {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"position_ids": position_ids,
"labels": labels,
"rng": rngkey
})
# Get optimized HLO IR
executable = train_step.get_executable(
optimizer, {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"position_ids": position_ids,
"labels": labels,
"rng": rngkey
})
return (optimizer, executable.get_hlo_text(),
executable.auto_sharding_objective)
def test_bert_layer_data_parallel(self):
batch_size = 64
seq_len = 64
num_layers = 2
hidden_size = 32
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh,
self.as_option, i)
def test_bert_layer_model_parallel(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
# Check communication cost
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
if self.as_option.prefer_reduce_scatter:
assert n_total == num_layers * 4 - 1
assert n_all_reduce == num_layers * 4 - 1
assert n_total == n_all_reduce
else:
assert n_total == num_layers * 4 - 1
assert n_all_reduce == num_layers * 4 - 1
assert n_total == n_all_reduce
# Check sharding specification
for k in range(num_layers):
params = optimizer.target["params"][str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_column_partitioned(weights[j], mesh_shape[i], i)
else:
assert_row_partitioned(weights[j], mesh_shape[i], i)
def test_bert_layer_2d_mesh(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = False
# Test on different logical mesh shapes
mesh_shape = [2, 2]
device_mesh = self.get_device_mesh(mesh_shape, [2, 2], [1, 0.1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
# Check communication cost
params = jax.tree_util.tree_leaves(optimizer.target)
expected = (sum(
device_mesh.all_reduce_cost(
np.prod(x.shape) * 4 / mesh_shape[1], 0)
for x in params) + device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4 / mesh_shape[0], 1) *
(num_layers * 4 - 1))
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir,
ignore_scalar_all_reduce=True))
if self.as_option.prefer_reduce_scatter:
assert n_all_reduce == num_layers * 4 - 1
assert n_reduce_scatter == 2
assert n_all_gather == 1
assert n_total == n_all_reduce + n_reduce_scatter + n_all_gather
else:
assert n_all_reduce == num_layers * 4
assert n_total == n_all_reduce
# Check sharding specification
if self.as_option.prefer_reduce_scatter:
for weight in jax.tree_util.tree_leaves(
optimizer.state.param_states):
if len(weight.shape) > 1:
assert_fully_sharded(weight)
else:
for k in range(num_layers):
params = optimizer.target["params"][str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_replicated_column_partitioned(
weights[j], mesh_shape)
else:
assert_replicated_row_partitioned(
weights[j], mesh_shape)
def test_bert_layer_force_batch_dim_mapping(self):
batch_size = 64
seq_len = 64
num_layers = 2
hidden_size = 32
num_heads = 8
deterministic = False
use_remat = False
self.as_option.force_batch_dim_to_mesh_dim = 0
# data parallel
device_mesh = self.get_device_mesh([4, 1], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh, self.as_option, 0)
# model parallel (case 1)
device_mesh = self.get_device_mesh([1, 4], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, 1)
assert_close(objective, expected)
# model parallel (case 2)
batch_size = 1
device_mesh = self.get_device_mesh([1, 4], [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 4 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, 1)
assert_close(objective, expected)
def test_embedding_2d_mesh(self):
vocab_size = 1024
hidden_size = 8
batch_size = 8
seq_len = 8
mesh_shape = [2, 2]
# Model and training step definition
class Model(nn.Module):
"""Tied input and output embedding."""
def setup(self):
self.embed = nn.Embed(vocab_size, hidden_size)
def __call__(self, x):
x = self.embed(x)
embed = self.embed.variables["params"]["embedding"]
x = x @ embed.T
return x
logical_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
@parallelize(method=ShardParallel(devices=logical_mesh))
def func(optimizer, x, y):
def loss_func(params):
out = model.apply(params, x)
y_ = jax.nn.one_hot(y, out.shape[-1])
loss = -jnp.sum(y_ * jax.nn.log_softmax(out, axis=-1), axis=-1)
return loss.sum()
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Init model and optimizer
x = jnp.ones((batch_size, seq_len), np.int32)
y = jnp.ones((batch_size, seq_len), np.int32)
model = Model()
rngkey = jax.random.PRNGKey(0)
params = model.init(rngkey, x)
optimizer = optim.Adam(1e-2).create(params)
# JIT Compile
        optimizer = func(optimizer, x, y)
# Check communication cost
executable = func.get_executable(optimizer, x, y)
hlo_ir = executable.get_hlo_text()
objective = executable.auto_sharding_objective
params = jax.tree_util.tree_leaves(optimizer.target)
expected = (
logical_mesh.all_reduce_cost(
vocab_size * hidden_size * 4 / mesh_shape[1], 0) +
logical_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4 / mesh_shape[0], 1) * 2 +
logical_mesh.all_reduce_cost(
batch_size * seq_len * 4 / mesh_shape[0], 1) * 2)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
assert n_total == n_all_reduce
def test_bert_mlm_data_parallel(self):
batch_size = 32
seq_len = 32
num_layers = 2
hidden_size = 16
num_heads = 4
vocab_size = 128
deterministic = False
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads,
vocab_size, deterministic, device_mesh)
if self.as_option.force_zero_stage_3:
# only the weight and opt_state of token_embed is not sharded
assert_sharding_zero_stage_3(optimizer, 3)
continue
assert_data_parallel_cost(optimizer, hlo_ir, objective, device_mesh,
self.as_option, i, 1)
@unittest.skip("This test is broken after we disallow some replicated iota."
)
def test_bert_mlm_model_parallel(self):
batch_size = 16
seq_len = 16
num_layers = 2
hidden_size = 128
num_heads = 4
vocab_size = 512
deterministic = False
self.as_option.allow_all_gather = False # Temporary hack
self.as_option.allow_all_to_all = False # Temporary hack
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads,
vocab_size, deterministic, device_mesh)
# Check communication cost
# expected_cost = embed.forward (1) + embed.backward(2) +
# LM_head.forward (1) + LM_head.backward (1) +
# LM_head.weight.backward (1) + log_softmax.forward (2) +
# transformer.forward (2 * num_layers) + transformer.backward (2 * num_layers)
#
# Note that the final cost is different from this estimated cost in ILP solver.
# The SPMD partitioner will eliminate some unnecessary communication in favor of
            # redundant computation (e.g., it will eliminate the all-reduce in embed.backward).
expected = (
device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i) * 5 +
device_mesh.all_reduce_cost(hidden_size * hidden_size * 4, i) +
device_mesh.all_reduce_cost(batch_size * seq_len * 4, i) * 2 +
device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i) * num_layers * 4)
assert_close(objective, expected)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
# real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
            #                             embed.forward (1) + embed.backward (1)
assert n_all_reduce == num_layers * 4 + 4
assert n_total == n_all_reduce
# Check sharding specification
embed_weight = optimizer.target["params"]["bert"]["embeddings"][
"word_embeddings"]["embedding"]
lm_head = optimizer.target["params"]["cls"]["predictions"][
"transform"]["dense"]["kernel"]
assert_row_partitioned(embed_weight, mesh_shape[i], i)
assert_all_replicated(lm_head, np.prod(mesh_shape))
for k in range(num_layers):
params = optimizer.target["params"]["bert"]["encoder"]["layer"][
str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_column_partitioned(weights[j], mesh_shape[i], i)
else:
assert_row_partitioned(weights[j], mesh_shape[i], i)
def test_bert_mlm_2d_mesh(self):
batch_size = 4
seq_len = 4
num_layers = 2
hidden_size = 512
num_heads = 4
vocab_size = 4096
deterministic = False
# To generate the desired strategy, we have to turn off mixed mesh shape and all-gather
# and enable recomputing heavy ops.
self.as_option.allow_recompute_heavy_op = True
self.as_option.allow_all_gather = False
self.as_option.allow_mixed_mesh_shape = False
mesh_shape = [2, 2]
device_mesh = self.get_device_mesh(mesh_shape, [2, 2], [1, 0.1])
optimizer, hlo_ir, objective = self.run_bert_mlm(
batch_size, seq_len, num_layers, hidden_size, num_heads, vocab_size,
deterministic, device_mesh)
# Check communication cost.
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir,
ignore_scalar_all_reduce=True))
if self.as_option.prefer_reduce_scatter:
assert n_all_reduce == 4 * num_layers + 2 + 2
assert n_reduce_scatter <= 3 # The correct number should be 2,
# but GpuMultiOutputFusion can make
# some reduce-scatter unable to be combined
assert n_all_gather == 1
assert n_total == n_all_reduce + n_all_gather + n_reduce_scatter
else:
# real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
            #                             embed.forward (1) + embed.backward (1) + weights (1)
assert n_all_reduce == 4 * num_layers + 2 + 2 + 1
assert n_total == n_all_reduce
# Check sharding specification
assert "s32[4,4,4096]{2,1,0} iota()" not in hlo_ir
assert "s32[2,4,2048]{2,1,0} iota()" in hlo_ir
if self.as_option.prefer_reduce_scatter:
            num_not_sharded = 0  # allow the token_type_embeddings to be left unpartitioned.
for weight in jax.tree_util.tree_leaves(
optimizer.state.param_states):
if len(weight.shape) > 1:
if not is_fully_sharded(weight):
num_not_sharded += 1
assert num_not_sharded <= 2
else:
embed_weight = (optimizer.target["params"]["bert"]["embeddings"]
["word_embeddings"]["embedding"])
lm_head = (optimizer.target["params"]["cls"]["predictions"]
["transform"]["dense"]["kernel"])
assert_replicated_row_partitioned(embed_weight, mesh_shape)
assert_all_replicated(lm_head, np.prod(mesh_shape))
for k in range(num_layers):
params = optimizer.target["params"]["bert"]["encoder"]["layer"][
str(k)]
weights = [
params["attention"]["self"]["qvk_combined"]["kernel"],
params["attention"]["output"]["dense"]["kernel"],
params["intermediate"]["dense"]["kernel"],
params["output"]["dense"]["kernel"],
]
for j in range(len(weights)):
if j % 2 == 0:
assert_replicated_column_partitioned(
weights[j], mesh_shape)
else:
assert_replicated_row_partitioned(
weights[j], mesh_shape)
def test_bert_layer_data_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_data_parallel()
def test_bert_layer_model_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_model_parallel()
def test_bert_layer_2d_mesh_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_layer_2d_mesh()
def test_bert_mlm_data_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_data_parallel()
def test_bert_mlm_data_parallel_reduce_scatter_zero_3(self):
self.as_option.force_zero_stage_3 = True
self.as_option.force_zero_stage_3_all_gather_threshold = 1
self.test_bert_mlm_data_parallel()
@unittest.skip("This test is broken after we disallow some replicated iota."
)
def test_bert_mlm_model_parallel_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_model_parallel()
def test_bert_mlm_2d_mesh_reduce_scatter(self):
self.as_option.prefer_reduce_scatter = True
self.test_bert_mlm_2d_mesh()
def test_bert_layer_model_parallel_remat(self):
batch_size = 8
seq_len = 8
num_layers = 2
hidden_size = 128
num_heads = 8
deterministic = False
use_remat = True
# Test on different logical mesh shapes
for i, mesh_shape in enumerate([(4, 1), (1, 4)]):
device_mesh = self.get_device_mesh(mesh_shape, [1, 1], [1, 1])
optimizer, hlo_ir, objective = self.run_bert_layers(
batch_size, seq_len, num_layers, hidden_size, num_heads,
deterministic, use_remat, device_mesh)
expected = (num_layers * 6 - 1) * device_mesh.all_reduce_cost(
batch_size * seq_len * hidden_size * 4, i)
assert_close(objective, expected)
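# (illustration only: with num_layers = 2 as set above, num_layers * 6 - 1 == 11, so the
#  expected objective is 11 all-reduce costs and the primitive counts checked below are 11 as well)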
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ = (
count_communication_primitives(hlo_ir))
assert n_total == num_layers * 6 - 1
assert n_all_reduce == num_layers * 6 - 1
assert n_total == n_all_reduce
def suite():
suite = unittest.TestSuite()
def add(name):
suite.addTest(AutoShardingAttentionTest(name))
add("test_bert_layer_data_parallel")
add("test_bert_layer_model_parallel")
add("test_bert_layer_2d_mesh")
add("test_bert_layer_force_batch_dim_mapping")
add("test_embedding_2d_mesh")
add("test_bert_mlm_data_parallel")
add("test_bert_mlm_model_parallel")
add("test_bert_mlm_2d_mesh")
add("test_bert_layer_data_parallel_reduce_scatter")
add("test_bert_layer_model_parallel_reduce_scatter")
add("test_bert_layer_2d_mesh_reduce_scatter")
add("test_bert_mlm_data_parallel_reduce_scatter")
add("test_bert_mlm_model_parallel_reduce_scatter")
add("test_bert_mlm_2d_mesh_reduce_scatter")
add("test_bert_mlm_data_parallel_reduce_scatter_zero_3")
add("test_bert_layer_model_parallel_remat")
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(suite())
| 41.673313
| 106
| 0.581208
|
22676eb943b62a7e6c4cad675d1021dd479e7e9b
| 786
|
py
|
Python
|
test/test_taxon_database.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 9
|
2018-06-14T23:32:01.000Z
|
2019-09-29T00:42:59.000Z
|
test/test_taxon_database.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 16
|
2018-06-26T21:43:30.000Z
|
2018-07-07T01:18:04.000Z
|
test/test_taxon_database.py
|
sckott/pytaxa
|
ea9f47dfbb3bf5bba53d82eb2bc7116051af87fb
|
[
"MIT"
] | 1
|
2018-08-05T21:49:11.000Z
|
2018-08-05T21:49:11.000Z
|
"""Tests for taxon_database"""
import os
# from nose.tools import *
import pytest
from pytaxa import constructors as c
def test_taxon_database():
"taxon_database - param: database"
res = c.taxon_database('ncbi')
assert dict == res.__class__
assert 4 == len(res)
assert 'ncbi' == res['database']
def test_taxon_database_other_params():
"taxon_database - param: all"
res = c.taxon_database("ncbi",
"http://www.ncbi.nlm.nih.gov/taxonomy",
"NCBI Taxonomy Database",
"*")
assert dict == res.__class__
assert 4 == len(res)
assert 'ncbi' == res['database']
assert "http://www.ncbi.nlm.nih.gov/taxonomy" == res['url']
assert "NCBI Taxonomy Database" == res['description']
assert "*" == res['id_regex']
| 30.230769
| 63
| 0.63486
|
acbeb430dce160ba5bee96ad70fd8a69435019be
| 4,969
|
py
|
Python
|
pythonProject1/venv/Lib/site-packages/pydantic/error_wrappers.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | 10
|
2020-06-11T23:20:03.000Z
|
2022-01-14T16:07:27.000Z
|
pythonProject1/venv/Lib/site-packages/pydantic/error_wrappers.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | 2
|
2021-05-12T00:09:30.000Z
|
2022-02-14T01:36:14.000Z
|
pythonProject1/venv/Lib/site-packages/pydantic/error_wrappers.py
|
mjtomlinson/CNE330_Python_1_Final_Project
|
05020806860937ef37b9a0ad2e27de4897a606de
|
[
"CC0-1.0"
] | 2
|
2020-06-22T09:46:57.000Z
|
2021-04-25T21:32:04.000Z
|
import json
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union
from .json import pydantic_encoder
from .utils import Representation
if TYPE_CHECKING:
from .main import BaseConfig # noqa: F401
from .types import ModelOrDc # noqa: F401
from .typing import ReprArgs
Loc = Tuple[Union[int, str], ...]
__all__ = 'ErrorWrapper', 'ValidationError'
class ErrorWrapper(Representation):
__slots__ = 'exc', '_loc'
def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None:
self.exc = exc
self._loc = loc
def loc_tuple(self) -> 'Loc':
if isinstance(self._loc, tuple):
return self._loc
else:
return (self._loc,)
def __repr_args__(self) -> 'ReprArgs':
return [('exc', self.exc), ('loc', self.loc_tuple())]
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
ErrorList = Union[Sequence[Any], ErrorWrapper]
class ValidationError(Representation, ValueError):
__slots__ = 'raw_errors', 'model', '_error_cache'
def __init__(self, errors: Sequence[ErrorList], model: 'ModelOrDc') -> None:
self.raw_errors = errors
self.model = model
self._error_cache: Optional[List[Dict[str, Any]]] = None
def errors(self) -> List[Dict[str, Any]]:
if self._error_cache is None:
try:
config = self.model.__config__ # type: ignore
except AttributeError:
config = self.model.__pydantic_model__.__config__ # type: ignore
self._error_cache = list(flatten_errors(self.raw_errors, config))
return self._error_cache
def json(self, *, indent: Union[None, int, str] = 2) -> str:
return json.dumps(self.errors(), indent=indent, default=pydantic_encoder)
def __str__(self) -> str:
errors = self.errors()
no_errors = len(errors)
return (
f'{no_errors} validation error{"" if no_errors == 1 else "s"} for {self.model.__name__}\n'
f'{display_errors(errors)}'
)
def __repr_args__(self) -> 'ReprArgs':
return [('model', self.model.__name__), ('errors', self.errors())]
def display_errors(errors: List[Dict[str, Any]]) -> str:
return '\n'.join(f'{_display_error_loc(e)}\n {e["msg"]} ({_display_error_type_and_ctx(e)})' for e in errors)
def _display_error_loc(error: Dict[str, Any]) -> str:
return ' -> '.join(str(l) for l in error['loc'])
def _display_error_type_and_ctx(error: Dict[str, Any]) -> str:
t = 'type=' + error['type']
ctx = error.get('ctx')
if ctx:
return t + ''.join(f'; {k}={v}' for k, v in ctx.items())
else:
return t
def flatten_errors(
errors: Sequence[Any], config: Type['BaseConfig'], loc: Optional['Loc'] = None
) -> Generator[Dict[str, Any], None, None]:
for error in errors:
if isinstance(error, ErrorWrapper):
if loc:
error_loc = loc + error.loc_tuple()
else:
error_loc = error.loc_tuple()
if isinstance(error.exc, ValidationError):
yield from flatten_errors(error.exc.raw_errors, config, error_loc)
else:
yield error_dict(error.exc, config, error_loc)
elif isinstance(error, list):
yield from flatten_errors(error, config, loc=loc)
else:
raise RuntimeError(f'Unknown error object: {error}')
def error_dict(exc: Exception, config: Type['BaseConfig'], loc: 'Loc') -> Dict[str, Any]:
type_ = get_exc_type(exc.__class__)
msg_template = config.error_msg_templates.get(type_) or getattr(exc, 'msg_template', None)
ctx = exc.__dict__
if msg_template:
msg = msg_template.format(**ctx)
else:
msg = str(exc)
d: Dict[str, Any] = {'loc': loc, 'msg': msg, 'type': type_}
if ctx:
d['ctx'] = ctx
return d
_EXC_TYPE_CACHE: Dict[Type[Exception], str] = {}
def get_exc_type(cls: Type[Exception]) -> str:
# slightly more efficient than using lru_cache since we don't need to worry about the cache filling up
try:
return _EXC_TYPE_CACHE[cls]
except KeyError:
r = _get_exc_type(cls)
_EXC_TYPE_CACHE[cls] = r
return r
def _get_exc_type(cls: Type[Exception]) -> str:
if issubclass(cls, AssertionError):
return 'assertion_error'
base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error'
if cls in (TypeError, ValueError):
# just TypeError or ValueError, no extra code
return base_name
# if it's not a TypeError or ValueError, we just take the lowercase of the exception name
# no chaining or snake case logic, use "code" for more complex error types.
code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower()
return base_name + '.' + code
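# Illustrative sketch (not part of pydantic; `ColorError` below is a made-up class used only
# to show how the rules in _get_exc_type / error_dict above compose):
#
#     class ColorError(ValueError):
#         code = 'color'
#         msg_template = 'value is not a valid color'
#
#     get_exc_type(ColorError)      # -> 'value_error.color'
#     get_exc_type(TypeError)       # -> 'type_error'
#     get_exc_type(AssertionError)  # -> 'assertion_error'
#
# so error_dict(ColorError(), SomeConfig, ('field',)) would produce something like
#     {'loc': ('field',), 'msg': 'value is not a valid color', 'type': 'value_error.color'}
# (assuming the hypothetical SomeConfig.error_msg_templates does not override 'value_error.color').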
| 32.690789
| 113
| 0.630912
|
977b2d00b513c6629f2b2ceb3ad305d8dabbe239
| 3,241
|
py
|
Python
|
lib/surface/iot/devices/update.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/iot/devices/update.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 11
|
2020-02-29T02:51:12.000Z
|
2022-03-30T23:20:08.000Z
|
lib/surface/iot/devices/update.py
|
kustodian/google-cloud-sdk
|
b6bae4137d4b58030adb3dcb1271216dfb19f96d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T18:47:35.000Z
|
2020-07-24T18:47:35.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`gcloud iot devices update` command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.cloudiot import devices
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.iot import flags
from googlecloudsdk.command_lib.iot import resource_args
from googlecloudsdk.command_lib.iot import util
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Update(base.UpdateCommand):
"""Update an existing device."""
@staticmethod
def Args(parser):
resource_args.AddDeviceResourceArg(parser, 'to update')
flags.AddDeviceFlagsToParser(parser, default_for_blocked_flags=False)
flags.AddLogLevelFlagToParser(parser)
def Run(self, args):
client = devices.DevicesClient()
device_ref = args.CONCEPTS.device.Parse()
metadata = util.ParseMetadata(args.metadata,
args.metadata_from_file,
client.messages)
log_level = util.ParseLogLevel(
args.log_level, client.messages.Device.LogLevelValueValuesEnum)
device = client.Patch(device_ref,
blocked=args.blocked,
metadata=metadata,
log_level=log_level)
log.UpdatedResource(device_ref.Name(), 'device')
return device
@base.ReleaseTracks(base.ReleaseTrack.ALPHA,
base.ReleaseTrack.BETA)
class UpdateAlpha(base.UpdateCommand):
"""Update an existing device."""
@staticmethod
def Args(parser):
resource_args.AddDeviceResourceArg(parser, 'to update')
flags.AddDeviceFlagsToParser(parser, default_for_blocked_flags=False)
flags.GATEWAY_AUTH_METHOD_ENUM_MAPPER.choice_arg.AddToParser(parser)
flags.AddLogLevelFlagToParser(parser)
def Run(self, args):
client = devices.DevicesClient()
device_ref = args.CONCEPTS.device.Parse()
metadata = util.ParseMetadata(args.metadata,
args.metadata_from_file,
client.messages)
auth_method = flags.GATEWAY_AUTH_METHOD_ENUM_MAPPER.GetEnumForChoice(
args.auth_method)
log_level = util.ParseLogLevel(
args.log_level, client.messages.Device.LogLevelValueValuesEnum)
device = client.Patch(device_ref,
blocked=args.blocked,
metadata=metadata,
auth_method=auth_method,
log_level=log_level)
log.UpdatedResource(device_ref.Name(), 'device')
return device
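# Illustrative usage sketch (the device, registry and region names below are placeholders;
# the flags mirror what Args() registers above via the device resource args and
# AddDeviceFlagsToParser / AddLogLevelFlagToParser):
#
#   $ gcloud iot devices update my-device \
#       --registry=my-registry --region=us-central1 \
#       --blocked --log-level=info
#
# Run() parses the device resource, patches the blocked/metadata/log-level fields through
# DevicesClient.Patch, and logs the updated device name.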
| 35.615385
| 74
| 0.695773
|
fecaaf82e30984425cb558de36c37f9f02f3e2cf
| 659
|
py
|
Python
|
corpus/tests.py
|
agladman/betterbot
|
ecabd63022b9aa0a2f7104caff69a37ecfa98781
|
[
"MIT"
] | null | null | null |
corpus/tests.py
|
agladman/betterbot
|
ecabd63022b9aa0a2f7104caff69a37ecfa98781
|
[
"MIT"
] | null | null | null |
corpus/tests.py
|
agladman/betterbot
|
ecabd63022b9aa0a2f7104caff69a37ecfa98781
|
[
"MIT"
] | null | null | null |
# from django.test import TestCase
import unittest
import unittest.mock as mock
import markovify
from .models import Sentence as Sen
from betterbot.settings import TEXT_MODEL_JSON_FILE
@unittest.skip('blocked')
class SentenceTestCase(unittest.TestCase):
"""
can't create test db, old migration that was run with --fake
(corpus 0004_auto_20161214_1203) seems to be blocking it
"""
def test_load_reconstituted_text_model(self):
reconstituted_model = markovify.Text.from_json(TEXT_MODEL_JSON_FILE)
self.assertTrue(reconstituted_model)
def test_create_method(self):
s = Sen.create()
self.assertTrue(s)
| 27.458333
| 76
| 0.746586
|
38a2463fdde84789b376972f458e8b05f0ac9080
| 1,545
|
py
|
Python
|
flex/loading/schema/__init__.py
|
maroux/flex
|
dfd7c6d79d065d7ce1b0c799e51e9bb5292612b2
|
[
"MIT"
] | 160
|
2015-01-15T05:36:44.000Z
|
2021-08-04T00:43:54.000Z
|
flex/loading/schema/__init__.py
|
maroux/flex
|
dfd7c6d79d065d7ce1b0c799e51e9bb5292612b2
|
[
"MIT"
] | 151
|
2015-01-20T16:45:36.000Z
|
2022-02-23T21:07:58.000Z
|
flex/loading/schema/__init__.py
|
maroux/flex
|
dfd7c6d79d065d7ce1b0c799e51e9bb5292612b2
|
[
"MIT"
] | 90
|
2015-01-20T11:19:36.000Z
|
2021-08-03T08:58:18.000Z
|
from flex.constants import (
OBJECT,
)
from flex.datastructures import (
ValidationDict,
)
from flex.validation.common import (
generate_object_validator,
)
from flex.loading.common.mimetypes import (
mimetype_validator,
)
from .info import info_validator
from .swagger import swagger_version_validator
from .host import host_validator
from .base_path import base_path_validator
from .schemes import schemes_validator
from .paths import paths_validator
__all__ = [
'info_validator',
'swagger_schema_validator',
'host_validator',
'base_path_validator',
'schemes_validator',
'mimetype_validator',
'paths_validator',
]
swagger_schema = {
'type': OBJECT,
'required': [
'info',
'paths',
'swagger',
],
}
non_field_validators = ValidationDict()
non_field_validators.add_property_validator('info', info_validator)
non_field_validators.add_property_validator('swagger', swagger_version_validator)
non_field_validators.add_property_validator('host', host_validator)
non_field_validators.add_property_validator('basePath', base_path_validator)
non_field_validators.add_property_validator('schemes', schemes_validator)
non_field_validators.add_property_validator('produces', mimetype_validator)
non_field_validators.add_property_validator('consumes', mimetype_validator)
non_field_validators.add_property_validator('paths', paths_validator)
swagger_schema_validator = generate_object_validator(
schema=swagger_schema,
non_field_validators=non_field_validators,
)
| 28.611111
| 81
| 0.794175
|
264d3e14d545cd517b88673576f7787d3334300e
| 6,781
|
py
|
Python
|
src/app.py
|
luisarojas/temp-imm-policy
|
660adc750e1c3312627a4a24df246a8e2701081a
|
[
"MIT"
] | 1
|
2021-05-12T12:48:55.000Z
|
2021-05-12T12:48:55.000Z
|
src/app.py
|
luisarojas/temp-imm-policy
|
660adc750e1c3312627a4a24df246a8e2701081a
|
[
"MIT"
] | null | null | null |
src/app.py
|
luisarojas/temp-imm-policy
|
660adc750e1c3312627a4a24df246a8e2701081a
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from pathlib import Path
from datetime import datetime, timedelta
from email.message import EmailMessage
import time
import sys
import smtplib
import ssl
import socket
import platform
import pytz
est_timezone = pytz.timezone('EST')
testing = False
test_file_html='test.html'
prev_page_content = None
prev_page_content_length = None
content_changed = False
length_changed = False
html = None
url = "https://www.canada.ca/en/immigration-refugees-citizenship/corporate/mandate/policies-operational-instructions-agreements/public-policies/trpr-international-graduates.html"
def check_diff():
if not testing:
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox') # Required when running as root user; otherwise there are "no sandbox" errors
try:
driver_file_name = ''
if "Linux" in platform.platform(): driver_file_name = './drivers/chromedriver-v90-linux'
elif "macOS" in platform.platform(): driver_file_name = './drivers/chromedriver-v90-mac'
else:
print("Cannot identify the current OS")
sys.exit(-1)
driver = webdriver.Chrome(driver_file_name, options=chrome_options, service_args=['--verbose', '--log-path=/tmp/chromedriver.log'])
driver.get(url)
# Wait for a maximum of 10 seconds for an element matching the given criteria to be found
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, 'mwsgeneric-base-html'))
)
html = driver.page_source
finally:
driver.close()
driver.quit()
else:
with open(test_file_html,'r') as file:
html = file.read()
soup = BeautifulSoup(html, "html.parser")
page_content_soup = soup.select('div.mwsgeneric-base-html.parbase.section')
try:
page_content = page_content_soup[0]
except IndexError as e:
return {"content-changed": False, "length-changed": True, "html": f'There has been a change in the number of sections in this page, which resulted in an error when parsing the site.<br><br>Error: {e}'}
page_content_length = len(page_content_soup)
global prev_page_content
global prev_page_content_length
# Check if either the contents or the number of sections changed
if ((hash(prev_page_content) != hash(page_content)) and (prev_page_content)) or ((hash(prev_page_content_length) != hash(page_content_length)) and (prev_page_content_length)):
content_changed = True
length_changed = False  # default; the nested check below may set it to True
if (prev_page_content_length != page_content_length) and (prev_page_content_length):
length_changed = True
else:
content_changed = False
length_changed = False
prev_page_content = page_content
prev_page_content_length = page_content_length
message_html = page_content
if length_changed: message_html = f'There has been a change in the number of sections in this page.'
return {"content-changed": content_changed, "length-changed": length_changed, "html": message_html}
def send_email(html_message):
if Path('env.py').exists():
from env import ENV
try:
recipient_emails_str = ', '.join(ENV['recipient']['emails'])
if testing: recipient_emails_str = ENV['recipient']['emails'][0]
print(f'Sending notification e-mail to: {recipient_emails_str}... ',end='')
except Exception as e:
print('\n', e)
print('Error: Unable to properly read recipient emails from env.py file')
sys.exit(-1)
try:
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = ENV['sender']['email']
password = ENV['sender']['password']
subject = '🆕 Temp. Immigration Policy Update'
if testing: subject += ' - Test'
message = EmailMessage()
message['Subject'] = subject
message['From'] = sender_email
message['To'] = recipient_emails_str
try:
message.set_content(html_message.get_text())
except Exception as e:
message.set_content(f'There was a problem setting the content for this message:<br>{e}')
html_template = ''
with open('template.html','r') as file: html_template = file.read()
html_message = html_template.replace('[page-content]', str(html_message)).replace('[url]', url)
message.add_alternative(html_message, subtype='html')
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.send_message(message)
print('Sent.')
except Exception as e: print('\n', e)
else: print('Unable to import env.py file. Won\'t send e-mail.')
def reset_settings():
    # The global statement is required here; plain assignments would only
    # create function locals and never reset the module-level flags.
    global html, content_changed, length_changed
    html = None
    content_changed = False
    length_changed = False
if __name__ == "__main__":
if testing:
print('\033[93m\033[1m' + ('-'*30) + '\n*** IN TESTING ENVIRONMENT ***\n' + ('-'*30))
print(f'Using `{test_file_html}` as source of web-scraping content\033[0m')
start_date = est_timezone.localize(datetime(2021, 4, 26, 9, 0, 0)) # Apr 26, 2021 at 9 AM
seconds = 86400 # check every 24 hours
# seconds = 60 # check every minute
print('Frequency: Every ', end='')
if seconds < 60: print(f'{seconds} second(s)')
elif (seconds/60) < 60: print(f'{int(seconds/60)} minute(s)')
else: print(f'{int(seconds/60/60)} hour(s)')
print(f"Starting time: {format(start_date.strftime('%b %d, %Y at %I:%M %p %Z'))}\n")
while True:
now = est_timezone.localize(datetime.now())
# Have not yet reached the start date
if now <= start_date: time.sleep(60) # Try again in 60 seconds
else:
print(now.strftime('%b %d, %Y at %I:%M %p %Z'),': ', end='')
res = check_diff()
if res["length-changed"]:
print(f'\033[91mUnable to find HTML objects\033[0m')
send_email(res['html'])
sys.exit(0)
elif res["content-changed"]:
print(f'\033[92mThere are content changes\033[0m')
send_email(res['html'])
else: print('No changes')
reset_settings()
time.sleep(seconds)
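# Illustrative sketch of the env.py module this script expects (all values are placeholders;
# only the key layout is taken from the ENV lookups in send_email above):
#
#   ENV = {
#       'sender': {'email': 'bot@example.com', 'password': 'app-specific-password'},
#       'recipient': {'emails': ['me@example.com', 'you@example.com']},
#   }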
| 35.317708
| 209
| 0.630733
|
941f13501ea23a47f81d705fda6a5c9028d604fd
| 147
|
py
|
Python
|
ws-python/ex032.py
|
DerickSilva/Python
|
8e81c6fe403e3321f18d2767f0bdfc818b180fd2
|
[
"MIT"
] | null | null | null |
ws-python/ex032.py
|
DerickSilva/Python
|
8e81c6fe403e3321f18d2767f0bdfc818b180fd2
|
[
"MIT"
] | null | null | null |
ws-python/ex032.py
|
DerickSilva/Python
|
8e81c6fe403e3321f18d2767f0bdfc818b180fd2
|
[
"MIT"
] | null | null | null |
ano = int(input('Que ano deseja analisar? '))  # "Which year do you want to check?"
# Full Gregorian leap-year rule: divisible by 4, except century years not divisible by 400.
if ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0):
    print(f'O ano {ano} é bissexto')  # "The year {ano} is a leap year"
else:
    print(f'O ano {ano} não é bissexto')  # "The year {ano} is not a leap year"
| 24.5
| 45
| 0.605442
|
ae4d0e249376ef73dd34edbd99d2989644d80bcd
| 11,983
|
py
|
Python
|
tests/mypy_test.py
|
andrewshadura/typeshed
|
bf2049ef18244b8b3024cd7a8cb28b7457dd4652
|
[
"Apache-2.0"
] | null | null | null |
tests/mypy_test.py
|
andrewshadura/typeshed
|
bf2049ef18244b8b3024cd7a8cb28b7457dd4652
|
[
"Apache-2.0"
] | null | null | null |
tests/mypy_test.py
|
andrewshadura/typeshed
|
bf2049ef18244b8b3024cd7a8cb28b7457dd4652
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""Test runner for typeshed.
Depends on mypy being installed.
Approach:
1. Parse sys.argv
2. Compute appropriate arguments for mypy
3. Stuff those arguments into sys.argv
4. Run mypy.main('')
5. Repeat steps 2-4 for other mypy runs (e.g. --py2)
"""
import argparse
import os
import re
import sys
import tempfile
from glob import glob
from pathlib import Path
from typing import Dict, NamedTuple
import tomli
parser = argparse.ArgumentParser(description="Test runner for typeshed. Patterns are unanchored regexps on the full path.")
parser.add_argument("-v", "--verbose", action="count", default=0, help="More output")
parser.add_argument("-n", "--dry-run", action="store_true", help="Don't actually run mypy")
parser.add_argument("-x", "--exclude", type=str, nargs="*", help="Exclude pattern")
parser.add_argument("-p", "--python-version", type=str, nargs="*", help="These versions only (major[.minor])")
parser.add_argument("--platform", help="Run mypy for a certain OS platform (defaults to sys.platform)")
parser.add_argument(
"--warn-unused-ignores",
action="store_true",
help="Run mypy with --warn-unused-ignores "
"(hint: only get rid of warnings that are "
"unused for all platforms and Python versions)",
)
parser.add_argument("filter", type=str, nargs="*", help="Include pattern (default all)")
def log(args, *varargs):
if args.verbose >= 2:
print(*varargs)
def match(fn, args):
if not args.filter and not args.exclude:
log(args, fn, "accept by default")
return True
if args.exclude:
for f in args.exclude:
if re.search(f, fn):
log(args, fn, "excluded by pattern", f)
return False
if args.filter:
for f in args.filter:
if re.search(f, fn):
log(args, fn, "accepted by pattern", f)
return True
if args.filter:
log(args, fn, "rejected (no pattern matches)")
return False
log(args, fn, "accepted (no exclude pattern matches)")
return True
_VERSION_LINE_RE = re.compile(r"^([a-zA-Z_][a-zA-Z0-9_.]*): ([23]\.\d{1,2})-([23]\.\d{1,2})?$")
def parse_versions(fname):
result = {}
with open(fname) as f:
for line in f:
# Allow having some comments or empty lines.
line = line.split("#")[0].strip()
if line == "":
continue
m = _VERSION_LINE_RE.match(line)
assert m, "invalid VERSIONS line: " + line
mod = m.group(1)
min_version = parse_version(m.group(2))
max_version = parse_version(m.group(3)) if m.group(3) else (99, 99)
result[mod] = min_version, max_version
return result
_VERSION_RE = re.compile(r"^([23])\.(\d+)$")
def parse_version(v_str):
m = _VERSION_RE.match(v_str)
assert m, "invalid version: " + v_str
return int(m.group(1)), int(m.group(2))
def is_supported(distribution, major):
dist_path = Path("stubs", distribution)
with open(dist_path / "METADATA.toml") as f:
data = dict(tomli.loads(f.read()))
if major == 2:
# Python 2 is not supported by default.
return bool(data.get("python2", False))
# Python 3 is supported by default.
return has_py3_stubs(dist_path)
# Keep this in sync with stubtest_third_party.py
def has_py3_stubs(dist: Path) -> bool:
return len(glob(f"{dist}/*.pyi")) > 0 or len(glob(f"{dist}/[!@]*/__init__.pyi")) > 0
def add_files(files, seen, root, name, args):
"""Add all files in package or module represented by 'name' located in 'root'."""
full = os.path.join(root, name)
mod, ext = os.path.splitext(name)
if ext in [".pyi", ".py"]:
if match(full, args):
seen.add(mod)
files.append(full)
elif os.path.isfile(os.path.join(full, "__init__.pyi")) or os.path.isfile(os.path.join(full, "__init__.py")):
for r, ds, fs in os.walk(full):
ds.sort()
fs.sort()
for f in fs:
m, x = os.path.splitext(f)
if x in [".pyi", ".py"]:
fn = os.path.join(r, f)
if match(fn, args):
seen.add(mod)
files.append(fn)
class MypyDistConf(NamedTuple):
module_name: str
values: Dict
# The configuration section in the metadata file looks like the following, with multiple module sections possible
# [mypy-tests]
# [mypy-tests.yaml]
# module_name = "yaml"
# [mypy-tests.yaml.values]
# disallow_incomplete_defs = true
# disallow_untyped_defs = true
def add_configuration(configurations: list[MypyDistConf], distribution: str) -> None:
with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
data = dict(tomli.loads(f.read()))
mypy_tests_conf = data.get("mypy-tests")
if not mypy_tests_conf:
return
assert isinstance(mypy_tests_conf, dict), "mypy-tests should be a section"
for section_name, mypy_section in mypy_tests_conf.items():
assert isinstance(mypy_section, dict), "{} should be a section".format(section_name)
module_name = mypy_section.get("module_name")
assert module_name is not None, "{} should have a module_name key".format(section_name)
assert isinstance(module_name, str), "{} should be a key-value pair".format(section_name)
values = mypy_section.get("values")
assert values is not None, "{} should have a values section".format(section_name)
assert isinstance(values, dict), "values should be a section"
configurations.append(MypyDistConf(module_name, values.copy()))
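# Illustrative result (for the METADATA.toml snippet shown in the comment above, which is
# itself only an example): add_configuration() would append
#     MypyDistConf(module_name="yaml",
#                  values={"disallow_incomplete_defs": True, "disallow_untyped_defs": True})
# and run_mypy() below would then write that out to the temporary mypy config as
#     [mypy-yaml]
#     disallow_incomplete_defs = True
#     disallow_untyped_defs = True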
def run_mypy(args, configurations, major, minor, files, *, custom_typeshed=False):
try:
from mypy.main import main as mypy_main
except ImportError:
print("Cannot import mypy. Did you install it?")
sys.exit(1)
with tempfile.NamedTemporaryFile("w+") as temp:
temp.write("[mypy]\n")
for dist_conf in configurations:
temp.write("[mypy-%s]\n" % dist_conf.module_name)
for k, v in dist_conf.values.items():
temp.write("{} = {}\n".format(k, v))
temp.flush()
flags = get_mypy_flags(args, major, minor, temp.name, custom_typeshed=custom_typeshed)
sys.argv = ["mypy"] + flags + files
if args.verbose:
print("running", " ".join(sys.argv))
if not args.dry_run:
try:
mypy_main("", sys.stdout, sys.stderr)
except SystemExit as err:
return err.code
return 0
def get_mypy_flags(args, major: int, minor: int, temp_name: str, *, custom_typeshed: bool = False) -> list[str]:
flags = [
"--python-version",
"%d.%d" % (major, minor),
"--config-file",
temp_name,
"--no-site-packages",
"--show-traceback",
"--no-implicit-optional",
"--disallow-untyped-decorators",
"--disallow-any-generics",
"--warn-incomplete-stub",
"--show-error-codes",
"--no-error-summary",
]
if custom_typeshed:
# Setting custom typeshed dir prevents mypy from falling back to its bundled
# typeshed in case of stub deletions
flags.extend(["--custom-typeshed-dir", os.path.dirname(os.path.dirname(__file__))])
if args.warn_unused_ignores:
flags.append("--warn-unused-ignores")
if args.platform:
flags.extend(["--platform", args.platform])
return flags
def read_dependencies(distribution: str) -> list[str]:
with open(os.path.join("stubs", distribution, "METADATA.toml")) as f:
data = dict(tomli.loads(f.read()))
requires = data.get("requires", [])
assert isinstance(requires, list)
dependencies = []
for dependency in requires:
assert isinstance(dependency, str)
assert dependency.startswith("types-")
dependencies.append(dependency[6:])
return dependencies
def add_third_party_files(
distribution: str, major: int, files: list[str], args, configurations: list[MypyDistConf], seen_dists: set[str]
) -> None:
if distribution in seen_dists:
return
seen_dists.add(distribution)
dependencies = read_dependencies(distribution)
for dependency in dependencies:
add_third_party_files(dependency, major, files, args, configurations, seen_dists)
root = os.path.join("stubs", distribution)
for name in os.listdir(root):
mod, _ = os.path.splitext(name)
if mod.startswith("."):
continue
add_files(files, set(), root, name, args)
add_configuration(configurations, distribution)
def test_third_party_distribution(distribution: str, major: int, minor: int, args) -> tuple[int, int]:
"""Test the stubs of a third-party distribution.
Return a tuple, where the first element indicates mypy's return code
and the second element is the number of checked files.
"""
files: list[str] = []
configurations: list[MypyDistConf] = []
seen_dists: set[str] = set()
add_third_party_files(distribution, major, files, args, configurations, seen_dists)
print(f"testing {distribution} ({len(files)} files)...")
if not files:
print("--- no files found ---")
sys.exit(1)
code = run_mypy(args, configurations, major, minor, files)
return code, len(files)
def main():
args = parser.parse_args()
versions = [(3, 10), (3, 9), (3, 8), (3, 7), (3, 6), (2, 7)]
if args.python_version:
versions = [v for v in versions if any(("%d.%d" % v).startswith(av) for av in args.python_version)]
if not versions:
print("--- no versions selected ---")
sys.exit(1)
code = 0
files_checked = 0
for major, minor in versions:
print(f"*** Testing Python {major}.{minor}")
seen = {"__builtin__", "builtins", "typing"} # Always ignore these.
# Test standard library files.
files = []
if major == 2:
root = os.path.join("stdlib", "@python2")
for name in os.listdir(root):
mod, _ = os.path.splitext(name)
if mod in seen or mod.startswith("."):
continue
add_files(files, seen, root, name, args)
else:
supported_versions = parse_versions(os.path.join("stdlib", "VERSIONS"))
root = "stdlib"
for name in os.listdir(root):
if name == "@python2" or name == "VERSIONS" or name.startswith("."):
continue
mod, _ = os.path.splitext(name)
if supported_versions[mod][0] <= (major, minor) <= supported_versions[mod][1]:
add_files(files, seen, root, name, args)
if files:
print("Running mypy " + " ".join(get_mypy_flags(args, major, minor, "/tmp/...", custom_typeshed=True)))
print(f"testing stdlib ({len(files)} files)...")
this_code = run_mypy(args, [], major, minor, files, custom_typeshed=True)
code = max(code, this_code)
files_checked += len(files)
# Test files of all third party distributions.
print("Running mypy " + " ".join(get_mypy_flags(args, major, minor, "/tmp/...")))
for distribution in sorted(os.listdir("stubs")):
if not is_supported(distribution, major):
continue
this_code, checked = test_third_party_distribution(distribution, major, minor, args)
code = max(code, this_code)
files_checked += checked
print()
if code:
print(f"--- exit status {code}, {files_checked} files checked ---")
sys.exit(code)
if not files_checked:
print("--- nothing to do; exit 1 ---")
sys.exit(1)
print(f"--- success, {files_checked} files checked ---")
if __name__ == "__main__":
main()
| 34.632948
| 123
| 0.610365
|
d286a6569ef52a235d026ef23ec84b423b79a274
| 117
|
py
|
Python
|
tellurium/roadrunner/__init__.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 73
|
2016-06-13T12:44:28.000Z
|
2021-12-31T14:44:39.000Z
|
tellurium/roadrunner/__init__.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 461
|
2015-03-26T00:05:16.000Z
|
2022-03-16T17:24:35.000Z
|
tellurium/roadrunner/__init__.py
|
kirichoi/tellurium
|
77cf6e794600587741ebe209644a78051e0db1d5
|
[
"Apache-2.0"
] | 30
|
2016-01-18T16:50:54.000Z
|
2021-07-06T09:29:53.000Z
|
from __future__ import print_function, division, absolute_import
from .extended_roadrunner import ExtendedRoadRunner
| 39
| 64
| 0.888889
|
657ee7cd7ce0a451f1a19f1b73d966705677c5bf
| 19,372
|
py
|
Python
|
app/extension/confluence/extension_locust.py
|
frsCommunardo/dc-app-performance-toolkit
|
39eb794b329f53e4457eec9e54b9d927612bd810
|
[
"Apache-2.0"
] | null | null | null |
app/extension/confluence/extension_locust.py
|
frsCommunardo/dc-app-performance-toolkit
|
39eb794b329f53e4457eec9e54b9d927612bd810
|
[
"Apache-2.0"
] | null | null | null |
app/extension/confluence/extension_locust.py
|
frsCommunardo/dc-app-performance-toolkit
|
39eb794b329f53e4457eec9e54b9d927612bd810
|
[
"Apache-2.0"
] | null | null | null |
import re
import random
from locustio.common_utils import confluence_measure, fetch_by_re, timestamp_int, \
TEXT_HEADERS, NO_TOKEN_HEADERS, JSON_HEADERS, RESOURCE_HEADERS, generate_random_string, init_logger, \
raise_if_login_failed
from locustio.confluence.requests_params import confluence_datasets, Login, ViewPage, ViewDashboard, ViewBlog, \
CreateBlog, CreateEditPage, UploadAttachments, LikePage
logger = init_logger(app_type='confluence')
@confluence_measure("locust_csi_embed_sharepoint_document")
def csi_embed_sharepoint_document(locust):
r = locust.get('/rest/csi/spc/templaterenderer/vm/csi-embed-sharepoint-document', catch_response = True) # call app-specific GET endpoint
content = r.content.decode('utf-8') # decode response content
token_pattern_example = '"token":"(.+?)"'
id_pattern_example = '"id":"(.+?)"'
token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
id = re.findall(id_pattern_example, content) # get ID from response using regexp
logger.locust_info(f'token: {token}, id: {id}') # log information for debugging when verbose is true in confluence.yml file
if 'Add a SharePoint Document' not in content:
logger.error(f"'Add a SharePoint Document' was not found in {content}")
assert 'Add a SharePoint Document' in content # assert specific string in response content
@confluence_measure("locust_csi_embed_sharepoint_list")
def csi_embed_sharepoint_list(locust):
r = locust.get('/rest/csi/spc/templaterenderer/vm/csi-embed-sharepoint-list', catch_response = True) # call app-specific GET endpoint
content = r.content.decode('utf-8') # decode response content
token_pattern_example = '"token":"(.+?)"'
id_pattern_example = '"id":"(.+?)"'
token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
id = re.findall(id_pattern_example, content) # get ID from response using regexp
logger.locust_info(f'token: {token}, id: {id}') # log information for debugging when verbose is true in confluence.yml file
if 'Add a SharePoint List' not in content:
logger.error(f"'Add a SharePoint List' was not found in {content}")
assert 'Add a SharePoint List' in content # assert specific string in response content
@confluence_measure("locust_csi_adal_helper")
def csi_adal_helper(locust):
r = locust.get('/plugins/servlet/csi/adal-helper', catch_response=True) # call app-specific GET endpoint
content = r.content.decode('utf-8') # decode response content
token_pattern_example = '"token":"(.+?)"'
id_pattern_example = '"id":"(.+?)"'
token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
id = re.findall(id_pattern_example, content) # get ID from response using regexp
logger.locust_info(f'token: {token}, id: {id}') # log information for debugging when verbose is true in confluence.yml file
if 'Redirect Target for adal' not in content:
logger.error(f"'Redirect Target for adal' was not found in {content}")
assert 'Redirect Target for adal' in content # assert specific string in response content
@confluence_measure("locust_csi_spc_license")
def csi_spc_license(locust):
r = locust.get('/rest/csi/spc/license', catch_response=True) # call app-specific GET endpoint
content = r.content.decode('utf-8') # decode response content
token_pattern_example = '"token":"(.+?)"'
id_pattern_example = '"id":"(.+?)"'
token = re.findall(token_pattern_example, content) # get TOKEN from response using regexp
id = re.findall(id_pattern_example, content) # get ID from response using regexp
logger.locust_info(f'token: {token}, id: {id}') # log information for debugging when verbose is true in confluence.yml file
if 'licenseValid' not in content:
logger.error(f"'licenseValid' was not found in {content}")
assert 'licenseValid' in content # assert specific string in response content
@confluence_measure("locust_csi_view_page_document_macro")
def csi_view_page_document_macro(locust):
raise_if_login_failed(locust)
params = ViewPage()
r = locust.get(f'/display/CSIO/Document+Macro+View', catch_response=True)
content = r.content.decode('utf-8')
if 'Created by' not in content or 'Save for later' not in content:
logger.error(f'Fail to open page CSIO/Document+Macro+View: {content}')
assert 'Created by' in content and 'Save for later' in content, 'Could not open page.'
parent_page_id = fetch_by_re(params.parent_page_id_re, content)
parsed_page_id = fetch_by_re(params.page_id_re, content)
space_key = fetch_by_re(params.space_key_re, content)
tree_request_id = fetch_by_re(params.tree_result_id_re, content)
has_no_root = fetch_by_re(params.has_no_root_re, content)
root_page_id = fetch_by_re(params.root_page_id_re, content)
atl_token_view_issue = fetch_by_re(params.atl_token_view_issue_re, content)
editable = fetch_by_re(params.editable_re, content)
ancestor_ids = re.findall(params.ancestor_ids_re, content)
ancestor_str = 'ancestors='
for ancestor in ancestor_ids:
ancestor_str = ancestor_str + str(ancestor) + '&'
locust.session_data_storage['page_id'] = parsed_page_id
locust.session_data_storage['has_no_root'] = has_no_root
locust.session_data_storage['tree_request_id'] = tree_request_id
locust.session_data_storage['root_page_id'] = root_page_id
locust.session_data_storage['ancestors'] = ancestor_str
locust.session_data_storage['space_key'] = space_key
locust.session_data_storage['editable'] = editable
locust.session_data_storage['atl_token_view_issue'] = atl_token_view_issue
locust.get('/rest/helptips/1.0/tips', catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("110"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.get(f'/rest/likes/1.0/content/{parsed_page_id}/likes?commentLikes=true&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/highlighting/1.0/panel-items?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/mywork/latest/status/notification/count?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
r = locust.get(f'/rest/inlinecomments/1.0/comments?containerId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
content = r.content.decode('utf-8')
if 'authorDisplayName' not in content and '[]' not in content:
logger.error(f'Could not open comments for page {parsed_page_id}: {content}')
assert 'authorDisplayName' in content or '[]' in content, 'Could not open comments for page.'
locust.get(f'/plugins/editor-loader/editor.action?parentPageId={parent_page_id}&pageId={parsed_page_id}'
f'&spaceKey={space_key}&atl_after_login_redirect=/pages/viewpage.action'
f'&timeout=12000&_={timestamp_int()}', catch_response=True)
locust.get(f'/rest/watch-button/1.0/watchState/{parsed_page_id}?_={timestamp_int()}',
catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("145"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("150"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("155"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("160"),
headers=RESOURCE_HEADERS, catch_response=True)
@confluence_measure("locust_csi_view_page_list_macro")
def csi_view_page_list_macro(locust):
raise_if_login_failed(locust)
params = ViewPage()
r = locust.get(f'/display/CSIO/List+Macro+View', catch_response=True)
content = r.content.decode('utf-8')
if 'Created by' not in content or 'Save for later' not in content:
logger.error(f'Fail to open page CSIO/List+Macro+View: {content}')
assert 'Created by' in content and 'Save for later' in content, 'Could not open page.'
parent_page_id = fetch_by_re(params.parent_page_id_re, content)
parsed_page_id = fetch_by_re(params.page_id_re, content)
space_key = fetch_by_re(params.space_key_re, content)
tree_request_id = fetch_by_re(params.tree_result_id_re, content)
has_no_root = fetch_by_re(params.has_no_root_re, content)
root_page_id = fetch_by_re(params.root_page_id_re, content)
atl_token_view_issue = fetch_by_re(params.atl_token_view_issue_re, content)
editable = fetch_by_re(params.editable_re, content)
ancestor_ids = re.findall(params.ancestor_ids_re, content)
ancestor_str = 'ancestors='
for ancestor in ancestor_ids:
ancestor_str = ancestor_str + str(ancestor) + '&'
locust.session_data_storage['page_id'] = parsed_page_id
locust.session_data_storage['has_no_root'] = has_no_root
locust.session_data_storage['tree_request_id'] = tree_request_id
locust.session_data_storage['root_page_id'] = root_page_id
locust.session_data_storage['ancestors'] = ancestor_str
locust.session_data_storage['space_key'] = space_key
locust.session_data_storage['editable'] = editable
locust.session_data_storage['atl_token_view_issue'] = atl_token_view_issue
locust.get('/rest/helptips/1.0/tips', catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("110"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.get(f'/rest/likes/1.0/content/{parsed_page_id}/likes?commentLikes=true&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/highlighting/1.0/panel-items?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/mywork/latest/status/notification/count?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
r = locust.get(f'/rest/inlinecomments/1.0/comments?containerId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
content = r.content.decode('utf-8')
if 'authorDisplayName' not in content and '[]' not in content:
logger.error(f'Could not open comments for page {parsed_page_id}: {content}')
assert 'authorDisplayName' in content or '[]' in content, 'Could not open comments for page.'
locust.get(f'/plugins/editor-loader/editor.action?parentPageId={parent_page_id}&pageId={parsed_page_id}'
f'&spaceKey={space_key}&atl_after_login_redirect=/pages/viewpage.action'
f'&timeout=12000&_={timestamp_int()}', catch_response=True)
locust.get(f'/rest/watch-button/1.0/watchState/{parsed_page_id}?_={timestamp_int()}',
catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("145"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("150"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("155"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("160"),
headers=RESOURCE_HEADERS, catch_response=True)
@confluence_measure("locust_csi_view_blog_document_macro")
def csi_view_page_blog_document_macro(locust):
raise_if_login_failed(locust)
params = ViewPage()
r = locust.get(f'/display/CSIO/2021/01/01/Blog+Document+Macro+View', catch_response=True)
content = r.content.decode('utf-8')
if 'Created by' not in content or 'Save for later' not in content:
logger.error(f'Fail to open page CSIO/2021/01/01/Blog+Document+Macro+View: {content}')
assert 'Created by' in content and 'Save for later' in content, 'Could not open page.'
parent_page_id = fetch_by_re(params.parent_page_id_re, content)
parsed_page_id = fetch_by_re(params.page_id_re, content)
space_key = fetch_by_re(params.space_key_re, content)
tree_request_id = fetch_by_re(params.tree_result_id_re, content)
has_no_root = fetch_by_re(params.has_no_root_re, content)
root_page_id = fetch_by_re(params.root_page_id_re, content)
atl_token_view_issue = fetch_by_re(params.atl_token_view_issue_re, content)
editable = fetch_by_re(params.editable_re, content)
ancestor_ids = re.findall(params.ancestor_ids_re, content)
ancestor_str = 'ancestors='
for ancestor in ancestor_ids:
ancestor_str = ancestor_str + str(ancestor) + '&'
locust.session_data_storage['page_id'] = parsed_page_id
locust.session_data_storage['has_no_root'] = has_no_root
locust.session_data_storage['tree_request_id'] = tree_request_id
locust.session_data_storage['root_page_id'] = root_page_id
locust.session_data_storage['ancestors'] = ancestor_str
locust.session_data_storage['space_key'] = space_key
locust.session_data_storage['editable'] = editable
locust.session_data_storage['atl_token_view_issue'] = atl_token_view_issue
locust.get('/rest/helptips/1.0/tips', catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("110"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.get(f'/rest/likes/1.0/content/{parsed_page_id}/likes?commentLikes=true&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/highlighting/1.0/panel-items?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/mywork/latest/status/notification/count?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
r = locust.get(f'/rest/inlinecomments/1.0/comments?containerId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
content = r.content.decode('utf-8')
if 'authorDisplayName' not in content and '[]' not in content:
logger.error(f'Could not open comments for page {parsed_page_id}: {content}')
assert 'authorDisplayName' in content or '[]' in content, 'Could not open comments for page.'
locust.get(f'/plugins/editor-loader/editor.action?parentPageId={parent_page_id}&pageId={parsed_page_id}'
f'&spaceKey={space_key}&atl_after_login_redirect=/pages/viewpage.action'
f'&timeout=12000&_={timestamp_int()}', catch_response=True)
locust.get(f'/rest/watch-button/1.0/watchState/{parsed_page_id}?_={timestamp_int()}',
catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("145"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("150"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("155"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("160"),
headers=RESOURCE_HEADERS, catch_response=True)
@confluence_measure("locust_csi_view_blog_list_macro")
def csi_view_page_blog_list_macro(locust):
raise_if_login_failed(locust)
params = ViewPage()
r = locust.get(f'/display/CSIO/2021/01/01/Blog+List+Macro+View', catch_response=True)
content = r.content.decode('utf-8')
if 'Created by' not in content or 'Save for later' not in content:
logger.error(f'Fail to open page CSIO/2021/01/01/Blog+List+Macro+View: {content}')
assert 'Created by' in content and 'Save for later' in content, 'Could not open page.'
parent_page_id = fetch_by_re(params.parent_page_id_re, content)
parsed_page_id = fetch_by_re(params.page_id_re, content)
space_key = fetch_by_re(params.space_key_re, content)
tree_request_id = fetch_by_re(params.tree_result_id_re, content)
has_no_root = fetch_by_re(params.has_no_root_re, content)
root_page_id = fetch_by_re(params.root_page_id_re, content)
atl_token_view_issue = fetch_by_re(params.atl_token_view_issue_re, content)
editable = fetch_by_re(params.editable_re, content)
ancestor_ids = re.findall(params.ancestor_ids_re, content)
ancestor_str = 'ancestors='
for ancestor in ancestor_ids:
ancestor_str = ancestor_str + str(ancestor) + '&'
locust.session_data_storage['page_id'] = parsed_page_id
locust.session_data_storage['has_no_root'] = has_no_root
locust.session_data_storage['tree_request_id'] = tree_request_id
locust.session_data_storage['root_page_id'] = root_page_id
locust.session_data_storage['ancestors'] = ancestor_str
locust.session_data_storage['space_key'] = space_key
locust.session_data_storage['editable'] = editable
locust.session_data_storage['atl_token_view_issue'] = atl_token_view_issue
locust.get('/rest/helptips/1.0/tips', catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("110"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.get(f'/rest/likes/1.0/content/{parsed_page_id}/likes?commentLikes=true&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/highlighting/1.0/panel-items?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
locust.get(f'/rest/mywork/latest/status/notification/count?pageId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
r = locust.get(f'/rest/inlinecomments/1.0/comments?containerId={parsed_page_id}&_={timestamp_int()}',
catch_response=True)
content = r.content.decode('utf-8')
if 'authorDisplayName' not in content and '[]' not in content:
logger.error(f'Could not open comments for page {parsed_page_id}: {content}')
assert 'authorDisplayName' in content or '[]' in content, 'Could not open comments for page.'
locust.get(f'/plugins/editor-loader/editor.action?parentPageId={parent_page_id}&pageId={parsed_page_id}'
f'&spaceKey={space_key}&atl_after_login_redirect=/pages/viewpage.action'
f'&timeout=12000&_={timestamp_int()}', catch_response=True)
locust.get(f'/rest/watch-button/1.0/watchState/{parsed_page_id}?_={timestamp_int()}',
catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("145"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("150"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("155"),
headers=RESOURCE_HEADERS, catch_response=True)
locust.post('/rest/webResources/1.0/resources', json=params.resources_body.get("160"),
headers=RESOURCE_HEADERS, catch_response=True)
| 59.790123
| 142
| 0.72326
|
02b045d2c6994f9aa4abdebc34d56d233a699284
| 3,246
|
py
|
Python
|
Sentiment scoring/old/ScoreSentiment_Rating.py
|
stancld/MSc-Project
|
31a57ed58a902fe998649b948c61e70ca78729a4
|
[
"MIT"
] | 2
|
2021-05-27T12:43:20.000Z
|
2022-02-24T07:01:55.000Z
|
Sentiment scoring/old/ScoreSentiment_Rating.py
|
stancld/MSc-Project
|
31a57ed58a902fe998649b948c61e70ca78729a4
|
[
"MIT"
] | 5
|
2021-03-19T08:52:22.000Z
|
2021-09-22T19:21:44.000Z
|
Sentiment scoring/old/ScoreSentiment_Rating.py
|
stancld/MSc-Project
|
31a57ed58a902fe998649b948c61e70ca78729a4
|
[
"MIT"
] | 2
|
2020-09-29T03:27:38.000Z
|
2020-11-07T05:41:10.000Z
|
"""
File: ScoreSentiment_Rating.py
Author: Daniel Stancl
Description: This file takes the companies and reviews dataframes and generates
columns with employee sentiment for companies in a given month / over a given period, etc.
"""
import pandas as pd
class ScoreSentiment_Rating(object):
def __init__(self, companies, reviews):
self.companies = companies
self.reviews = reviews
def run(self, sentiment_path, periods, difference):
self.sentimentMonthly(sentiment_path, difference)
[self.sentimentCustom(period, sentiment_path, difference) for period in periods]
def sentimentMonthly(self, sentiment_path, difference, _return=False):
sentiment = pd.DataFrame(
pd.DataFrame(
self.reviews
.groupby(['Company', 'Year-Month'])
.Rating
.agg(['mean', 'count'])
).to_records()
)
sentiment.columns = ['Company', 'Year-Month', 'Rating', 'Count']
self.sentiment_rating = pd.pivot_table(sentiment, 'Rating', index='Company', columns='Year-Month', fill_value=None)
self.sentiment_count = pd.pivot_table(sentiment, 'Count', index='Company', columns='Year-Month', fill_value=0)
# save
fname = f"{sentiment_path}Sentiment_Rating_1M.csv"
self.sentiment_rating.to_csv(fname)
fname = f"{sentiment_path}Sentiment_Count_1M.csv"
self.sentiment_count.to_csv(fname)
# compute difference
if difference:
sentiment_diff = self._sentimentDifference(
data=self.sentiment_rating,
)
fname = f"{sentiment_path}Sentiment_Rating_Diff_1M.csv"
sentiment_diff.to_csv(fname)
if _return:
return self.sentiment_rating, self.sentiment_count, sentiment_diff  # NB: sentiment_diff is only defined when difference=True
def sentimentCustom(self, period, sentiment_path, difference, _return=False):
sentimentSums = self.sentiment_count * self.sentiment_rating
rollingCounts = (
self.sentiment_count
.T
.rolling(window=period)
.sum()
.T
)
rollingSums = (
sentimentSums
.fillna(0)
.T
.rolling(window=period)
.sum()
.T
.replace(
to_replace=0,
value=None
)
)
rollingSentiment = rollingSums / rollingCounts
fname = f"{sentiment_path}Sentiment_Rating_{period}M.csv"
rollingSentiment.to_csv(fname)
# compute difference
if difference:
sentiment_diff = self._sentimentDifference(
data=rollingSentiment
)
fname = f"{sentiment_path}Sentiment_Rating_Diff_{period}M.csv"
sentiment_diff.to_csv(fname)
if _return:
return rollingSentiment, sentiment_diff
def _sentimentDifference(self, data):
sentimentDiff = (
data
.T
.rolling(window=2)
.apply(lambda x: self._diff_function(x))
.T
)
return sentimentDiff
def _diff_function(self, x):
return x.iloc[1] - x.iloc[0]
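# Minimal usage sketch (illustrative only; assumes `companies` and `reviews` are pandas
# DataFrames and that `reviews` has 'Company', 'Year-Month' and 'Rating' columns, as used
# in sentimentMonthly above):
#
#   scorer = ScoreSentiment_Rating(companies, reviews)
#   scorer.run(sentiment_path='output/', periods=[3, 6, 12], difference=True)
#
# This writes Sentiment_Rating_1M.csv / Sentiment_Count_1M.csv plus one
# Sentiment_Rating_{period}M.csv (and the corresponding _Diff_ files) per requested period.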
| 31.514563
| 123
| 0.59273
|
72b14d4fcb5dfae7ae0a0e28c60b24f59ad27d84
| 35,489
|
py
|
Python
|
tensorflow/python/keras/initializers/initializers_v2.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 9
|
2019-06-05T06:48:07.000Z
|
2020-09-29T07:08:02.000Z
|
tensorflow/python/keras/initializers/initializers_v2.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 7
|
2021-11-10T20:21:23.000Z
|
2022-03-22T19:18:39.000Z
|
tensorflow/python/keras/initializers/initializers_v2.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 3
|
2019-06-28T02:28:27.000Z
|
2021-07-06T08:16:19.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializers for TF 2.
"""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util.tf_export import keras_export
_PARTITION_SHAPE = 'partition_shape'
_PARTITION_OFFSET = 'partition_offset'
@keras_export('keras.initializers.Initializer')
class Initializer(object):
"""Initializer base class: all Keras initializers inherit from this class.
Initializers should implement a `__call__` method with the following
signature:
```python
def __call__(self, shape, dtype=None, **kwargs):
# returns a tensor of shape `shape` and dtype `dtype`
# containing values drawn from a distribution of your choice.
```
Optionally, you can also implement the method `get_config` and the class
method `from_config` in order to support serialization -- just like with
any Keras object.
Here's a simple example: a random normal initializer.
```python
import tensorflow as tf
class ExampleRandomNormal(tf.keras.initializers.Initializer):
def __init__(self, mean, stddev):
self.mean = mean
self.stddev = stddev
def __call__(self, shape, dtype=None, **kwargs):
return tf.random.normal(
shape, mean=self.mean, stddev=self.stddev, dtype=dtype)
def get_config(self): # To support serialization
return {"mean": self.mean, "stddev": self.stddev}
```
Note that we don't have to implement `from_config` in the example above since
the constructor arguments of the class and the keys in the config returned by
`get_config` are the same. In this case, the default `from_config`
works fine.
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
**kwargs: Additional keyword arguments.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config`.
Returns:
A `tf.keras.initializers.Initializer` instance.
"""
config.pop('dtype', None)
return cls(**config)
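# --- Editorial sketch (not part of the original module) ----------------------
# A minimal initializer following the contract documented above: implement
# `__call__` and `get_config`, and rely on the default `from_config`.  The
# `_DemoConstantLike` name is hypothetical and exists only for illustration.
class _DemoConstantLike(Initializer):
  """Fills the requested shape with a single stored value."""

  def __init__(self, value=0.5):
    self.value = value

  def __call__(self, shape, dtype=None, **kwargs):
    # Reuse the module's dtype resolution helper, then emit a constant tensor.
    return constant_op.constant(self.value, dtype=_get_dtype(dtype), shape=shape)

  def get_config(self):
    # Keys match the constructor arguments, so the default `from_config` works.
    return {'value': self.value}
# ------------------------------------------------------------------------------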
@keras_export('keras.initializers.Zeros', 'keras.initializers.zeros', v1=[])
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
Also available via the shortcut function `tf.keras.initializers.zeros`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Zeros()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Zeros()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.zeros(shape, dtype)
@keras_export('keras.initializers.Ones', 'keras.initializers.ones', v1=[])
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
Also available via the shortcut function `tf.keras.initializers.ones`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Ones()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Ones()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
"""
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError('Expected numeric or boolean dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return array_ops.ones(shape, dtype)
@keras_export('keras.initializers.Constant',
'keras.initializers.constant',
v1=[])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
Also available via the shortcut function `tf.keras.initializers.constant`.
Only scalar values are allowed.
The constant value provided must be convertible to the dtype requested
when calling the initializer.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Constant(3.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
value: A Python scalar.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to `self.value`.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not specified,
`tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
del kwargs
return constant_op.constant(
self.value, dtype=_get_dtype(dtype), shape=shape)
def get_config(self):
return {'value': self.value}
@keras_export('keras.initializers.RandomUniform',
'keras.initializers.random_uniform',
v1=[])
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Also available via the shortcut function
`tf.keras.initializers.random_uniform`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomUniform(minval=0., maxval=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range of
random values to generate (inclusive).
maxval: A python scalar or a scalar tensor. Upper bound of the range of
random values to generate (exclusive).
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported. If not specified,
`tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`).
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _get_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
raise ValueError('Expected float or integer dtype, got %s.' % dtype)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_uniform(shape, self.minval,
self.maxval, dtype)
def get_config(self):
return {
'minval': self.minval,
'maxval': self.maxval,
'seed': self.seed
}
@keras_export('keras.initializers.RandomNormal',
'keras.initializers.random_normal',
v1=[])
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Also available via the shortcut function
`tf.keras.initializers.random_normal`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values to
generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the random
values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.random_normal(shape, self.mean, self.stddev,
dtype)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
@keras_export('keras.initializers.TruncatedNormal',
'keras.initializers.truncated_normal',
v1=[])
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
Also available via the shortcut function
`tf.keras.initializers.truncated_normal`.
The values generated are similar to values from a
`tf.keras.initializers.RandomNormal` initializer except that values more
than two standard deviations from the mean are
discarded and re-drawn.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.TruncatedNormal(mean=0., stddev=1.)
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to random normal values (truncated).
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
'mean': self.mean,
'stddev': self.stddev,
'seed': self.seed
}
@keras_export('keras.initializers.VarianceScaling',
'keras.initializers.variance_scaling',
v1=[])
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
Also available via the shortcut function
`tf.keras.initializers.variance_scaling`.
With `distribution="truncated_normal" or "untruncated_normal"`, samples are
drawn from a truncated/untruncated normal distribution with a mean of zero and
a standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`,
where `n` is:
- number of input units in the weight tensor, if `mode="fan_in"`
- number of output units, if `mode="fan_out"`
- average of the numbers of input and output units, if `mode="fan_avg"`
With `distribution="uniform"`, samples are drawn from a uniform distribution
within `[-limit, limit]`, where `limit = sqrt(3 * scale / n)`.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.VarianceScaling(
... scale=0.1, mode='fan_in', distribution='uniform')
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
"""
def __init__(self,
scale=1.0,
mode='fan_in',
distribution='truncated_normal',
seed=None):
if scale <= 0.:
raise ValueError('`scale` must be positive float.')
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError('Invalid `mode` argument:', mode)
distribution = distribution.lower()
# Compatibility with keras-team/keras.
if distribution == 'normal':
distribution = 'truncated_normal'
if distribution not in {'uniform', 'truncated_normal',
'untruncated_normal'}:
raise ValueError('Invalid `distribution` argument:', distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
defaults to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
scale = self.scale
fan_in, fan_out = _compute_fans(shape)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
if self.mode == 'fan_in':
scale /= max(1., fan_in)
elif self.mode == 'fan_out':
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == 'truncated_normal':
# constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
elif self.distribution == 'untruncated_normal':
stddev = math.sqrt(scale)
return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
else:
limit = math.sqrt(3.0 * scale)
return self._random_generator.random_uniform(shape, -limit, limit, dtype)
def get_config(self):
return {
'scale': self.scale,
'mode': self.mode,
'distribution': self.distribution,
'seed': self.seed
}
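# --- Editorial sketch (not part of the original module) ----------------------
# Worked example of the scaling rule documented above.  For a Dense kernel of
# shape (in_units, out_units) with mode="fan_in", the target standard deviation
# is sqrt(scale / fan_in); with distribution="truncated_normal", `__call__`
# divides by ~0.8796 so the post-truncation std matches this target.  The
# helper name is hypothetical and is never called at import time.
def _demo_variance_scaling_target_stddev(shape=(256, 128), scale=1.0):
  fan_in, _ = _compute_fans(shape)           # fan_in == 256 for this shape
  return math.sqrt(scale / max(1., fan_in))  # == 0.0625 here
# ------------------------------------------------------------------------------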
@keras_export('keras.initializers.Orthogonal',
'keras.initializers.orthogonal',
v1=[])
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
Also available via the shortcut function `tf.keras.initializers.orthogonal`.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Orthogonal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to an orthogonal matrix.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)
dtype = _assert_float_dtype(_get_dtype(dtype))
# Check the shape
if len(shape) < 2:
raise ValueError('The tensor to initialize must be '
'at least two-dimensional')
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = self._random_generator.random_normal(flat_shape, dtype=dtype)
# Compute the qr factorization
q, r = gen_linalg_ops.qr(a, full_matrices=False)
# Make Q uniform
d = array_ops.tensor_diag_part(r)
q *= math_ops.sign(d)
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {'gain': self.gain, 'seed': self.seed}
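# --- Editorial sketch (not part of the original module) ----------------------
# Quick orthogonality check for the initializer above: the columns of a tall 2D
# kernel drawn from `Orthogonal()` satisfy Q^T Q ~= gain^2 * I.  This assumes
# eager execution and NumPy availability; the helper name is hypothetical.
def _demo_orthogonal_check(rows=8, cols=4, gain=1.0):
  import numpy as np  # local import keeps the module's top-level imports intact
  q = np.asarray(Orthogonal(gain=gain, seed=0)((rows, cols), dtypes.float32))
  return np.allclose(q.T @ q, (gain ** 2) * np.eye(cols), atol=1e-5)
# ------------------------------------------------------------------------------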
@keras_export('keras.initializers.Identity',
'keras.initializers.identity',
v1=[])
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Also available via the shortcut function `tf.keras.initializers.identity`.
Only usable for generating 2D matrices.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.Identity()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.Identity()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.0):
self.gain = gain
def __call__(self, shape, dtype=None, **kwargs):
"""Returns a tensor object initialized to a 2D identity matrix.
Args:
shape: Shape of the tensor. It should have exactly rank 2.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used,
which defaults to `float32` unless you configured it otherwise
(via `tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
"""
_validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)
dtype = _assert_float_dtype(_get_dtype(dtype))
if len(shape) != 2:
raise ValueError(
'Identity matrix initializer can only be used for 2D matrices.')
initializer = linalg_ops.eye(*shape, dtype=dtype)
return self.gain * initializer
def get_config(self):
return {'gain': self.gain}
@keras_export('keras.initializers.GlorotUniform',
'keras.initializers.glorot_uniform',
v1=[])
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / (fan_in + fan_out))` (`fan_in` is the number of input units
in the weight tensor and `fan_out` is the number of output units).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotUniform, self).__init__(
scale=1.0,
mode='fan_avg',
distribution='uniform',
seed=seed)
def get_config(self):
return {'seed': self.seed}
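# --- Editorial sketch (not part of the original module) ----------------------
# The uniform limit quoted in the docstring above, written out explicitly: for
# a Dense kernel of shape (fan_in, fan_out) the samples fall inside
# [-limit, limit] with limit = sqrt(6 / (fan_in + fan_out)).  Hypothetical
# helper, not part of the public API.
def _demo_glorot_uniform_limit(fan_in=256, fan_out=128):
  return math.sqrt(6. / (fan_in + fan_out))  # == 0.125 for these sizes
# ------------------------------------------------------------------------------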
@keras_export('keras.initializers.GlorotNormal',
'keras.initializers.glorot_normal',
v1=[])
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
Also available via the shortcut function
`tf.keras.initializers.glorot_normal`.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(2 / (fan_in + fan_out))` where `fan_in` is the number of input units in
the weight tensor and `fan_out` is the number of output units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.GlorotNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotNormal, self).__init__(
scale=1.0,
mode='fan_avg',
distribution='truncated_normal',
seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.LecunNormal',
'keras.initializers.lecun_normal',
v1=[])
class LecunNormal(VarianceScaling):
"""Lecun normal initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_normal`.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
Draws samples from a truncated normal distribution centered on 0 with `stddev
= sqrt(1 / fan_in)` where `fan_in` is the number of input units in the weight
tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. Used to seed the random generator.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def __init__(self, seed=None):
super(LecunNormal, self).__init__(
scale=1., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.LecunUniform',
'keras.initializers.lecun_uniform',
v1=[])
class LecunUniform(VarianceScaling):
"""Lecun uniform initializer.
Also available via the shortcut function
`tf.keras.initializers.lecun_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`,
where `limit = sqrt(3 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.LecunUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
def __init__(self, seed=None):
super(LecunUniform, self).__init__(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.HeNormal',
'keras.initializers.he_normal',
v1=[])
class HeNormal(VarianceScaling):
"""He normal initializer.
Also available via the shortcut function
`tf.keras.initializers.he_normal`.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the
weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeNormal()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
def __init__(self, seed=None):
super(HeNormal, self).__init__(
scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)
def get_config(self):
return {'seed': self.seed}
@keras_export('keras.initializers.HeUniform',
'keras.initializers.he_uniform',
v1=[])
class HeUniform(VarianceScaling):
"""He uniform variance scaling initializer.
Also available via the shortcut function
`tf.keras.initializers.he_uniform`.
Draws samples from a uniform distribution within `[-limit, limit]`, where
`limit = sqrt(6 / fan_in)` (`fan_in` is the number of input units in the
weight tensor).
Examples:
>>> # Standalone usage:
>>> initializer = tf.keras.initializers.HeUniform()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = tf.keras.initializers.HeUniform()
>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer. An initializer created with a given seed will
always produce the same random tensor for a given shape and dtype.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
def __init__(self, seed=None):
super(HeUniform, self).__init__(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
def _get_dtype(dtype):
if dtype is None:
dtype = backend.floatx()
return dtypes.as_dtype(dtype)
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating:
raise ValueError('Expected floating point type, got %s.' % dtype)
return dtype
class _RandomGenerator(object):
"""Random generator that selects appropriate random ops."""
def __init__(self, seed=None):
super(_RandomGenerator, self).__init__()
if seed is not None:
# Stateless random ops requires 2-int seed.
self.seed = [seed, 0]
else:
self.seed = None
def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
"""A deterministic random normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_normal
else:
op = random_ops.random_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def random_uniform(self, shape, minval, maxval, dtype):
"""A deterministic random uniform if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_uniform
else:
op = random_ops.random_uniform
return op(
shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
def truncated_normal(self, shape, mean, stddev, dtype):
"""A deterministic truncated normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_truncated_normal
else:
op = random_ops.truncated_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
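# --- Editorial sketch (not part of the original module) ----------------------
# Demonstrates the seeding behaviour described above: when a seed is given the
# stateless ops are selected, so two draws with identical arguments return
# identical tensors.  Assumes eager execution; the helper name is hypothetical.
def _demo_random_generator_determinism():
  gen = _RandomGenerator(seed=42)
  a = gen.random_normal(shape=(2, 2), dtype=dtypes.float32)
  b = gen.random_normal(shape=(2, 2), dtype=dtypes.float32)
  return bool(math_ops.reduce_all(math_ops.equal(a, b)))  # True with a seed
# ------------------------------------------------------------------------------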
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of integer scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return int(fan_in), int(fan_out)
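# --- Editorial sketch (not part of the original module) ----------------------
# Example of the fan computation above for a 2D convolution kernel of shape
# (kernel_h, kernel_w, in_channels, out_channels): the receptive field size
# (kernel_h * kernel_w) multiplies both fans.  Hypothetical helper, never
# called at import time.
def _demo_compute_fans_conv():
  fan_in, fan_out = _compute_fans((3, 3, 16, 32))
  assert (fan_in, fan_out) == (3 * 3 * 16, 3 * 3 * 32)  # (144, 288)
  return fan_in, fan_out
# ------------------------------------------------------------------------------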
def _validate_kwargs(cls_name, kwargs, support_partition=True):
for kwarg in kwargs:
if kwarg not in [_PARTITION_SHAPE, _PARTITION_OFFSET]:
raise TypeError('Unknown keyword arguments: %s' % kwarg)
elif not support_partition:
raise ValueError('%s initializer doesn\'t support partition-related '
'arguments' % cls_name)
| 34.488824
| 162
| 0.690552
|
b6a72e359718d6a21b7fc28b4b4a0751e3e26821
| 4,561
|
py
|
Python
|
tensorflow_model_analysis/extractors/slice_key_extractor_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | 2
|
2019-10-20T05:40:09.000Z
|
2019-10-31T17:25:51.000Z
|
tensorflow_model_analysis/extractors/slice_key_extractor_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_model_analysis/extractors/slice_key_extractor_test.py
|
yifanmai/model-analysis
|
ae11318876ac6233ded77ac30c8aacc94da691d3
|
[
"Apache-2.0"
] | 1
|
2019-10-10T06:20:25.000Z
|
2019-10-10T06:20:25.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for slice_key_extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.slicer import slicer
def make_features_dict(features_dict):
result = {}
for key, value in features_dict.items():
result[key] = {'node': np.array(value)}
return result
def create_fpls():
fpl1 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['f'],
'age': [13],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
fpl2 = types.FeaturesPredictionsLabels(
input_ref=0,
features=make_features_dict({
'gender': ['m'],
'age': [10],
'interest': ['cars']
}),
predictions=make_features_dict({
'kb': [1],
}),
labels=make_features_dict({'ad_risk_score': [0]}))
return [fpl1, fpl2]
def wrap_fpl(fpl):
return {
constants.INPUT_KEY: fpl,
constants.FEATURES_PREDICTIONS_LABELS_KEY: fpl
}
class SliceTest(testutil.TensorflowModelAnalysisTest):
def testSliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor._ExtractSliceKeys([
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
]))
def check_result(got):
try:
self.assertEqual(2, len(got), 'got: %s' % got)
expected_results = sorted([[(), (('gender', 'f'),)],
[(), (('gender', 'm'),)]])
got_results = []
for item in got:
self.assertTrue(constants.SLICE_KEY_TYPES_KEY in item)
got_results.append(sorted(item[constants.SLICE_KEY_TYPES_KEY]))
self.assertEqual(sorted(got_results), sorted(expected_results))
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
def testMaterializedSliceKeys(self):
with beam.Pipeline() as pipeline:
fpls = create_fpls()
slice_keys_extracts = (
pipeline
| 'CreateTestInput' >> beam.Create(fpls)
| 'WrapFpls' >> beam.Map(wrap_fpl)
| 'ExtractSlices' >> slice_key_extractor._ExtractSliceKeys(
[
slicer.SingleSliceSpec(),
slicer.SingleSliceSpec(columns=['gender'])
],
materialize=True))
def check_result(got):
try:
self.assertEqual(2, len(got), 'got: %s' % got)
expected_results = sorted([
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:f']),
types.MaterializedColumn(
name=constants.SLICE_KEYS_KEY,
value=[b'Overall', b'gender:m'])
])
got_results = []
for item in got:
self.assertTrue(constants.SLICE_KEYS_KEY in item)
got_results.append(item[constants.SLICE_KEYS_KEY])
self.assertEqual(sorted(got_results), sorted(expected_results))
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(slice_keys_extracts, check_result)
if __name__ == '__main__':
tf.test.main()
| 32.578571
| 75
| 0.637799
|
60f3a7a9a73eed8afe761d313aba03759d8ba25d
| 4,423
|
py
|
Python
|
colour/difference/tests/test_cam02_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/difference/tests/test_cam02_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
colour/difference/tests/test_cam02_ucs.py
|
JGoldstone/colour
|
6829b363d5f0682bff0f4826995e7ceac189ff28
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.difference.cam02_ucs` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.difference import (
delta_E_CAM02LCD,
delta_E_CAM02SCD,
delta_E_CAM02UCS,
)
from colour.difference.cam02_ucs import delta_E_Luo2006
from colour.models.cam02_ucs import COEFFICIENTS_UCS_LUO2006
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestDelta_E_Luo2006',
]
class TestDelta_E_Luo2006(unittest.TestCase):
"""
Defines :func:`colour.difference.cam02_ucs.delta_E_Luo2006` definition unit
tests methods.
"""
def test_delta_E_Luo2006(self):
"""
Tests :func:`colour.difference.cam02_ucs.delta_E_Luo2006` definition.
"""
self.assertAlmostEqual(
delta_E_Luo2006(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013]),
COEFFICIENTS_UCS_LUO2006['CAM02-LCD']),
14.055546437777583,
places=7)
self.assertAlmostEqual(
delta_E_Luo2006(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013]),
COEFFICIENTS_UCS_LUO2006['CAM02-LCD']),
delta_E_CAM02LCD(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013])),
places=7)
self.assertAlmostEqual(
delta_E_Luo2006(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013]),
COEFFICIENTS_UCS_LUO2006['CAM02-SCD']),
delta_E_CAM02SCD(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013])),
places=7)
self.assertAlmostEqual(
delta_E_Luo2006(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013]),
COEFFICIENTS_UCS_LUO2006['CAM02-UCS']),
delta_E_CAM02UCS(
np.array([54.90433134, -0.08450395, -0.06854831]),
np.array([54.80352754, -3.96940084, -13.57591013])),
places=7)
def test_n_dimensional_delta_E_Luo2006(self):
"""
Tests :func:`colour.difference.cam02_ucs.delta_E_Luo2006` definition
n-dimensional arrays support.
"""
Jpapbp_1 = np.array([54.90433134, -0.08450395, -0.06854831])
Jpapbp_2 = np.array([54.80352754, -3.96940084, -13.57591013])
delta_E_p = delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-LCD'])
Jpapbp_1 = np.tile(Jpapbp_1, (6, 1))
Jpapbp_2 = np.tile(Jpapbp_2, (6, 1))
delta_E_p = np.tile(delta_E_p, 6)
np.testing.assert_almost_equal(
delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-LCD']),
delta_E_p,
decimal=7)
Jpapbp_1 = np.reshape(Jpapbp_1, (2, 3, 3))
Jpapbp_2 = np.reshape(Jpapbp_2, (2, 3, 3))
delta_E_p = np.reshape(delta_E_p, (2, 3))
np.testing.assert_almost_equal(
delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-LCD']),
delta_E_p,
decimal=7)
@ignore_numpy_errors
def test_nan_delta_E_Luo2006(self):
"""
Tests :func:`colour.difference.cam02_ucs.delta_E_Luo2006`
definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Jpapbp_1 = np.array(case)
Jpapbp_2 = np.array(case)
delta_E_Luo2006(Jpapbp_1, Jpapbp_2,
COEFFICIENTS_UCS_LUO2006['CAM02-LCD'])
if __name__ == '__main__':
unittest.main()
| 34.826772
| 79
| 0.597558
|
b98403697b502e8293de5c0c37092d0c9d538399
| 1,486
|
py
|
Python
|
mds/github.py
|
RMK0110/mds-provider
|
3bdb8e2a94c67b07e13763f1ede984feba944eaa
|
[
"MIT"
] | null | null | null |
mds/github.py
|
RMK0110/mds-provider
|
3bdb8e2a94c67b07e13763f1ede984feba944eaa
|
[
"MIT"
] | 1
|
2018-12-14T03:51:31.000Z
|
2018-12-14T03:51:31.000Z
|
mds/github.py
|
RMK0110/mds-provider
|
3bdb8e2a94c67b07e13763f1ede984feba944eaa
|
[
"MIT"
] | null | null | null |
"""
Data and helpers for MDS on GitHub.
"""
GITHUB = "https://github.com"
GITHUB_RAW = "https://raw.githubusercontent.com"
MDS_DEFAULT_REF = "master"
MDS_ORG_NAME = "CityOfLosAngeles"
MDS_REPO_NAME = "mobility-data-specification"
MDS = (GITHUB, MDS_ORG_NAME, MDS_REPO_NAME)
MDS_RAW = (GITHUB_RAW, MDS_ORG_NAME, MDS_REPO_NAME)
MDS_PROVIDER_REGISTRY = "/".join(MDS_RAW + ("{}/providers.csv",))
MDS_SCHEMA = "/".join(MDS_RAW + ("{}/provider/{}.json",))
def registry_url(ref=None):
"""
Helper to return a formatted provider registry URL.
Parameters:
ref: str, Version, optional
Reference the registry at the version specified, which could be any of:
* git branch name
* git commit hash (long or short)
* version str or Version instance
Return:
str
"""
ref = ref or MDS_DEFAULT_REF
return MDS_PROVIDER_REGISTRY.format(ref)
def schema_url(schema_type, ref=None):
"""
Helper to return a formatted schema URL.
Parameters:
schema_type: str
The type of MDS Provider schema ("status_changes" or "trips").
ref: str, Version, optional
Reference the schema at the version specified, which could be any of:
* git branch name
* git commit hash (long or short)
* version str or Version instance
Return:
str
"""
ref = ref or MDS_DEFAULT_REF
return MDS_SCHEMA.format(ref, schema_type)
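# --- Editorial sketch (not part of the original module) ----------------------
# Usage of the two helpers above.  The URLs come purely from string formatting;
# nothing is fetched from GitHub, and "0.3.2" is just an illustrative ref.
if __name__ == "__main__":
    print(registry_url())                    # .../master/providers.csv
    print(registry_url("0.3.2"))             # registry pinned to a tag/branch
    print(schema_url("trips", ref="0.3.2"))  # .../0.3.2/provider/trips.json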
| 26.070175
| 81
| 0.641992
|
a833b0d86543d75c6c2003937cd22b52086e448e
| 603
|
py
|
Python
|
benchmarks/sqlite/cost-factors/launch-time/time_estimation.py
|
vschiavoni/unine-twine
|
312d770585ea88c13ef135ef467fee779494fd90
|
[
"Apache-2.0"
] | 20
|
2021-04-05T20:05:57.000Z
|
2022-02-19T18:48:52.000Z
|
benchmarks/sqlite/cost-factors/launch-time/time_estimation.py
|
vschiavoni/unine-twine
|
312d770585ea88c13ef135ef467fee779494fd90
|
[
"Apache-2.0"
] | null | null | null |
benchmarks/sqlite/cost-factors/launch-time/time_estimation.py
|
vschiavoni/unine-twine
|
312d770585ea88c13ef135ef467fee779494fd90
|
[
"Apache-2.0"
] | 4
|
2021-02-22T14:52:21.000Z
|
2022-01-10T16:58:39.000Z
|
import time
import pexpect
import sys
# Start the timer for measuring process time
t_start = time.perf_counter()
# Spawn a child process with the enclave
analyzer = pexpect.spawn(" ".join(sys.argv[1:]), encoding='utf-8', timeout=None)
# Wait until the enclave is ready to be measured
analyzer.expect("Initialization done")
# End the timer for measuring process time
t_end = time.perf_counter()
# Send a dummy input to continue the child process
analyzer.sendline("0")
# Print the time to start the process in ms
print(int((t_end - t_start) * 1_000))
# Wait until the enclave ends
analyzer.wait()
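# --- Editorial note (not part of the original script) ------------------------
# Typical invocation, assuming the benchmark binary prints "Initialization
# done" and then blocks on stdin (the command name below is hypothetical):
#
#   python3 time_estimation.py ./sqlite-benchmark --enclave
#
# The script prints a single integer: the measured launch time in milliseconds.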
| 24.12
| 80
| 0.751244
|
d48f55e9ff6d76abaa8055ee3122ff9445f99cc7
| 38,346
|
py
|
Python
|
mne/tests/test_source_space.py
|
guiomar/mne-python
|
2d19800a07904cfe69c1ba290c3eaf712625c6ab
|
[
"BSD-3-Clause"
] | 2
|
2020-05-11T13:34:36.000Z
|
2020-05-28T19:43:21.000Z
|
mne/tests/test_source_space.py
|
guiomar/mne-python
|
2d19800a07904cfe69c1ba290c3eaf712625c6ab
|
[
"BSD-3-Clause"
] | 8
|
2018-03-03T19:59:16.000Z
|
2020-10-14T11:00:33.000Z
|
mne/tests/test_source_space.py
|
guiomar/mne-python
|
2d19800a07904cfe69c1ba290c3eaf712625c6ab
|
[
"BSD-3-Clause"
] | 4
|
2017-08-14T18:03:22.000Z
|
2021-03-04T06:55:29.000Z
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
from shutil import copytree
import pytest
import scipy
import numpy as np
from numpy.testing import (assert_array_equal, assert_allclose, assert_equal,
assert_array_less)
from mne.datasets import testing
import mne
from mne import (read_source_spaces, write_source_spaces,
setup_source_space, setup_volume_source_space,
add_source_space_distances, read_bem_surfaces,
morph_source_spaces, SourceEstimate, make_sphere_model,
compute_source_morph, pick_types,
read_bem_solution, read_freesurfer_lut,
read_trans)
from mne.fixes import _get_img_fdata
from mne.utils import (requires_nibabel, run_subprocess,
modified_env, requires_mne, check_version)
from mne.surface import _accumulate_normals, _triangle_neighbors
from mne.source_estimate import _get_src_type
from mne.source_space import (get_volume_labels_from_src,
_compare_source_spaces,
compute_distance_to_sensors)
from mne.io.pick import _picks_to_idx
from mne.io.constants import FIFF
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
aseg_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
fname_vol = op.join(subjects_dir, 'sample', 'bem',
'sample-volume-7mm-src.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem.fif')
fname_bem_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem-sol.fif')
fname_bem_3 = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem.fif')
fname_bem_3_sol = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_fs = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-ico-5-src.fif')
fname_morph = op.join(subjects_dir, 'sample', 'bem',
'sample-fsaverage-ico-5-src.fif')
fname_src = op.join(
data_path, 'subjects', 'sample', 'bem', 'sample-oct-4-src.fif')
fname_fwd = op.join(
data_path, 'MEG', 'sample', 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_small = op.join(base_dir, 'small-src.fif.gz')
fname_ave = op.join(base_dir, 'test-ave.fif')
rng = np.random.RandomState(0)
@testing.requires_testing_data
@pytest.mark.parametrize('picks, limits', [
('meg', (0.02, 0.250)),
(None, (0.01, 0.250)), # should be same as EEG
('eeg', (0.01, 0.250)),
])
def test_compute_distance_to_sensors(picks, limits):
"""Test computation of distances between vertices and sensors."""
src = read_source_spaces(fname_src)
fwd = mne.read_forward_solution(fname_fwd)
info = fwd['info']
trans = read_trans(trans_fname)
# trans = fwd['info']['mri_head_t']
if isinstance(picks, str):
kwargs = dict()
kwargs[picks] = True
if picks == 'eeg':
info['dev_head_t'] = None # should not break anything
use_picks = pick_types(info, **kwargs, exclude=())
else:
use_picks = picks
n_picks = len(_picks_to_idx(info, use_picks, 'data', exclude=()))
# Make sure same vertices are used in src and fwd
src[0]['inuse'] = fwd['src'][0]['inuse']
src[1]['inuse'] = fwd['src'][1]['inuse']
src[0]['nuse'] = fwd['src'][0]['nuse']
src[1]['nuse'] = fwd['src'][1]['nuse']
n_verts = src[0]['nuse'] + src[1]['nuse']
# minimum distances between vertices and sensors
depths = compute_distance_to_sensors(src, info=info, picks=use_picks,
trans=trans)
assert depths.shape == (n_verts, n_picks)
assert limits[0] * 5 > depths.min() # meaningful choice of limits
assert_array_less(limits[0], depths)
assert_array_less(depths, limits[1])
# If source space from Forward Solution and trans=None (i.e. identity) then
# depths2 should be the same as depth.
depths2 = compute_distance_to_sensors(src=fwd['src'], info=info,
picks=use_picks, trans=None)
assert_allclose(depths, depths2, rtol=1e-5)
if picks != 'eeg':
# this should break things
info['dev_head_t'] = None
with pytest.raises(ValueError,
match='Transform between meg<->head'):
compute_distance_to_sensors(src, info, use_picks, trans)
def _read_small_src(remove=True):
src = read_source_spaces(fname_small)
if remove:
for s in src:
s['nearest'] = None
s['nearest_dist'] = None
s['pinfo'] = None
return src
def test_add_patch_info(monkeypatch):
"""Test adding patch info to source space."""
# let's setup a small source space
src = _read_small_src(remove=False)
src_new = _read_small_src()
# test that no patch info is added for small dist_limit
add_source_space_distances(src_new, dist_limit=0.00001)
assert all(s['nearest'] is None for s in src_new)
assert all(s['nearest_dist'] is None for s in src_new)
assert all(s['pinfo'] is None for s in src_new)
# now let's use one that works (and test our warning-throwing)
with monkeypatch.context() as m:
m.setattr(mne.source_space, '_DIST_WARN_LIMIT', 1)
with pytest.warns(RuntimeWarning, match='Computing distances for 258'):
add_source_space_distances(src_new)
_compare_source_spaces(src, src_new, 'approx')
# Old SciPy can't do patch info only
src_new = _read_small_src()
with monkeypatch.context() as m:
m.setattr(scipy, '__version__', '1.0')
with pytest.raises(RuntimeError, match='required to calculate patch '):
add_source_space_distances(src_new, dist_limit=0)
# New SciPy can
if check_version('scipy', '1.3'):
src_nodist = src.copy()
for s in src_nodist:
for key in ('dist', 'dist_limit'):
s[key] = None
add_source_space_distances(src_new, dist_limit=0)
_compare_source_spaces(src, src_new, 'approx')
@testing.requires_testing_data
def test_add_source_space_distances_limited(tmp_path):
"""Test adding distances to source space with a dist_limit."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 200 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmp_path / 'temp-src.fif'
add_source_space_distances(src_new, dist_limit=0.007)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
for so, sn in zip(src, src_new):
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code
do.data[do.data > 0.007] = 0
do.eliminate_zeros()
# make sure we have some comparable distances
assert np.sum(do.data < 0.007) > 400
# do comparison over the region computed
d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_add_source_space_distances(tmp_path):
"""Test adding distances to source space."""
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 19 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = tmp_path / 'temp-src.fif'
n_jobs = 2
assert n_do % n_jobs != 0
with pytest.raises(ValueError, match='non-negative'):
add_source_space_distances(src_new, dist_limit=-1)
add_source_space_distances(src_new, n_jobs=n_jobs)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
# iterate over both hemispheres
for so, sn in zip(src, src_new):
v = so['vertno'][:n_do]
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code (some residual), and Python
ds = list()
for d in [do, dn]:
d.data[d.data > 0.007] = 0
d = d[v][:, v]
d.eliminate_zeros()
ds.append(d)
# make sure we actually calculated some comparable distances
assert np.sum(ds[0].data < 0.007) > 10
# do comparison
d = ds[0] - ds[1]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space(tmp_path):
"""Test setting up (and reading/writing) discrete source spaces."""
src = read_source_spaces(fname)
v = src[0]['vertno']
# let's make a discrete version with the C code, and with ours
temp_name = tmp_path / 'temp-src.fif'
# save
temp_pos = tmp_path / 'temp-pos.txt'
np.savetxt(str(temp_pos), np.c_[src[0]['rr'][v], src[0]['nn'][v]])
# let's try the spherical one (no bem or surf supplied)
run_subprocess(['mne_volume_source_space', '--meters',
'--pos', temp_pos, '--src', temp_name])
src_c = read_source_spaces(temp_name)
pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
src_new = setup_volume_source_space(pos=pos_dict)
assert src_new.kind == 'discrete'
_compare_source_spaces(src_c, src_new, mode='approx')
assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
rtol=1e-3, atol=1e-6)
assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
rtol=1e-3, atol=1e-6)
# now do writing
write_source_spaces(temp_name, src_c, overwrite=True)
src_c2 = read_source_spaces(temp_name)
_compare_source_spaces(src_c, src_c2)
# now do MRI
with pytest.raises(ValueError, match='Cannot create interpolation'):
setup_volume_source_space('sample', pos=pos_dict, mri=fname_mri)
assert repr(src_new).split('~')[0] == repr(src_c).split('~')[0]
assert ' kB' in repr(src_new)
assert src_new.kind == 'discrete'
assert _get_src_type(src_new, None) == 'discrete'
with pytest.raises(RuntimeError, match='finite'):
setup_volume_source_space(
pos=dict(rr=[[0, 0, float('inf')]], nn=[[0, 1, 0]]))
@requires_nibabel()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_volume_source_space(tmp_path):
"""Test setting up volume source spaces."""
src = read_source_spaces(fname_vol)
temp_name = tmp_path / 'temp-src.fif'
surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
surf['rr'] *= 1e3 # convert to mm
bem_sol = read_bem_solution(fname_bem_3_sol)
bem = read_bem_solution(fname_bem_sol)
# The one in the testing dataset (uses bem as bounds)
for this_bem, this_surf in zip(
(bem, fname_bem, fname_bem_3, bem_sol, fname_bem_3_sol, None),
(None, None, None, None, None, surf)):
src_new = setup_volume_source_space(
'sample', pos=7.0, bem=this_bem, surface=this_surf,
subjects_dir=subjects_dir)
write_source_spaces(temp_name, src_new, overwrite=True)
src[0]['subject_his_id'] = 'sample' # XXX: to make comparison pass
_compare_source_spaces(src, src_new, mode='approx')
del src_new
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
with pytest.raises(IOError, match='surface file.*not found'):
setup_volume_source_space(
'sample', surface='foo', mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'][-1]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
with pytest.raises(ValueError, match='BEM is not in MRI coord.* got head'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
bem['surfs'] = bem['surfs'][:-1] # no inner skull surf
with pytest.raises(ValueError, match='Could not get inner skul.*from BEM'):
setup_volume_source_space(
'sample', bem=bem, mri=fname_mri, subjects_dir=subjects_dir)
del bem
assert repr(src) == repr(src_new)
assert ' MB' in repr(src)
assert src.kind == 'volume'
# Spheres
sphere = make_sphere_model(r0=(0., 0., 0.), head_radius=0.1,
relative_radii=(0.9, 1.0), sigmas=(0.33, 1.0))
src = setup_volume_source_space(pos=10, sphere=(0., 0., 0., 0.09))
src_new = setup_volume_source_space(pos=10, sphere=sphere)
_compare_source_spaces(src, src_new, mode='exact')
with pytest.raises(ValueError, match='sphere, if str'):
setup_volume_source_space(sphere='foo')
# Need a radius
sphere = make_sphere_model(head_radius=None)
with pytest.raises(ValueError, match='be spherical with multiple layers'):
setup_volume_source_space(sphere=sphere)
@testing.requires_testing_data
@requires_mne
def test_other_volume_source_spaces(tmp_path):
"""Test setting up other volume source spaces."""
# these are split off because they require the MNE tools, and
# Travis doesn't seem to like them
# let's try the spherical one (no bem or surf supplied)
temp_name = tmp_path / 'temp-src.fif'
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name,
'--mri', fname_mri])
src = read_source_spaces(temp_name)
sphere = (0., 0., 0., 0.09)
src_new = setup_volume_source_space(None, pos=7.0, mri=fname_mri,
subjects_dir=subjects_dir,
sphere=sphere)
# we use a more accurate elimination criteria, so let's fix the MNE-C
# source space
assert len(src_new[0]['vertno']) == 7497
assert len(src) == 1
assert len(src_new) == 1
good_mask = np.in1d(src[0]['vertno'], src_new[0]['vertno'])
src[0]['inuse'][src[0]['vertno'][~good_mask]] = 0
assert src[0]['inuse'].sum() == 7497
src[0]['vertno'] = src[0]['vertno'][good_mask]
assert len(src[0]['vertno']) == 7497
src[0]['nuse'] = len(src[0]['vertno'])
assert src[0]['nuse'] == 7497
_compare_source_spaces(src_new, src, mode='approx')
assert 'volume, shape' in repr(src)
del src
del src_new
pytest.raises(ValueError, setup_volume_source_space, 'sample', pos=7.0,
sphere=[1., 1.], mri=fname_mri, # bad sphere
subjects_dir=subjects_dir)
# now without MRI argument, it should give an error when we try
# to read it
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name])
pytest.raises(ValueError, read_source_spaces, temp_name)
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_triangle_neighbors():
"""Test efficient vertex neighboring triangles for surfaces."""
this = read_source_spaces(fname)[0]
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
assert all(np.array_equal(nt1, nt2)
for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri']))
def test_accumulate_normals():
"""Test efficient normal accumulation for surfaces."""
# set up comparison
n_pts = int(1.6e5) # approx number in sample source space
n_tris = int(3.2e5)
# use all positive to make a worst-case for cumulative summation
# (real "nn" vectors will have both positive and negative values)
tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
tris = np.c_[tris, tris + 1, tris + 2]
tri_nn = rng.rand(n_tris, 3)
this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
# cut-and-paste from original code in surface.py:
# Find neighboring triangles and accumulate vertex normals
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
# vertex normals
verts = this['tris'][p]
this['nn'][verts, :] += this['tri_nn'][p, :]
nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
# the moment of truth (or reckoning)
assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_setup_source_space(tmp_path):
"""Test setting up ico, oct, and all source spaces."""
fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
# first lets test some input params
for spacing in ('oct', 'oct6e'):
with pytest.raises(ValueError, match='subdivision must be an integer'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
for spacing in ('oct0', 'oct-4'):
with pytest.raises(ValueError, match='oct subdivision must be >= 1'):
setup_source_space('sample', spacing=spacing,
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='ico subdivision must be >= 0'):
setup_source_space('sample', spacing='ico-4',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='7emm',
add_dist=False, subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='must be a string with values'):
setup_source_space('sample', spacing='alls',
add_dist=False, subjects_dir=subjects_dir)
# ico 5 (fsaverage) - write to temp file
src = read_source_spaces(fname_ico)
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('fsaverage', spacing='ico5',
subjects_dir=subjects_dir, add_dist=False)
_compare_source_spaces(src, src_new, mode='approx')
assert repr(src).split('~')[0] == repr(src_new).split('~')[0]
assert repr(src).count('surface (') == 2
assert_array_equal(src[0]['vertno'], np.arange(10242))
assert_array_equal(src[1]['vertno'], np.arange(10242))
# oct-6 (sample) - auto filename + IO
src = read_source_spaces(fname)
temp_name = tmp_path / 'temp-src.fif'
with pytest.warns(None): # sklearn equiv neighbors
src_new = setup_source_space('sample', spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
write_source_spaces(temp_name, src_new, overwrite=True)
assert_equal(src_new[0]['nuse'], 4098)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx', nearest=False)
# all source points - no file writing
src_new = setup_source_space('sample', spacing='all',
subjects_dir=subjects_dir, add_dist=False)
assert src_new[0]['nuse'] == len(src_new[0]['rr'])
assert src_new[1]['nuse'] == len(src_new[1]['rr'])
# dense source space to hit surf['inuse'] lines of _create_surf_spacing
pytest.raises(RuntimeError, setup_source_space, 'sample',
spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
@testing.requires_testing_data
@requires_mne
@pytest.mark.slowtest
@pytest.mark.timeout(60)
@pytest.mark.parametrize('spacing', [2, 7])
def test_setup_source_space_spacing(tmp_path, spacing):
"""Test setting up surface source spaces using a given spacing."""
copytree(op.join(subjects_dir, 'sample'), tmp_path / 'sample')
args = [] if spacing == 7 else ['--spacing', str(spacing)]
with modified_env(SUBJECTS_DIR=str(tmp_path), SUBJECT='sample'):
run_subprocess(['mne_setup_source_space'] + args)
src = read_source_spaces(
tmp_path / 'sample' / 'bem' / ('sample-%d-src.fif' % spacing)
)
src_new = setup_source_space('sample', spacing=spacing, add_dist=False,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx', nearest=True)
# Degenerate conditions
with pytest.raises(TypeError, match='spacing must be.*got.*float.*'):
setup_source_space('sample', 7., subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='spacing must be >= 2, got 1'):
setup_source_space('sample', 1, subjects_dir=subjects_dir)
@testing.requires_testing_data
def test_read_source_spaces():
"""Test reading of source space meshes."""
src = read_source_spaces(fname, patch_stats=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert lh_faces.min() == 0
assert lh_faces.max() == lh_points.shape[0] - 1
assert lh_use_faces.min() >= 0
assert lh_use_faces.max() <= lh_points.shape[0] - 1
assert rh_faces.min() == 0
assert rh_faces.max() == rh_points.shape[0] - 1
assert rh_use_faces.min() >= 0
assert rh_use_faces.max() <= rh_points.shape[0] - 1
@pytest.mark.slowtest
@testing.requires_testing_data
def test_write_source_space(tmp_path):
"""Test reading and writing of source spaces."""
src0 = read_source_spaces(fname, patch_stats=False)
temp_fname = tmp_path / 'tmp-src.fif'
write_source_spaces(temp_fname, src0)
src1 = read_source_spaces(temp_fname, patch_stats=False)
_compare_source_spaces(src0, src1)
# test warnings on bad filenames
src_badname = tmp_path / 'test-bad-name.fif.gz'
with pytest.warns(RuntimeWarning, match='-src.fif'):
write_source_spaces(src_badname, src0)
with pytest.warns(RuntimeWarning, match='-src.fif'):
read_source_spaces(src_badname)
@testing.requires_testing_data
@requires_nibabel()
@pytest.mark.parametrize('pass_ids', (True, False))
def test_source_space_from_label(tmp_path, pass_ids):
"""Test generating a source space from volume label."""
aseg_short = 'aseg.mgz'
atlas_ids, _ = read_freesurfer_lut()
volume_label = 'Left-Cerebellum-Cortex'
# Test pos as dict
pos = dict()
with pytest.raises(ValueError, match='mri must be None if pos is a dict'):
setup_volume_source_space(
'sample', pos=pos, volume_label=volume_label, mri=aseg_short,
subjects_dir=subjects_dir)
# Test T1.mgz provided
with pytest.raises(RuntimeError, match=r'Must use a \*aseg.mgz file'):
setup_volume_source_space(
'sample', mri='T1.mgz', volume_label=volume_label,
subjects_dir=subjects_dir)
# Test invalid volume label
mri = aseg_short
with pytest.raises(ValueError, match="'Left-Cerebral' not found.*Did you"):
setup_volume_source_space(
'sample', volume_label='Left-Cerebral', mri=mri,
subjects_dir=subjects_dir)
# These should be equivalent
if pass_ids:
use_volume_label = {volume_label: atlas_ids[volume_label]}
else:
use_volume_label = volume_label
# ensure it works even when not provided (detect that it should be aseg)
src = setup_volume_source_space(
'sample', volume_label=use_volume_label, add_interpolator=False,
subjects_dir=subjects_dir)
assert_equal(volume_label, src[0]['seg_name'])
assert src[0]['nuse'] == 404 # for our given pos and label
# test reading and writing
out_name = tmp_path / 'temp-src.fif'
write_source_spaces(out_name, src)
src_from_file = read_source_spaces(out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
@testing.requires_testing_data
@requires_nibabel()
def test_source_space_exclusive_complete(src_volume_labels):
"""Test that we produce exclusive and complete labels."""
# these two are neighbors and are quite large, so let's use them to
# ensure no overlaps
src, volume_labels, _ = src_volume_labels
ii = volume_labels.index('Left-Cerebral-White-Matter')
jj = volume_labels.index('Left-Cerebral-Cortex')
assert src[ii]['nuse'] == 755 # 2034 with pos=5, was 2832
assert src[jj]['nuse'] == 616 # 1520 with pos=5, was 2623
src_full = read_source_spaces(fname_vol)
# This implicitly checks for overlap because np.sort would preserve
# duplicates, and it checks for completeness because the sets should match
assert_array_equal(src_full[0]['vertno'],
np.sort(np.concatenate([s['vertno'] for s in src])))
for si, s in enumerate(src):
assert_allclose(src_full[0]['rr'], s['rr'], atol=1e-6)
# also check single_volume=True -- should be the same result
with pytest.warns(RuntimeWarning, match='Found no usable.*Left-vessel.*'):
src_single = setup_volume_source_space(
src[0]['subject_his_id'], 7., 'aseg.mgz', bem=fname_bem,
volume_label=volume_labels, single_volume=True,
add_interpolator=False, subjects_dir=subjects_dir)
assert len(src_single) == 1
assert 'Unknown+Left-Cerebral-White-Matter+Left-' in repr(src_single)
assert_array_equal(src_full[0]['vertno'], src_single[0]['vertno'])
@pytest.mark.timeout(60) # ~24 sec on Travis
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_read_volume_from_src():
"""Test reading volumes from a mixed source space."""
labels_vol = ['Left-Amygdala',
'Brain-Stem',
'Right-Amygdala']
src = read_source_spaces(fname)
# Setup a volume source space
vol_src = setup_volume_source_space('sample', mri=aseg_fname,
pos=5.0,
bem=fname_bem,
volume_label=labels_vol,
subjects_dir=subjects_dir)
# Generate the mixed source space, testing some list methods
assert src.kind == 'surface'
assert vol_src.kind == 'volume'
src += vol_src
assert src.kind == 'mixed'
assert vol_src.kind == 'volume'
assert src[:2].kind == 'surface'
assert src[2:].kind == 'volume'
assert src[:].kind == 'mixed'
with pytest.raises(RuntimeError, match='Invalid source space'):
src[::2]
volume_src = get_volume_labels_from_src(src, 'sample', subjects_dir)
volume_label = volume_src[0].name
volume_label = 'Left-' + volume_label.replace('-lh', '')
# Test
assert_equal(volume_label, src[2]['seg_name'])
assert_equal(src[2]['type'], 'vol')
@testing.requires_testing_data
@requires_nibabel()
def test_combine_source_spaces(tmp_path):
"""Test combining source spaces."""
import nibabel as nib
rng = np.random.RandomState(2)
volume_labels = ['Brain-Stem', 'Right-Hippocampus'] # two fairly large
# create a sparse surface source space to ensure all get mapped
# when mri_resolution=False
srf = setup_source_space('sample', 'oct3', add_dist=False,
subjects_dir=subjects_dir)
# setup 2 volume source spaces
vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_labels[0],
mri=aseg_fname, add_interpolator=False)
# setup a discrete source space
rr = rng.randint(0, 11, (20, 3)) * 5e-3
nn = np.zeros(rr.shape)
nn[:, -1] = 1
pos = {'rr': rr, 'nn': nn}
disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
pos=pos, verbose='error')
# combine source spaces
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
src = srf + vol + disc
assert src.kind == 'mixed'
assert srf.kind == 'surface'
assert vol.kind == 'volume'
assert disc.kind == 'discrete'
# test addition of source spaces
assert len(src) == 4
# test reading and writing
src_out_name = tmp_path / 'temp-src.fif'
src.save(src_out_name)
src_from_file = read_source_spaces(src_out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
assert repr(src).split('~')[0] == repr(src_from_file).split('~')[0]
assert_equal(src.kind, 'mixed')
# test that all source spaces are in MRI coordinates
coord_frames = np.array([s['coord_frame'] for s in src])
assert (coord_frames == FIFF.FIFFV_COORD_MRI).all()
# test errors for export_volume
image_fname = tmp_path / 'temp-image.mgz'
# source spaces with no volume
with pytest.raises(ValueError, match='at least one volume'):
srf.export_volume(image_fname, verbose='error')
# unrecognized source type
disc2 = disc.copy()
disc2[0]['type'] = 'kitty'
with pytest.raises(ValueError, match='Invalid value'):
src + disc2
del disc2
# unrecognized file type
bad_image_fname = tmp_path / 'temp-image.png'
# vertices outside vol space warning
pytest.raises(ValueError, src.export_volume, bad_image_fname,
verbose='error')
# mixed coordinate frames
disc3 = disc.copy()
disc3[0]['coord_frame'] = 10
src_mixed_coord = src + disc3
with pytest.raises(ValueError, match='must be in head coordinates'):
src_mixed_coord.export_volume(image_fname, verbose='error')
# now actually write it
fname_img = tmp_path / 'img.nii'
for mri_resolution in (False, 'sparse', True):
for src, up in ((vol, 705),
(srf + vol, 27272),
(disc + vol, 705)):
src.export_volume(
fname_img, use_lut=False,
mri_resolution=mri_resolution, overwrite=True)
img_data = _get_img_fdata(nib.load(str(fname_img)))
n_src = img_data.astype(bool).sum()
n_want = sum(s['nuse'] for s in src)
if mri_resolution is True:
n_want += up
assert n_src == n_want, src
# gh-8004
temp_aseg = tmp_path / 'aseg.mgz'
aseg_img = nib.load(aseg_fname)
aseg_affine = aseg_img.affine
aseg_affine[:3, :3] *= 0.7
new_aseg = nib.MGHImage(aseg_img.dataobj, aseg_affine)
nib.save(new_aseg, str(temp_aseg))
lh_cereb = mne.setup_volume_source_space(
"sample", mri=temp_aseg, volume_label="Left-Cerebellum-Cortex",
add_interpolator=False, subjects_dir=subjects_dir)
src = srf + lh_cereb
with pytest.warns(RuntimeWarning, match='2 surf vertices lay outside'):
src.export_volume(image_fname, mri_resolution="sparse", overwrite=True)
@testing.requires_testing_data
def test_morph_source_spaces():
"""Test morphing of source spaces."""
src = read_source_spaces(fname_fs)
src_morph = read_source_spaces(fname_morph)
src_morph_py = morph_source_spaces(src, 'sample',
subjects_dir=subjects_dir)
_compare_source_spaces(src_morph, src_morph_py, mode='approx')
@pytest.mark.timeout(60) # can be slow on OSX Travis
@pytest.mark.slowtest
@testing.requires_testing_data
def test_morphed_source_space_return():
"""Test returning a morphed source space to the original subject."""
# let's create some random data on fsaverage
data = rng.randn(20484, 1)
tmin, tstep = 0, 1.
src_fs = read_source_spaces(fname_fs)
stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
tmin, tstep, 'fsaverage')
n_verts_fs = sum(len(s['vertno']) for s in src_fs)
# Create our morph source space
src_morph = morph_source_spaces(src_fs, 'sample',
subjects_dir=subjects_dir)
n_verts_sample = sum(len(s['vertno']) for s in src_morph)
assert n_verts_fs == n_verts_sample
# Morph the data over using standard methods
stc_morph = compute_source_morph(
src_fs, 'fsaverage', 'sample',
spacing=[s['vertno'] for s in src_morph], smooth=1,
subjects_dir=subjects_dir, warn=False).apply(stc_fs)
assert stc_morph.data.shape[0] == n_verts_sample
# We can now pretend like this was real data we got e.g. from an inverse.
# To be complete, let's remove some vertices
keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
for v in stc_morph.vertices]
stc_morph = SourceEstimate(
np.concatenate([stc_morph.lh_data[keeps[0]],
stc_morph.rh_data[keeps[1]]]),
[v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
'sample')
# Return it to the original subject
stc_morph_return = stc_morph.to_original_src(
src_fs, subjects_dir=subjects_dir)
# This should fail (has too many verts in SourceMorph)
with pytest.warns(RuntimeWarning, match='vertices not included'):
morph = compute_source_morph(
src_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir)
with pytest.raises(ValueError, match='vertices do not match'):
morph.apply(stc_morph)
# Compare to the original data
with pytest.warns(RuntimeWarning, match='vertices not included'):
stc_morph_morph = compute_source_morph(
src=stc_morph, subject_from='sample',
spacing=stc_morph_return.vertices, smooth=1,
subjects_dir=subjects_dir).apply(stc_morph)
assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Explicitly test having two vertices map to the same target vertex. We
# simulate this by having two vertices be at the same position.
src_fs2 = src_fs.copy()
vert1, vert2 = src_fs2[0]['vertno'][:2]
src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
stc_morph_return = stc_morph.to_original_src(
src_fs2, subjects_dir=subjects_dir)
# test to_original_src method result equality
for ii in range(2):
assert_array_equal(stc_morph_return.vertices[ii],
stc_morph_morph.vertices[ii])
# These will not match perfectly because morphing pushes data around
corr = np.corrcoef(stc_morph_return.data[:, 0],
stc_morph_morph.data[:, 0])[0, 1]
assert corr > 0.99, corr
# Degenerate cases
stc_morph.subject = None # no .subject provided
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
stc_morph.subject = 'sample'
del src_fs[0]['subject_his_id'] # no name in src_fsaverage
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'fsaverage' # name mismatch
pytest.raises(ValueError, stc_morph.to_original_src,
src_fs, subject_orig='foo', subjects_dir=subjects_dir)
src_fs[0]['subject_his_id'] = 'sample'
src = read_source_spaces(fname) # wrong source space
pytest.raises(RuntimeError, stc_morph.to_original_src,
src, subjects_dir=subjects_dir)
# The following code was used to generate small-src.fif.gz.
# Unfortunately the C code bombs when trying to add source space distances,
# possibly due to incomplete "faking" of a smaller surface on our part here.
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import mne
data_path = mne.datasets.sample.data_path()
src = mne.setup_source_space('sample', fname=None, spacing='oct5')
hemis = ['lh', 'rh']
fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
vs = list()
for s, fname in zip(src, fnames):
coords = s['rr'][s['vertno']]
vs.append(s['vertno'])
idx = -1 * np.ones(len(s['rr']))
idx[s['vertno']] = np.arange(s['nuse'])
faces = s['use_tris']
faces = idx[faces]
mne.write_surface(fname, coords, faces)
# we need to move sphere surfaces
spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
for s in spheres:
os.rename(s, s + '.bak')
try:
for s, v in zip(spheres, vs):
coords, faces = mne.read_surface(s + '.bak')
coords = coords[v]
mne.write_surface(s, coords, faces)
src = mne.setup_source_space('sample', fname=None, spacing='oct4',
surface='decimated')
finally:
for s in spheres:
os.rename(s + '.bak', s)
fname = 'small-src.fif'
fname_gz = fname + '.gz'
mne.write_source_spaces(fname, src)
mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
'--srcp', fname])
mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
"""
41.011765 | 79 | 0.648699
afc71626c25d674b62980ce4631c1dfbc7b45e7c | 318 | py | Python | app/test.py | williamFalcon/docker-flask-nginx-uwsgi-miniconda-3.4 | 4608969c974a9a506b905a3f69d1df0570b1376f | ["MIT"] | 4 | 2018-06-22T05:36:39.000Z | 2021-12-26T15:39:51.000Z | app/test.py | williamFalcon/docker-flask-nginx-uwsgi-miniconda-3.4 | 4608969c974a9a506b905a3f69d1df0570b1376f | ["MIT"] | null | null | null | app/test.py | williamFalcon/docker-flask-nginx-uwsgi-miniconda-3.4 | 4608969c974a9a506b905a3f69d1df0570b1376f | ["MIT"] | 3 | 2019-02-25T12:50:06.000Z | 2019-10-25T16:45:14.000Z
import unittest
from application import app
class TestPost(unittest.TestCase):
def test_post(self):
self.test_app = app.test_client()
response = self.test_app.get('/', content_type='html/text')
self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
21.2 | 67 | 0.688679
cbbad0789d712cbf7b1deb2398438c2b41b7234f | 7,335 | py | Python | args.py | isamu-isozaki/hidden-networks | 7dcb96a7de43b65ffde176d771f88b5ecedb84ab | ["Apache-2.0"] | null | null | null | args.py | isamu-isozaki/hidden-networks | 7dcb96a7de43b65ffde176d771f88b5ecedb84ab | ["Apache-2.0"] | null | null | null | args.py | isamu-isozaki/hidden-networks | 7dcb96a7de43b65ffde176d771f88b5ecedb84ab | ["Apache-2.0"] | null | null | null
import argparse
import sys
import yaml
from configs import parser as _parser
args = None
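# Command-line interface for the training script: dataset and architecture
# selection, optimizer and learning-rate schedule, pruning/subnet options,
# and checkpoint/resume handling. Values from a YAML file passed via
# --config are loaded as defaults, and flags given explicitly on the
# command line override them (see get_config below).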
def parse_arguments():
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# General Config
parser.add_argument(
"--data", help="path to dataset base directory", default="/mnt/disk1/datasets"
)
parser.add_argument("--optimizer", help="Which optimizer to use", default="sgd")
parser.add_argument("--set", help="name of dataset", type=str, default="ImageNet")
parser.add_argument(
"-a", "--arch", metavar="ARCH", default="ResNet18", help="model architecture"
)
parser.add_argument(
"--config", help="Config file to use (see configs dir)", default=None
)
parser.add_argument(
"--log-dir", help="Where to save the runs. If None use ./runs", default=None
)
parser.add_argument(
"-j",
"--workers",
default=20,
type=int,
metavar="N",
help="number of data loading workers (default: 20)",
)
parser.add_argument(
"--epochs",
default=90,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch",
default=None,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256), this is the total "
"batch size of all GPUs on the current node when "
"using Data Parallel or Distributed Data Parallel",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.1,
type=float,
metavar="LR",
help="initial learning rate",
dest="lr",
)
parser.add_argument(
"--warmup_length", default=0, type=int, help="Number of warmup iterations"
)
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--wd",
"--weight-decay",
default=1e-4,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
dest="weight_decay",
)
parser.add_argument(
"-p",
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument("--num-classes", default=10, type=int)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--pretrained",
dest="pretrained",
default=None,
type=str,
help="use pre-trained model",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument(
"--multigpu",
default=None,
type=lambda x: [int(a) for a in x.split(",")],
help="Which GPUs to use for multigpu training",
)
parser.add_argument(
"--gpu",
default=None,
type=str,
help="The name of the gpu",
)
# Learning Rate Policy Specific
parser.add_argument(
"--lr-policy", default="constant_lr", help="Policy for the learning rate."
)
parser.add_argument(
"--multistep-lr-adjust", default=30, type=int, help="Interval to drop lr"
)
parser.add_argument(
"--multistep-lr-gamma", default=0.1, type=int, help="Multistep multiplier"
)
parser.add_argument(
"--name", default=None, type=str, help="Experiment name to append to filepath"
)
parser.add_argument(
"--save_every", default=-1, type=int, help="Save every ___ epochs"
)
parser.add_argument(
"--prune-rate",
default=0.0,
help="Amount of pruning to do during sparse training",
type=float,
)
parser.add_argument(
"--low-data", default=1, help="Amount of data to use", type=float
)
parser.add_argument(
"--width-mult",
default=1.0,
help="How much to vary the width of the network.",
type=float,
)
parser.add_argument(
"--nesterov",
default=False,
action="store_true",
help="Whether or not to use nesterov for SGD",
)
parser.add_argument(
"--random-subnet",
action="store_true",
help="Whether or not to use a random subnet when fine tuning for lottery experiments",
)
parser.add_argument(
"--one-batch",
action="store_true",
help="One batch train set for debugging purposes (test overfitting)",
)
parser.add_argument(
"--conv-type", type=str, default=None, help="What kind of sparsity to use"
)
parser.add_argument(
"--freeze-weights",
action="store_true",
help="Whether or not to train only subnet (this freezes weights)",
)
parser.add_argument("--mode", default="fan_in", help="Weight initialization mode")
parser.add_argument(
"--nonlinearity", default="relu", help="Nonlinearity used by initialization"
)
parser.add_argument("--bn-type", default=None, help="BatchNorm type")
parser.add_argument(
"--init", default="kaiming_normal", help="Weight initialization modifications"
)
parser.add_argument(
"--no-bn-decay", action="store_true", default=False, help="No batchnorm decay"
)
parser.add_argument(
"--scale-fan", action="store_true", default=False, help="scale fan"
)
parser.add_argument(
"--first-layer-dense", action="store_true", help="First layer dense or sparse"
)
parser.add_argument(
"--last-layer-dense", action="store_true", help="Last layer dense or sparse"
)
parser.add_argument(
"--label-smoothing",
type=float,
help="Label smoothing to use, default 0.0",
default=None,
)
parser.add_argument(
"--first-layer-type", type=str, default=None, help="Conv type of first layer"
)
parser.add_argument(
"--trainer", type=str, default="default", help="cs, ss, or standard training"
)
parser.add_argument(
"--score-init-constant",
type=float,
default=None,
help="Sample Baseline Subnet Init",
)
args = parser.parse_args()
# Allow for use from notebook without config file
if len(sys.argv) > 1:
get_config(args)
return args
def get_config(args):
# get commands from command line
override_args = _parser.argv_to_vars(sys.argv)
# load yaml file
yaml_txt = open(args.config).read()
# override args
loaded_yaml = yaml.load(yaml_txt, Loader=yaml.FullLoader)
for v in override_args:
loaded_yaml[v] = getattr(args, v)
print(f"=> Reading YAML config from {args.config}")
args.__dict__.update(loaded_yaml)
def run_args():
global args
if args is None:
args = parse_arguments()
run_args()
28.652344 | 94 | 0.591411
cf8de8a0e73919dec44958a50d8da1fbd8487cc1 | 1,043 | py | Python | app/core/migrations/0004_recipe.py | MortenTobiasNielsen/recipe-app-api | 1bd1b7e8796f867a8c1773fbacfb5003ab2942d2 | ["MIT"] | null | null | null | app/core/migrations/0004_recipe.py | MortenTobiasNielsen/recipe-app-api | 1bd1b7e8796f867a8c1773fbacfb5003ab2942d2 | ["MIT"] | null | null | null | app/core/migrations/0004_recipe.py | MortenTobiasNielsen/recipe-app-api | 1bd1b7e8796f867a8c1773fbacfb5003ab2942d2 | ["MIT"] | null | null | null
# Generated by Django 2.1.15 on 2020-12-13 13:38
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('Ingredients', models.ManyToManyField(to='core.Ingredient')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
35.965517 | 118 | 0.604027
2d64246d761db74873b590f0a4be9a67f35af981 | 735 | py | Python | filesystemManager.py | noahlessard/folderManager | f76175eea3f1d76edee60230ff6e6a790d2412ed | ["CC0-1.0"] | null | null | null | filesystemManager.py | noahlessard/folderManager | f76175eea3f1d76edee60230ff6e6a790d2412ed | ["CC0-1.0"] | null | null | null | filesystemManager.py | noahlessard/folderManager | f76175eea3f1d76edee60230ff6e6a790d2412ed | ["CC0-1.0"] | null | null | null
import os
from pathlib import Path
import time
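# Watch the backups folder in an endless loop: once more than three .zip
# backups have accumulated, keep only the most recently modified archive
# (largest mtime) and delete the rest; otherwise sleep ten seconds and
# check again.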
while True:
path = Path("C:/Users/Administrator/Desktop/Zack\'s New World/backups").rglob('*.zip')
fileArray = []
nameArray = []
for name in path:
fileArray.append(os.path.getmtime(name))
nameArray.append(name)
print ('name= ', name, '\n')
if len(nameArray) > 3:
KeepNameNum = fileArray.index(max(fileArray))
print(nameArray[KeepNameNum])
for name in nameArray:
if name != nameArray[KeepNameNum]:
print("deleting ", name, "... \n")
os.remove(name)
continue
else:
time.sleep(10)
continue
21.617647 | 91 | 0.529252
d5aeaf5b6ab7358d005d29e65bb9fc6a9247f441 | 546 | py | Python | Face Reconstruction/Fast Few-shot Face alignment by Reconstruction/landmarks/lmconfig.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | ["MIT"] | 175 | 2020-10-02T13:42:50.000Z | 2022-03-30T15:57:12.000Z | Face Reconstruction/Fast Few-shot Face alignment by Reconstruction/landmarks/lmconfig.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | ["MIT"] | 704 | 2020-09-30T10:44:13.000Z | 2022-03-30T07:18:28.000Z | Face Reconstruction/Fast Few-shot Face alignment by Reconstruction/landmarks/lmconfig.py | swapnilgarg7/Face-X | fab21bf667fa7387b8e73e5a1d72fcba4fba2818 | ["MIT"] | 342 | 2020-10-02T14:04:49.000Z | 2022-03-31T10:14:20.000Z
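# Configuration constants for landmark prediction: heatmap/normalisation
# settings plus index subsets (6, 9, 12, 14, 19 and 22 points) picked out
# of the full landmark annotation scheme.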
LANDMARK_TARGET = 'multi_channel'
MIN_LANDMARK_CONF = 0.8
LANDMARK_OCULAR_NORM = 'outer'
PREDICT_HEATMAP = True
HEATMAP_SIZE = 128
LANDMARKS_6 = [36, 39, 42, 45, 48, 54]
LANDMARKS_9 = [30, 36, 39, 42, 45, 48, 51, 54, 57]
LANDMARKS_12 = [21, 22, 27, 30, 36, 39, 42, 45, 48, 51, 54, 57]
LANDMARKS_19 = [0, 4, 8, 12, 16, 17, 21, 22, 26, 27, 30, 36, 39, 42, 45, 48, 51, 54, 57]
LANDMARKS_22 = [0, 4, 8, 12, 16, 17, 21, 22, 26, 27, 28, 29, 30, 36, 39, 42, 45, 48, 51, 54, 57]
LANDMARKS_14 = [17, 26, 21, 22, 27, 30, 36, 39, 42, 45, 48, 51, 54, 57]
36.4 | 96 | 0.608059
f89b238506bec89cdc22b0e1f9818be98993b50c | 6,361 | py | Python | market_data.py | alex-bormotov/AXE-Bot-open | 51b7bd768bc0f08ecc9ed81becbc806f41940ed1 | ["MIT"] | 18 | 2019-11-20T15:21:28.000Z | 2021-12-19T22:21:28.000Z | market_data.py | alex-bormotov/AXE-Bot-open | 51b7bd768bc0f08ecc9ed81becbc806f41940ed1 | ["MIT"] | null | null | null | market_data.py | alex-bormotov/AXE-Bot-open | 51b7bd768bc0f08ecc9ed81becbc806f41940ed1 | ["MIT"] | 6 | 2019-11-13T10:11:07.000Z | 2021-12-03T01:03:34.000Z
import time
import json
import requests
import pandas as pd
import datetime as dt
from time import sleep
from notification import notificator
from requests.adapters import HTTPAdapter
from exchange import api_requests_frequency
show_error = "YES"
api_requests_frequency = api_requests_frequency()
# GET PRICE FROM CRYPTOWAT.CH:
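# check_coin_price polls the Cryptowatch REST endpoint for the pair's last
# price and keeps retrying (sleeping api_requests_frequency seconds between
# attempts) until a numeric value comes back.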
def check_coin_price(coin_pair_for_get_bars):
def get_price():
# https://developer.cryptowat.ch/reference/rest-api-getting-started
root_url = "https://api.cryptowat.ch/markets/binance/"
url = root_url + coin_pair_for_get_bars.lower() + "/price"
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
req_session = requests.Session()
req_session.headers.update({"User-Agent": user_agent})
req_session.mount(url, HTTPAdapter(max_retries=3))
try:
while True:
req = req_session.get(url, headers={"User-Agent": user_agent})
if req.ok:
if 'price' in req_session.get(url, headers={"User-Agent": user_agent}).text:
price = json.loads(req.text)["result"]["price"]
if type(price) is int or type(price) is float:
return float(price)
break
else:
time.sleep(api_requests_frequency)
continue
else:
time.sleep(api_requests_frequency)
continue
else:
time.sleep(api_requests_frequency)
continue
except Exception:
pass
    while True:
        price = get_price()
        if price is not None:
            return price
# GET DATA FROM CRYPTOWAT.CH:
def get_bars(symbol, interval):
# https://developer.cryptowat.ch/reference/rest-api-getting-started
if interval == "1m":
periods_seconds = 60
if interval == "5m":
periods_seconds = 300
if interval == "15m":
periods_seconds = 900
if interval == "30m":
periods_seconds = 1800
if interval == "1h":
periods_seconds = 3600
if interval == "4h":
periods_seconds = 14400
if interval == "1d":
periods_seconds = 86400
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
url = f'https://api.cryptowat.ch/markets/binance/{symbol.lower()}/ohlc?periods={periods_seconds}'
req_session = requests.Session()
req_session.headers.update({"User-Agent": user_agent})
req_session.mount(url, HTTPAdapter(max_retries=3))
try:
req = req_session.get(url, headers={"User-Agent": user_agent})
if req.ok:
df = pd.DataFrame(json.loads(req.text)["result"][str(periods_seconds)])
df.columns = [
"open_time",
"open",
"high",
"low",
"close",
"volume",
"qav",
]
df.index = [dt.datetime.fromtimestamp(x) for x in df.open_time]
df.open = df.open.astype(float)
df.close = df.close.astype(float)
return df
    except Exception as e:
        # df may be unbound if the request or JSON parsing failed early,
        # so only report the exception itself.
        if show_error == "YES":
            notificator(str(e) + ' from get_bars')
# GET PRICE FROM BINANCE (CCXT):
# from exchange import exchange
# exchange = exchange()
# def check_coin_price(coin_pair):
# try:
# return exchange.fetch_ticker(coin_pair)["last"]
#
# except Exception as e:
# if show_error == "YES":
# notificator(f'coin_pair must be like ETH/USDT (for CCXT), received {coin_pair}')
# notificator(
# str(e) + " this shit happened in market_data.py (check_coin_price)"
# )
# GET PRICE FROM BINANCE (requests):
# def check_coin_price(coin_pair_for_get_bars):
# root_url = "https://api.binance.com/api/v3/ticker/24hr?symbol="
# url = root_url + coin_pair_for_get_bars
# user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
#
# req_session = requests.Session()
# req_session.headers.update({"User-Agent": user_agent})
# req_session.mount(url, HTTPAdapter(max_retries=3))
#
# try:
# req = req_session.get(url, headers={"User-Agent": user_agent})
# price = json.loads(req.text)["lastPrice"]
# return price
#
# except Exception as e:
# if show_error == "YES":
# notificator(f'coin_pair must be like ETHUSDT, received {coin_pair_for_get_bars}')
# notificator(
# str(e) + " this shit happened in market_data.py (check_coin_price)"
# )
# GET DATA FROM BINANCE:
# def get_bars(symbol, interval):
# root_url = "https://api.binance.com/api/v1/klines"
# user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
# url = root_url + "?symbol=" + symbol + "&interval=" + interval
#
# req_session = requests.Session()
# req_session.headers.update({"User-Agent": user_agent})
# req_session.mount(url, HTTPAdapter(max_retries=3))
#
# try:
# req = req_session.get(url, headers={"User-Agent": user_agent})
#
# if req.ok:
# df = pd.DataFrame(json.loads(req.text))
# df.columns = [
# "open_time",
# "open",
# "high",
# "low",
# "close",
# "volume",
# "close_time",
# "qav",
# "num_trades",
# "taker_base_vol",
# "taker_quote_vol",
# "ignore",
# ]
# df.index = [dt.datetime.fromtimestamp(x / 1000.0) for x in df.close_time]
# df.open = df.open.astype(float)
# df.close = df.close.astype(float)
#
# return df
#
# except Exception as e:
# if show_error == "YES":
# notificator(str(e))
35.143646 | 143 | 0.567521
af2d5bf91996788bd7279e96df8eea8ea1969a70 | 2,669 | py | Python | meal_planner/recipes/forms.py | brandonw/meal_planner | 63200e6987b2794313492bfd3153d68b762f9b84 | ["BSD-3-Clause"] | 1 | 2016-06-16T15:03:34.000Z | 2016-06-16T15:03:34.000Z | meal_planner/recipes/forms.py | brandonw/meal_planner | 63200e6987b2794313492bfd3153d68b762f9b84 | ["BSD-3-Clause"] | null | null | null | meal_planner/recipes/forms.py | brandonw/meal_planner | 63200e6987b2794313492bfd3153d68b762f9b84 | ["BSD-3-Clause"] | null | null | null
from django import forms
from django.core.urlresolvers import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from crispy_forms.bootstrap import StrictButton
from recipes.models import Recipe
class RecipeHomeForm(forms.Form):
NAME = 'name'
RATING = 'rating'
SORT_CHOICES = (
(NAME, 'Name'),
(RATING, 'Rating'),
)
sort_by = forms.ChoiceField(
choices=SORT_CHOICES,
label='Sort by', required=False)
class RecipeCreateForm(forms.ModelForm):
class Meta:
model = Recipe
fields = ['name', 'rating', 'url', 'description', 'tags']
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(RecipeCreateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'recipe-create-form'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-8'
self.helper.layout = Layout(
'name',
'rating',
'url',
'description',
'tags',
StrictButton(
'Add',
css_class='col-sm-offset-2 btn btn-default', type='submit'),
)
self.helper.form_action = reverse('recipe-add')
def clean(self):
cleaned_data = super(RecipeCreateForm, self).clean()
name = cleaned_data.get('name')
if name and Recipe.objects \
.filter(user=self.user) \
.filter(name=name) \
.exists():
raise forms.ValidationError('%s recipe already exists.' % name)
class RecipeUpdateForm(forms.ModelForm):
class Meta:
model = Recipe
fields = ['rating', 'url', 'description', 'tags']
def __init__(self, *args, **kwargs):
super(RecipeUpdateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'recipe-update-form'
self.helper.form_class = 'form-horizontal'
self.helper.label_class = 'col-sm-2'
self.helper.field_class = 'col-sm-8'
self.helper.layout = Layout(
'rating',
'url',
'description',
'tags',
StrictButton(
'Update',
css_class='col-sm-offset-2 btn btn-default', type='submit'),
)
self.helper.form_action = reverse(
'recipe-update',
args=[self.instance.slug])
class RecipeUpdateRatingForm(forms.ModelForm):
class Meta:
model = Recipe
fields = ['rating']
28.698925 | 76 | 0.576995