Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | dtype | observed range / cardinality |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
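The columns above are the per-file layout used by The Stack-style code datasets. As a hedged sketch (the dataset name, config directory, and split below are assumptions, not stated anywhere in this dump), a split with this schema could be streamed with the Hugging Face datasets library:

from datasets import load_dataset

# Assumed dataset/config; substitute whichever dump these records came from.
ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)
for row in ds.take(1):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print(row["size"], row["avg_line_length"], row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the source file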
Record 1
hexsha: 8cf134599c2f115df844c3fa206b23bc10c5d4ff | size: 2,173 | ext: py | lang: Python
repo path: gym_miniworld/envs/tmaze.py | repo name: CyberHolmes/gym-miniworld | head hexsha: f0665b4af2f89f43fbad3aa408e45ece29c624ae | licenses: ["Apache-2.0"]
(the max_stars, max_issues, and max_forks variants of path/name/head/licenses are identical; all star, issue, and fork counts and event datetimes are null)
content:
import numpy as np
import math
from ..miniworld import MiniWorldEnv, Room
from ..entity import Box
from gym import spaces
class TMaze(MiniWorldEnv):
"""
Two hallways connected in a T-junction
"""
def __init__(
self,
goal_pos=None,
**kwargs
):
self.goal_pos = goal_pos
super().__init__(
max_episode_steps=280,
**kwargs
)
# Allow only movement actions (left/right/forward)
self.action_space = spaces.Discrete(self.actions.move_forward+1)
def _gen_world(self):
room1 = self.add_rect_room(
min_x=-1, max_x=8,
min_z=-2, max_z=2
)
room2 = self.add_rect_room(
min_x=8, max_x=12,
min_z=-8, max_z=8
)
self.connect_rooms(room1, room2, min_z=-2, max_z=2)
# Add a box at a random end of the hallway
self.box = Box(color='red')
# Place the goal in the left or the right arm
        if self.goal_pos is not None:
self.place_entity(
self.box,
min_x=self.goal_pos[0],
max_x=self.goal_pos[0],
min_z=self.goal_pos[2],
max_z=self.goal_pos[2],
)
else:
if self.rand.bool():
self.place_entity(self.box, room=room2, max_z=room2.min_z + 2)
else:
self.place_entity(self.box, room=room2, min_z=room2.max_z - 2)
# Choose a random room and position to spawn at
self.place_agent(
dir=self.rand.float(-math.pi/4, math.pi/4),
room=room1
)
def step(self, action):
obs, reward, done, info = super().step(action)
#reward -= np.linalg.norm(self.box.pos - self.agent.pos)/1000
if self.near(self.box):
reward += self._reward()
done = True
info['goal_pos'] = self.box.pos
return obs, reward, done, info
class TMazeLeft(TMaze):
def __init__(self):
super().__init__(goal_pos=[10, 0, -6])
class TMazeRight(TMaze):
def __init__(self):
super().__init__(goal_pos=[10, 0, 6])
avg_line_length: 26.82716 | max_line_length: 78 | alphanum_fraction: 0.54809
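The content field of this record is the TMaze environment defined above. Below is a minimal sketch of driving it with random actions, assuming gym-miniworld and its OpenGL/pyglet dependencies are installed and a display is available; the loop is purely illustrative and not part of the repository:

from gym_miniworld.envs.tmaze import TMazeLeft

env = TMazeLeft()                          # goal box fixed at [10, 0, -6], i.e. the left arm
obs = env.reset()
done = False
while not done:                            # the base class ends the episode after max_episode_steps=280
    action = env.action_space.sample()     # turn left / turn right / move forward
    obs, reward, done, info = env.step(action)
print(reward, info.get('goal_pos'))        # goal_pos is reported only when the box was reached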
Record 2
hexsha: 1d73d1724ca47c71fe31c78f64a43ef277f0d71e | size: 51,257 | ext: py | lang: Python
repo path: sympy/core/exprtools.py | repo name: shivank070701/sympy | head hexsha: c681a15b781aeefdbea846d67cf9b9b6ced77b4a | licenses: ["BSD-3-Clause"] (identical across the max_stars, max_issues, and max_forks variants)
max_stars_count: 1 (events 2022-01-19T04:02:46.000Z to 2022-01-19T04:02:46.000Z) | max_issues_count: 10 (events 2021-07-21T20:56:57.000Z to 2021-07-31T16:35:28.000Z) | max_forks_count: 1 (events 2022-01-14T17:15:38.000Z to 2022-01-14T17:15:38.000Z)
content:
"""Tools for manipulating of large commutative expressions. """
from sympy.core.add import Add
from sympy.core.compatibility import iterable, is_sequence, SYMPY_INTS
from sympy.core.mul import Mul, _keep_coeff
from sympy.core.power import Pow
from sympy.core.basic import Basic, preorder_traversal
from sympy.core.expr import Expr
from sympy.core.sympify import sympify
from sympy.core.numbers import Rational, Integer, Number, I
from sympy.core.singleton import S
from sympy.core.symbol import Dummy
from sympy.core.coreerrors import NonCommutativeExpression
from sympy.core.containers import Tuple, Dict
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import (common_prefix, common_suffix,
variations, ordered)
from collections import defaultdict
_eps = Dummy(positive=True)
def _isnumber(i):
return isinstance(i, (SYMPY_INTS, float)) or i.is_Number
def _monotonic_sign(self):
"""Return the value closest to 0 that ``self`` may have if all symbols
are signed and the result is uniformly the same sign for all values of symbols.
If a symbol is only signed but not known to be an
integer or the result is 0 then a symbol representative of the sign of self
will be returned. Otherwise, None is returned if a) the sign could be positive
or negative or b) self is not in one of the following forms:
- L(x, y, ...) + A: a function linear in all symbols x, y, ... with an
additive constant; if A is zero then the function can be a monomial whose
sign is monotonic over the range of the variables, e.g. (x + 1)**3 if x is
nonnegative.
- A/L(x, y, ...) + B: the inverse of a function linear in all symbols x, y, ...
that does not have a sign change from positive to negative for any set
of values for the variables.
- M(x, y, ...) + A: a monomial M whose factors are all signed and a constant, A.
- A/M(x, y, ...) + B: the inverse of a monomial and constants A and B.
- P(x): a univariate polynomial
Examples
========
>>> from sympy.core.exprtools import _monotonic_sign as F
>>> from sympy import Dummy
>>> nn = Dummy(integer=True, nonnegative=True)
>>> p = Dummy(integer=True, positive=True)
>>> p2 = Dummy(integer=True, positive=True)
>>> F(nn + 1)
1
>>> F(p - 1)
_nneg
>>> F(nn*p + 1)
1
>>> F(p2*p + 1)
2
>>> F(nn - 1) # could be negative, zero or positive
"""
if not self.is_extended_real:
return
if (-self).is_Symbol:
rv = _monotonic_sign(-self)
return rv if rv is None else -rv
if not self.is_Add and self.as_numer_denom()[1].is_number:
s = self
if s.is_prime:
if s.is_odd:
return S(3)
else:
return S(2)
elif s.is_composite:
if s.is_odd:
return S(9)
else:
return S(4)
elif s.is_positive:
if s.is_even:
if s.is_prime is False:
return S(4)
else:
return S(2)
elif s.is_integer:
return S.One
else:
return _eps
elif s.is_extended_negative:
if s.is_even:
return S(-2)
elif s.is_integer:
return S.NegativeOne
else:
return -_eps
if s.is_zero or s.is_extended_nonpositive or s.is_extended_nonnegative:
return S.Zero
return None
# univariate polynomial
free = self.free_symbols
if len(free) == 1:
if self.is_polynomial():
from sympy.polys.polytools import real_roots
from sympy.polys.polyroots import roots
from sympy.polys.polyerrors import PolynomialError
x = free.pop()
x0 = _monotonic_sign(x)
if x0 == _eps or x0 == -_eps:
x0 = S.Zero
if x0 is not None:
d = self.diff(x)
if d.is_number:
currentroots = []
else:
try:
currentroots = real_roots(d)
except (PolynomialError, NotImplementedError):
currentroots = [r for r in roots(d, x) if r.is_extended_real]
y = self.subs(x, x0)
if x.is_nonnegative and all(r <= x0 for r in currentroots):
if y.is_nonnegative and d.is_positive:
if y:
return y if y.is_positive else Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_negative:
if y:
return y if y.is_negative else Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
elif x.is_nonpositive and all(r >= x0 for r in currentroots):
if y.is_nonnegative and d.is_negative:
if y:
return Dummy('pos', positive=True)
else:
return Dummy('nneg', nonnegative=True)
if y.is_nonpositive and d.is_positive:
if y:
return Dummy('neg', negative=True)
else:
return Dummy('npos', nonpositive=True)
else:
n, d = self.as_numer_denom()
den = None
if n.is_number:
den = _monotonic_sign(d)
elif not d.is_number:
if _monotonic_sign(n) is not None:
den = _monotonic_sign(d)
if den is not None and (den.is_positive or den.is_negative):
v = n*den
if v.is_positive:
return Dummy('pos', positive=True)
elif v.is_nonnegative:
return Dummy('nneg', nonnegative=True)
elif v.is_negative:
return Dummy('neg', negative=True)
elif v.is_nonpositive:
return Dummy('npos', nonpositive=True)
return None
# multivariate
c, a = self.as_coeff_Add()
v = None
if not a.is_polynomial():
# F/A or A/F where A is a number and F is a signed, rational monomial
n, d = a.as_numer_denom()
if not (n.is_number or d.is_number):
return
if (
a.is_Mul or a.is_Pow) and \
a.is_rational and \
all(p.exp.is_Integer for p in a.atoms(Pow) if p.is_Pow) and \
(a.is_positive or a.is_negative):
v = S.One
for ai in Mul.make_args(a):
if ai.is_number:
v *= ai
continue
reps = {}
for x in ai.free_symbols:
reps[x] = _monotonic_sign(x)
if reps[x] is None:
return
v *= ai.subs(reps)
elif c:
# signed linear expression
if not any(p for p in a.atoms(Pow) if not p.is_number) and (a.is_nonpositive or a.is_nonnegative):
free = list(a.free_symbols)
p = {}
for i in free:
v = _monotonic_sign(i)
if v is None:
return
p[i] = v or (_eps if i.is_nonnegative else -_eps)
v = a.xreplace(p)
if v is not None:
rv = v + c
if v.is_nonnegative and rv.is_positive:
return rv.subs(_eps, 0)
if v.is_nonpositive and rv.is_negative:
return rv.subs(_eps, 0)
def decompose_power(expr):
"""
Decompose power into symbolic base and integer exponent.
Explanation
===========
This is strictly only valid if the exponent from which
the integer is extracted is itself an integer or the
base is positive. These conditions are assumed and not
checked here.
Examples
========
>>> from sympy.core.exprtools import decompose_power
>>> from sympy.abc import x, y
>>> decompose_power(x)
(x, 1)
>>> decompose_power(x**2)
(x, 2)
>>> decompose_power(x**(2*y))
(x**y, 2)
>>> decompose_power(x**(2*y/3))
(x**(y/3), 2)
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if exp.is_Rational:
if not exp.is_Integer:
base = Pow(base, Rational(1, exp.q))
exp = exp.p
else:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
def decompose_power_rat(expr):
"""
Decompose power into symbolic base and rational exponent.
"""
base, exp = expr.as_base_exp()
if exp.is_Number:
if not exp.is_Rational:
base, exp = expr, 1
else:
exp, tail = exp.as_coeff_Mul(rational=True)
if exp is S.NegativeOne:
base, exp = Pow(base, tail), -1
elif exp is not S.One:
tail = _keep_coeff(Rational(1, exp.q), tail)
base, exp = Pow(base, tail), exp.p
else:
base, exp = expr, 1
return base, exp
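# Hedged usage note for decompose_power_rat (illustrative values worked out
# from the code above, not doctests from the upstream module): a numeric
# Rational exponent is returned unchanged, while a symbolic exponent is split
# exactly as in decompose_power:
#     decompose_power_rat(x**2)               -> (x, 2)
#     decompose_power_rat(x**Rational(1, 2))  -> (x, 1/2)
#     decompose_power_rat(x**(2*y))           -> (x**y, 2)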
class Factors:
"""Efficient representation of ``f_1*f_2*...*f_n``."""
__slots__ = ('factors', 'gens')
def __init__(self, factors=None): # Factors
"""Initialize Factors from dict or expr.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x
>>> from sympy import I
>>> e = 2*x**3
>>> Factors(e)
Factors({2: 1, x: 3})
>>> Factors(e.as_powers_dict())
Factors({2: 1, x: 3})
>>> f = _
>>> f.factors # underlying dictionary
{2: 1, x: 3}
>>> f.gens # base of each factor
frozenset({2, x})
>>> Factors(0)
Factors({0: 1})
>>> Factors(I)
Factors({I: 1})
Notes
=====
Although a dictionary can be passed, only minimal checking is
performed: powers of -1 and I are made canonical.
"""
if isinstance(factors, (SYMPY_INTS, float)):
factors = S(factors)
if isinstance(factors, Factors):
factors = factors.factors.copy()
elif factors is None or factors is S.One:
factors = {}
elif factors is S.Zero or factors == 0:
factors = {S.Zero: S.One}
elif isinstance(factors, Number):
n = factors
factors = {}
if n < 0:
factors[S.NegativeOne] = S.One
n = -n
if n is not S.One:
if n.is_Float or n.is_Integer or n is S.Infinity:
factors[n] = S.One
elif n.is_Rational:
# since we're processing Numbers, the denominator is
# stored with a negative exponent; all other factors
# are left .
if n.p != 1:
factors[Integer(n.p)] = S.One
factors[Integer(n.q)] = S.NegativeOne
else:
raise ValueError('Expected Float|Rational|Integer, not %s' % n)
elif isinstance(factors, Basic) and not factors.args:
factors = {factors: S.One}
elif isinstance(factors, Expr):
c, nc = factors.args_cnc()
i = c.count(I)
for _ in range(i):
c.remove(I)
factors = dict(Mul._from_args(c).as_powers_dict())
# Handle all rational Coefficients
for f in list(factors.keys()):
if isinstance(f, Rational) and not isinstance(f, Integer):
p, q = Integer(f.p), Integer(f.q)
factors[p] = (factors[p] if p in factors else S.Zero) + factors[f]
factors[q] = (factors[q] if q in factors else S.Zero) - factors[f]
factors.pop(f)
if i:
factors[I] = factors.get(I, S.Zero) + i
if nc:
factors[Mul(*nc, evaluate=False)] = S.One
else:
factors = factors.copy() # /!\ should be dict-like
# tidy up -/+1 and I exponents if Rational
handle = []
for k in factors:
if k is I or k in (-1, 1):
handle.append(k)
if handle:
i1 = S.One
for k in handle:
if not _isnumber(factors[k]):
continue
i1 *= k**factors.pop(k)
if i1 is not S.One:
for a in i1.args if i1.is_Mul else [i1]: # at worst, -1.0*I*(-1)**e
if a is S.NegativeOne:
factors[a] = S.One
elif a is I:
factors[I] = S.One
elif a.is_Pow:
factors[a.base] = factors.get(a.base, S.Zero) + a.exp
elif a == 1:
factors[a] = S.One
elif a == -1:
factors[-a] = S.One
factors[S.NegativeOne] = S.One
else:
raise ValueError('unexpected factor in i1: %s' % a)
self.factors = factors
keys = getattr(factors, 'keys', None)
if keys is None:
raise TypeError('expecting Expr or dictionary')
self.gens = frozenset(keys())
def __hash__(self): # Factors
keys = tuple(ordered(self.factors.keys()))
values = [self.factors[k] for k in keys]
return hash((keys, values))
def __repr__(self): # Factors
return "Factors({%s})" % ', '.join(
['%s: %s' % (k, v) for k, v in ordered(self.factors.items())])
@property
def is_zero(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(0).is_zero
True
"""
f = self.factors
return len(f) == 1 and S.Zero in f
@property
def is_one(self): # Factors
"""
>>> from sympy.core.exprtools import Factors
>>> Factors(1).is_one
True
"""
return not self.factors
def as_expr(self): # Factors
"""Return the underlying expression.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> Factors((x*y**2).as_powers_dict()).as_expr()
x*y**2
"""
args = []
for factor, exp in self.factors.items():
if exp != 1:
if isinstance(exp, Integer):
b, e = factor.as_base_exp()
e = _keep_coeff(exp, e)
args.append(b**e)
else:
args.append(factor**exp)
else:
args.append(factor)
return Mul(*args)
def mul(self, other): # Factors
"""Return Factors of ``self * other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.mul(b)
Factors({x: 2, y: 3, z: -1})
>>> a*b
Factors({x: 2, y: 3, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = factors[factor] + exp
if not exp:
del factors[factor]
continue
factors[factor] = exp
return Factors(factors)
def normal(self, other):
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
        The only differences between this and the ``div`` method are that this
is 1) optimized for the case when there are few factors in common and
2) this does not raise an error if ``other`` is zero.
See Also
========
div
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return (Factors(), Factors(S.Zero))
if self.is_zero:
return (Factors(S.Zero), Factors())
self_factors = dict(self.factors)
other_factors = dict(other.factors)
for factor, self_exp in self.factors.items():
try:
other_exp = other.factors[factor]
except KeyError:
continue
exp = self_exp - other_exp
if not exp:
del self_factors[factor]
del other_factors[factor]
elif _isnumber(exp):
if exp > 0:
self_factors[factor] = exp
del other_factors[factor]
else:
del self_factors[factor]
other_factors[factor] = -exp
else:
r = self_exp.extract_additively(other_exp)
if r is not None:
if r:
self_factors[factor] = r
del other_factors[factor]
else: # should be handled already
del self_factors[factor]
del other_factors[factor]
else:
sc, sa = self_exp.as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
self_factors[factor] -= oc
other_exp = oa
elif diff < 0:
self_factors[factor] -= sc
other_factors[factor] -= sc
other_exp = oa - diff
else:
self_factors[factor] = sa
other_exp = oa
if other_exp:
other_factors[factor] = other_exp
else:
del other_factors[factor]
return Factors(self_factors), Factors(other_factors)
def div(self, other): # Factors
"""Return ``self`` and ``other`` with ``gcd`` removed from each.
This is optimized for the case when there are many factors in common.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> a = Factors((x*y**2).as_powers_dict())
>>> a.div(a)
(Factors({}), Factors({}))
>>> a.div(x*z)
(Factors({y: 2}), Factors({z: 1}))
The ``/`` operator only gives ``quo``:
>>> a/x
Factors({y: 2})
Factors treats its factors as though they are all in the numerator, so
if you violate this assumption the results will be correct but will
not strictly correspond to the numerator and denominator of the ratio:
>>> a.div(x/z)
(Factors({y: 2}), Factors({z: -1}))
Factors is also naive about bases: it does not attempt any denesting
of Rational-base terms, for example the following does not become
2**(2*x)/2.
>>> Factors(2**(2*x + 2)).div(S(8))
(Factors({2: 2*x + 2}), Factors({8: 1}))
factor_terms can clean up such Rational-bases powers:
>>> from sympy.core.exprtools import factor_terms
>>> n, d = Factors(2**(2*x + 2)).div(S(8))
>>> n.as_expr()/d.as_expr()
2**(2*x + 2)/8
>>> factor_terms(_)
2**(2*x)/2
"""
quo, rem = dict(self.factors), {}
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
raise ZeroDivisionError
if self.is_zero:
return (Factors(S.Zero), Factors())
for factor, exp in other.factors.items():
if factor in quo:
d = quo[factor] - exp
if _isnumber(d):
if d <= 0:
del quo[factor]
if d >= 0:
if d:
quo[factor] = d
continue
exp = -d
else:
r = quo[factor].extract_additively(exp)
if r is not None:
if r:
quo[factor] = r
else: # should be handled already
del quo[factor]
else:
other_exp = exp
sc, sa = quo[factor].as_coeff_Add()
if sc:
oc, oa = other_exp.as_coeff_Add()
diff = sc - oc
if diff > 0:
quo[factor] -= oc
other_exp = oa
elif diff < 0:
quo[factor] -= sc
other_exp = oa - diff
else:
quo[factor] = sa
other_exp = oa
if other_exp:
rem[factor] = other_exp
else:
assert factor not in rem
continue
rem[factor] = exp
return Factors(quo), Factors(rem)
def quo(self, other): # Factors
"""Return numerator Factor of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.quo(b) # same as a/b
Factors({y: 1})
"""
return self.div(other)[0]
def rem(self, other): # Factors
"""Return denominator Factors of ``self / other``.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.rem(b)
Factors({z: -1})
>>> a.rem(a)
Factors({})
"""
return self.div(other)[1]
def pow(self, other): # Factors
"""Return self raised to a non-negative integer power.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y
>>> a = Factors((x*y**2).as_powers_dict())
>>> a**2
Factors({x: 2, y: 4})
"""
if isinstance(other, Factors):
other = other.as_expr()
if other.is_Integer:
other = int(other)
if isinstance(other, SYMPY_INTS) and other >= 0:
factors = {}
if other:
for factor, exp in self.factors.items():
factors[factor] = exp*other
return Factors(factors)
else:
raise ValueError("expected non-negative integer, got %s" % other)
def gcd(self, other): # Factors
"""Return Factors of ``gcd(self, other)``. The keys are
the intersection of factors with the minimum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.gcd(b)
Factors({x: 1, y: 1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if other.is_zero:
return Factors(self.factors)
factors = {}
for factor, exp in self.factors.items():
factor, exp = sympify(factor), sympify(exp)
if factor in other.factors:
lt = (exp - other.factors[factor]).is_negative
if lt == True:
factors[factor] = exp
elif lt == False:
factors[factor] = other.factors[factor]
return Factors(factors)
def lcm(self, other): # Factors
"""Return Factors of ``lcm(self, other)`` which are
the union of factors with the maximum exponent for
each factor.
Examples
========
>>> from sympy.core.exprtools import Factors
>>> from sympy.abc import x, y, z
>>> a = Factors((x*y**2).as_powers_dict())
>>> b = Factors((x*y/z).as_powers_dict())
>>> a.lcm(b)
Factors({x: 1, y: 2, z: -1})
"""
if not isinstance(other, Factors):
other = Factors(other)
if any(f.is_zero for f in (self, other)):
return Factors(S.Zero)
factors = dict(self.factors)
for factor, exp in other.factors.items():
if factor in factors:
exp = max(exp, factors[factor])
factors[factor] = exp
return Factors(factors)
def __mul__(self, other): # Factors
return self.mul(other)
def __divmod__(self, other): # Factors
return self.div(other)
def __truediv__(self, other): # Factors
return self.quo(other)
def __mod__(self, other): # Factors
return self.rem(other)
def __pow__(self, other): # Factors
return self.pow(other)
def __eq__(self, other): # Factors
if not isinstance(other, Factors):
other = Factors(other)
return self.factors == other.factors
def __ne__(self, other): # Factors
return not self == other
class Term:
"""Efficient representation of ``coeff*(numer/denom)``. """
__slots__ = ('coeff', 'numer', 'denom')
def __init__(self, term, numer=None, denom=None): # Term
if numer is None and denom is None:
if not term.is_commutative:
raise NonCommutativeExpression(
'commutative expression expected')
coeff, factors = term.as_coeff_mul()
numer, denom = defaultdict(int), defaultdict(int)
for factor in factors:
base, exp = decompose_power(factor)
if base.is_Add:
cont, base = base.primitive()
coeff *= cont**exp
if exp > 0:
numer[base] += exp
else:
denom[base] += -exp
numer = Factors(numer)
denom = Factors(denom)
else:
coeff = term
if numer is None:
numer = Factors()
if denom is None:
denom = Factors()
self.coeff = coeff
self.numer = numer
self.denom = denom
def __hash__(self): # Term
return hash((self.coeff, self.numer, self.denom))
def __repr__(self): # Term
return "Term(%s, %s, %s)" % (self.coeff, self.numer, self.denom)
def as_expr(self): # Term
return self.coeff*(self.numer.as_expr()/self.denom.as_expr())
def mul(self, other): # Term
coeff = self.coeff*other.coeff
numer = self.numer.mul(other.numer)
denom = self.denom.mul(other.denom)
numer, denom = numer.normal(denom)
return Term(coeff, numer, denom)
def inv(self): # Term
return Term(1/self.coeff, self.denom, self.numer)
def quo(self, other): # Term
return self.mul(other.inv())
def pow(self, other): # Term
if other < 0:
return self.inv().pow(-other)
else:
return Term(self.coeff ** other,
self.numer.pow(other),
self.denom.pow(other))
def gcd(self, other): # Term
return Term(self.coeff.gcd(other.coeff),
self.numer.gcd(other.numer),
self.denom.gcd(other.denom))
def lcm(self, other): # Term
return Term(self.coeff.lcm(other.coeff),
self.numer.lcm(other.numer),
self.denom.lcm(other.denom))
def __mul__(self, other): # Term
if isinstance(other, Term):
return self.mul(other)
else:
return NotImplemented
def __truediv__(self, other): # Term
if isinstance(other, Term):
return self.quo(other)
else:
return NotImplemented
def __pow__(self, other): # Term
if isinstance(other, SYMPY_INTS):
return self.pow(other)
else:
return NotImplemented
def __eq__(self, other): # Term
return (self.coeff == other.coeff and
self.numer == other.numer and
self.denom == other.denom)
def __ne__(self, other): # Term
return not self == other
def _gcd_terms(terms, isprimitive=False, fraction=True):
"""Helper function for :func:`gcd_terms`.
Parameters
==========
isprimitive : boolean, optional
If ``isprimitive`` is True then the call to primitive
for an Add will be skipped. This is useful when the
        content has already been extracted.
fraction : boolean, optional
If ``fraction`` is True then the expression will appear over a common
denominator, the lcm of all term denominators.
"""
if isinstance(terms, Basic) and not isinstance(terms, Tuple):
terms = Add.make_args(terms)
terms = list(map(Term, [t for t in terms if t]))
# there is some simplification that may happen if we leave this
# here rather than duplicate it before the mapping of Term onto
# the terms
if len(terms) == 0:
return S.Zero, S.Zero, S.One
if len(terms) == 1:
cont = terms[0].coeff
numer = terms[0].numer.as_expr()
denom = terms[0].denom.as_expr()
else:
cont = terms[0]
for term in terms[1:]:
cont = cont.gcd(term)
for i, term in enumerate(terms):
terms[i] = term.quo(cont)
if fraction:
denom = terms[0].denom
for term in terms[1:]:
denom = denom.lcm(term.denom)
numers = []
for term in terms:
numer = term.numer.mul(denom.quo(term.denom))
numers.append(term.coeff*numer.as_expr())
else:
numers = [t.as_expr() for t in terms]
denom = Term(S.One).numer
cont = cont.as_expr()
numer = Add(*numers)
denom = denom.as_expr()
if not isprimitive and numer.is_Add:
_cont, numer = numer.primitive()
cont *= _cont
return cont, numer, denom
def gcd_terms(terms, isprimitive=False, clear=True, fraction=True):
"""Compute the GCD of ``terms`` and put them together.
Parameters
==========
terms : Expr
Can be an expression or a non-Basic sequence of expressions
which will be handled as though they are terms from a sum.
isprimitive : bool, optional
If ``isprimitive`` is True the _gcd_terms will not run the primitive
method on the terms.
clear : bool, optional
It controls the removal of integers from the denominator of an Add
        expression. When True (default), all numerical denominators will be cleared;
when False the denominators will be cleared only if all terms had numerical
denominators other than 1.
fraction : bool, optional
When True (default), will put the expression over a common
denominator.
Examples
========
>>> from sympy.core import gcd_terms
>>> from sympy.abc import x, y
>>> gcd_terms((x + 1)**2*y + (x + 1)*y**2)
y*(x + 1)*(x + y + 1)
>>> gcd_terms(x/2 + 1)
(x + 2)/2
>>> gcd_terms(x/2 + 1, clear=False)
x/2 + 1
>>> gcd_terms(x/2 + y/2, clear=False)
(x + y)/2
>>> gcd_terms(x/2 + 1/x)
(x**2 + 2)/(2*x)
>>> gcd_terms(x/2 + 1/x, fraction=False)
(x + 2/x)/2
>>> gcd_terms(x/2 + 1/x, fraction=False, clear=False)
x/2 + 1/x
>>> gcd_terms(x/2/y + 1/x/y)
(x**2 + 2)/(2*x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False)
(x**2/2 + 1)/(x*y)
>>> gcd_terms(x/2/y + 1/x/y, clear=False, fraction=False)
(x/2 + 1/x)/y
The ``clear`` flag was ignored in this case because the returned
expression was a rational expression, not a simple sum.
See Also
========
factor_terms, sympy.polys.polytools.terms_gcd
"""
def mask(terms):
"""replace nc portions of each term with a unique Dummy symbols
and return the replacements to restore them"""
args = [(a, []) if a.is_commutative else a.args_cnc() for a in terms]
reps = []
for i, (c, nc) in enumerate(args):
if nc:
nc = Mul(*nc)
d = Dummy()
reps.append((d, nc))
c.append(d)
args[i] = Mul(*c)
else:
args[i] = c
return args, dict(reps)
isadd = isinstance(terms, Add)
addlike = isadd or not isinstance(terms, Basic) and \
is_sequence(terms, include=set) and \
not isinstance(terms, Dict)
if addlike:
if isadd: # i.e. an Add
terms = list(terms.args)
else:
terms = sympify(terms)
terms, reps = mask(terms)
cont, numer, denom = _gcd_terms(terms, isprimitive, fraction)
numer = numer.xreplace(reps)
coeff, factors = cont.as_coeff_Mul()
if not clear:
c, _coeff = coeff.as_coeff_Mul()
if not c.is_Integer and not clear and numer.is_Add:
n, d = c.as_numer_denom()
_numer = numer/d
if any(a.as_coeff_Mul()[0].is_Integer
for a in _numer.args):
numer = _numer
coeff = n*_coeff
return _keep_coeff(coeff, factors*numer/denom, clear=clear)
if not isinstance(terms, Basic):
return terms
if terms.is_Atom:
return terms
if terms.is_Mul:
c, args = terms.as_coeff_mul()
return _keep_coeff(c, Mul(*[gcd_terms(i, isprimitive, clear, fraction)
for i in args]), clear=clear)
def handle(a):
# don't treat internal args like terms of an Add
if not isinstance(a, Expr):
if isinstance(a, Basic):
if not a.args:
return a
return a.func(*[handle(i) for i in a.args])
return type(a)([handle(i) for i in a])
return gcd_terms(a, isprimitive, clear, fraction)
if isinstance(terms, Dict):
return Dict(*[(k, handle(v)) for k, v in terms.args])
return terms.func(*[handle(i) for i in terms.args])
def _factor_sum_int(expr, **kwargs):
"""Return Sum or Integral object with factors that are not
in the wrt variables removed. In cases where there are additive
terms in the function of the object that are independent, the
object will be separated into two objects.
Examples
========
>>> from sympy import Sum, factor_terms
>>> from sympy.abc import x, y
>>> factor_terms(Sum(x + y, (x, 1, 3)))
y*Sum(1, (x, 1, 3)) + Sum(x, (x, 1, 3))
>>> factor_terms(Sum(x*y, (x, 1, 3)))
y*Sum(x, (x, 1, 3))
Notes
=====
If a function in the summand or integrand is replaced
with a symbol, then this simplification should not be
done or else an incorrect result will be obtained when
the symbol is replaced with an expression that depends
on the variables of summation/integration:
>>> eq = Sum(y, (x, 1, 3))
>>> factor_terms(eq).subs(y, x).doit()
3*x
>>> eq.subs(y, x).doit()
6
"""
result = expr.function
if result == 0:
return S.Zero
limits = expr.limits
# get the wrt variables
wrt = {i.args[0] for i in limits}
# factor out any common terms that are independent of wrt
f = factor_terms(result, **kwargs)
i, d = f.as_independent(*wrt)
if isinstance(f, Add):
return i * expr.func(1, *limits) + expr.func(d, *limits)
else:
return i * expr.func(d, *limits)
def factor_terms(expr, radical=False, clear=False, fraction=False, sign=True):
"""Remove common factors from terms in all arguments without
changing the underlying structure of the expr. No expansion or
simplification (and no processing of non-commutatives) is performed.
Parameters
==========
radical: bool, optional
If radical=True then a radical common to all terms will be factored
out of any Add sub-expressions of the expr.
clear : bool, optional
If clear=False (default) then coefficients will not be separated
from a single Add if they can be distributed to leave one or more
terms with integer coefficients.
fraction : bool, optional
If fraction=True (default is False) then a common denominator will be
constructed for the expression.
sign : bool, optional
If sign=True (default) then even if the only factor in common is a -1,
it will be factored out of the expression.
Examples
========
>>> from sympy import factor_terms, Symbol
>>> from sympy.abc import x, y
>>> factor_terms(x + x*(2 + 4*y)**3)
x*(8*(2*y + 1)**3 + 1)
>>> A = Symbol('A', commutative=False)
>>> factor_terms(x*A + x*A + x*y*A)
x*(y*A + 2*A)
When ``clear`` is False, a rational will only be factored out of an
Add expression if all terms of the Add have coefficients that are
fractions:
>>> factor_terms(x/2 + 1, clear=False)
x/2 + 1
>>> factor_terms(x/2 + 1, clear=True)
(x + 2)/2
If a -1 is all that can be factored out, to *not* factor it out, the
flag ``sign`` must be False:
>>> factor_terms(-x - y)
-(x + y)
>>> factor_terms(-x - y, sign=False)
-x - y
>>> factor_terms(-2*x - 2*y, sign=False)
-2*(x + y)
See Also
========
gcd_terms, sympy.polys.polytools.terms_gcd
"""
def do(expr):
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
is_iterable = iterable(expr)
if not isinstance(expr, Basic) or expr.is_Atom:
if is_iterable:
return type(expr)([do(i) for i in expr])
return expr
if expr.is_Pow or expr.is_Function or \
is_iterable or not hasattr(expr, 'args_cnc'):
args = expr.args
newargs = tuple([do(i) for i in args])
if newargs == args:
return expr
return expr.func(*newargs)
if isinstance(expr, (Sum, Integral)):
return _factor_sum_int(expr,
radical=radical, clear=clear,
fraction=fraction, sign=sign)
cont, p = expr.as_content_primitive(radical=radical, clear=clear)
if p.is_Add:
list_args = [do(a) for a in Add.make_args(p)]
# get a common negative (if there) which gcd_terms does not remove
if all(a.as_coeff_Mul()[0].extract_multiplicatively(-1) is not None
for a in list_args):
cont = -cont
list_args = [-a for a in list_args]
# watch out for exp(-(x+2)) which gcd_terms will change to exp(-x-2)
special = {}
for i, a in enumerate(list_args):
b, e = a.as_base_exp()
if e.is_Mul and e != Mul(*e.args):
list_args[i] = Dummy()
special[list_args[i]] = a
# rebuild p not worrying about the order which gcd_terms will fix
p = Add._from_args(list_args)
p = gcd_terms(p,
isprimitive=True,
clear=clear,
fraction=fraction).xreplace(special)
elif p.args:
p = p.func(
*[do(a) for a in p.args])
rv = _keep_coeff(cont, p, clear=clear, sign=sign)
return rv
expr = sympify(expr)
return do(expr)
def _mask_nc(eq, name=None):
"""
Return ``eq`` with non-commutative objects replaced with Dummy
symbols. A dictionary that can be used to restore the original
values is returned: if it is None, the expression is noncommutative
and cannot be made commutative. The third value returned is a list
of any non-commutative symbols that appear in the returned equation.
Explanation
===========
All non-commutative objects other than Symbols are replaced with
a non-commutative Symbol. Identical objects will be identified
by identical symbols.
If there is only 1 non-commutative object in an expression it will
be replaced with a commutative symbol. Otherwise, the non-commutative
entities are retained and the calling routine should handle
replacements in this case since some care must be taken to keep
track of the ordering of symbols when they occur within Muls.
Parameters
==========
name : str
``name``, if given, is the name that will be used with numbered Dummy
variables that will replace the non-commutative objects and is mainly
used for doctesting purposes.
Examples
========
>>> from sympy.physics.secondquant import Commutator, NO, F, Fd
>>> from sympy import symbols
>>> from sympy.core.exprtools import _mask_nc
>>> from sympy.abc import x, y
>>> A, B, C = symbols('A,B,C', commutative=False)
One nc-symbol:
>>> _mask_nc(A**2 - x**2, 'd')
(_d0**2 - x**2, {_d0: A}, [])
Multiple nc-symbols:
>>> _mask_nc(A**2 - B**2, 'd')
(A**2 - B**2, {}, [A, B])
An nc-object with nc-symbols but no others outside of it:
>>> _mask_nc(1 + x*Commutator(A, B), 'd')
(_d0*x + 1, {_d0: Commutator(A, B)}, [])
>>> _mask_nc(NO(Fd(x)*F(y)), 'd')
(_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, [])
Multiple nc-objects:
>>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B)
>>> _mask_nc(eq, 'd')
(x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1])
Multiple nc-objects and nc-symbols:
>>> eq = A*Commutator(A, B) + B*Commutator(A, C)
>>> _mask_nc(eq, 'd')
(A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B])
"""
name = name or 'mask'
# Make Dummy() append sequential numbers to the name
def numbered_names():
i = 0
while True:
yield name + str(i)
i += 1
names = numbered_names()
def Dummy(*args, **kwargs):
from sympy import Dummy
return Dummy(next(names), *args, **kwargs)
expr = eq
if expr.is_commutative:
return eq, {}, []
# identify nc-objects; symbols and other
rep = []
nc_obj = set()
nc_syms = set()
pot = preorder_traversal(expr, keys=default_sort_key)
for i, a in enumerate(pot):
if any(a == r[0] for r in rep):
pot.skip()
elif not a.is_commutative:
if a.is_symbol:
nc_syms.add(a)
pot.skip()
elif not (a.is_Add or a.is_Mul or a.is_Pow):
nc_obj.add(a)
pot.skip()
# If there is only one nc symbol or object, it can be factored regularly
# but polys is going to complain, so replace it with a Dummy.
if len(nc_obj) == 1 and not nc_syms:
rep.append((nc_obj.pop(), Dummy()))
elif len(nc_syms) == 1 and not nc_obj:
rep.append((nc_syms.pop(), Dummy()))
# Any remaining nc-objects will be replaced with an nc-Dummy and
# identified as an nc-Symbol to watch out for
nc_obj = sorted(nc_obj, key=default_sort_key)
for n in nc_obj:
nc = Dummy(commutative=False)
rep.append((n, nc))
nc_syms.add(nc)
expr = expr.subs(rep)
nc_syms = list(nc_syms)
nc_syms.sort(key=default_sort_key)
return expr, {v: k for k, v in rep}, nc_syms
def factor_nc(expr):
"""Return the factored form of ``expr`` while handling non-commutative
expressions.
Examples
========
>>> from sympy.core.exprtools import factor_nc
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> A = Symbol('A', commutative=False)
>>> B = Symbol('B', commutative=False)
>>> factor_nc((x**2 + 2*A*x + A**2).expand())
(x + A)**2
>>> factor_nc(((x + A)*(x + B)).expand())
(x + A)*(x + B)
"""
from sympy.simplify.simplify import powsimp
from sympy.polys import gcd, factor
def _pemexpand(expr):
"Expand with the minimal set of hints necessary to check the result."
return expr.expand(deep=True, mul=True, power_exp=True,
power_base=False, basic=False, multinomial=True, log=False)
expr = sympify(expr)
if not isinstance(expr, Expr) or not expr.args:
return expr
if not expr.is_Add:
return expr.func(*[factor_nc(a) for a in expr.args])
expr, rep, nc_symbols = _mask_nc(expr)
if rep:
return factor(expr).subs(rep)
else:
args = [a.args_cnc() for a in Add.make_args(expr)]
c = g = l = r = S.One
hit = False
# find any commutative gcd term
for i, a in enumerate(args):
if i == 0:
c = Mul._from_args(a[0])
elif a[0]:
c = gcd(c, Mul._from_args(a[0]))
else:
c = S.One
if c is not S.One:
hit = True
c, g = c.as_coeff_Mul()
if g is not S.One:
for i, (cc, _) in enumerate(args):
cc = list(Mul.make_args(Mul._from_args(list(cc))/g))
args[i][0] = cc
for i, (cc, _) in enumerate(args):
cc[0] = cc[0]/c
args[i][0] = cc
# find any noncommutative common prefix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_prefix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][0].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][0].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
l = b**e
il = b**-e
for _ in args:
_[1][0] = il*_[1][0]
break
if not ok:
break
else:
hit = True
lenn = len(n)
l = Mul(*n)
for _ in args:
_[1] = _[1][lenn:]
# find any noncommutative common suffix
for i, a in enumerate(args):
if i == 0:
n = a[1][:]
else:
n = common_suffix(n, a[1])
if not n:
# is there a power that can be extracted?
if not args[0][1]:
break
b, e = args[0][1][-1].as_base_exp()
ok = False
if e.is_Integer:
for t in args:
if not t[1]:
break
bt, et = t[1][-1].as_base_exp()
if et.is_Integer and bt == b:
e = min(e, et)
else:
break
else:
ok = hit = True
r = b**e
il = b**-e
for _ in args:
_[1][-1] = _[1][-1]*il
break
if not ok:
break
else:
hit = True
lenn = len(n)
r = Mul(*n)
for _ in args:
_[1] = _[1][:len(_[1]) - lenn]
if hit:
mid = Add(*[Mul(*cc)*Mul(*nc) for cc, nc in args])
else:
mid = expr
# sort the symbols so the Dummys would appear in the same
# order as the original symbols, otherwise you may introduce
        # a factor of -1, e.g. A**2 - B**2 -- {A:y, B:x} --> y**2 - x**2
# and the former factors into two terms, (A - B)*(A + B) while the
# latter factors into 3 terms, (-1)*(x - y)*(x + y)
rep1 = [(n, Dummy()) for n in sorted(nc_symbols, key=default_sort_key)]
unrep1 = [(v, k) for k, v in rep1]
unrep1.reverse()
new_mid, r2, _ = _mask_nc(mid.subs(rep1))
new_mid = powsimp(factor(new_mid))
new_mid = new_mid.subs(r2).subs(unrep1)
if new_mid.is_Pow:
return _keep_coeff(c, g*l*new_mid*r)
if new_mid.is_Mul:
# XXX TODO there should be a way to inspect what order the terms
# must be in and just select the plausible ordering without
# checking permutations
cfac = []
ncfac = []
for f in new_mid.args:
if f.is_commutative:
cfac.append(f)
else:
b, e = f.as_base_exp()
if e.is_Integer:
ncfac.extend([b]*e)
else:
ncfac.append(f)
pre_mid = g*Mul(*cfac)*l
target = _pemexpand(expr/c)
for s in variations(ncfac, len(ncfac)):
ok = pre_mid*Mul(*s)*r
if _pemexpand(ok) == target:
return _keep_coeff(c, ok)
# mid was an Add that didn't factor successfully
return _keep_coeff(c, g*l*mid*r)
avg_line_length: 32.564803 | max_line_length: 106 | alphanum_fraction: 0.50955
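The content field of this record is sympy/core/exprtools.py. A short, hedged tour of its public helpers, reusing outputs that already appear in the docstrings above (only sympy itself is required):

from sympy.abc import x, y, z
from sympy.core.exprtools import Factors, decompose_power, gcd_terms, factor_terms

a = Factors((x*y**2).as_powers_dict())    # Factors({x: 1, y: 2})
b = Factors((x*y/z).as_powers_dict())     # Factors({x: 1, y: 1, z: -1})
print(a.mul(b))                           # Factors({x: 2, y: 3, z: -1})
print(a.gcd(b))                           # Factors({x: 1, y: 1})
print(decompose_power(x**(2*y)))          # (x**y, 2)
print(gcd_terms(x/2 + 1))                 # (x + 2)/2
print(factor_terms(x + x*(2 + 4*y)**3))   # x*(8*(2*y + 1)**3 + 1)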
Record 3
hexsha: f518dccee040d9d5bfc321db9c91e5e0fb62b13d | size: 707 | ext: py | lang: Python
repo path: py/ec_condition.py | repo name: easycoder/EasyCoder | head hexsha: b9a1add42b01dd98059045aac64fda8abc7b5de5 | licenses: ["Apache-2.0"] (identical across the max_stars, max_issues, and max_forks variants)
max_stars_count: 2 (events 2021-08-15T06:05:25.000Z to 2021-08-20T18:03:24.000Z) | max_issues_count: 1 (events 2021-02-28T20:46:36.000Z to 2021-02-28T20:46:36.000Z) | max_forks_count: null (fork event datetimes null)
content:
class Condition:
def __init__(self, compiler):
self.domains = compiler.domains
self.getToken = compiler.getToken
self.nextToken = compiler.nextToken
self.peek = compiler.peek
self.tokenIs = compiler.tokenIs
self.mark = compiler.mark
self.rewind = compiler.rewind
self.program = compiler.program
def compileCondition(self):
self.mark()
for domain in self.domains:
item = domain.compileCondition()
            if item is not None:
item['domain'] = domain.getName()
return item
self.rewind()
return None
def testCondition(self, condition):
handler = self.program.domainList[condition['domain']]
handler = handler.conditionHandler(condition['type'])
return handler(condition)
avg_line_length: 27.192308 | max_line_length: 56 | alphanum_fraction: 0.729844
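The content field of this record is EasyCoder's Condition helper. It only orchestrates: parsing and evaluation are delegated to the compiler's domain plugins. Below is a hypothetical, minimal domain that would satisfy the interface Condition relies on (compileCondition(), getName(), conditionHandler()); every name in it is an illustrative assumption, not something taken from the EasyCoder repository:

class DemoDomain:
    """Hypothetical domain plugin recognising a single 'boolean' condition."""

    def getName(self):
        return 'demo'

    def compileCondition(self):
        # A real domain would consume tokens via the compiler here and
        # return None when it does not recognise the condition.
        return {'type': 'boolean', 'value': True}

    def conditionHandler(self, condition_type):
        # Map a condition type onto the runtime function that evaluates it.
        return {'boolean': lambda condition: condition['value']}[condition_type]

With such a plugin, Condition.compileCondition() would tag the returned dict with domain 'demo', and testCondition() would later fetch the handler through program.domainList['demo'].conditionHandler('boolean') and call it with that dict.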
Record 4
hexsha: c047af6df5373732fb07c10c94416e5db5d6fc73 | size: 23,903 | ext: py | lang: Python
repo path: venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_internal/cli/cmdoptions.py | repo name: nguidjoi/bigData | head hexsha: aef722e77c10b8b0261578277892ebb15764d680 | licenses: ["Apache-2.0"]
(the max_stars, max_issues, and max_forks variants of path/name/head/licenses are identical; all star, issue, and fork counts and event datetimes are null)
content:
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
import textwrap
import warnings
from distutils.util import strtobool
from functools import partial
from optparse import SUPPRESS_HELP, Option, OptionGroup
from pip._internal.exceptions import CommandError
from pip._internal.locations import USER_CACHE_DIR, src_prefix
from pip._internal.models.format_control import FormatControl
from pip._internal.models.index import PyPI
from pip._internal.utils.hashes import STRONG_HASHES
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.ui import BAR_TYPES
if MYPY_CHECK_RUNNING:
from typing import Any, Callable, Dict, Optional # noqa: F401
from optparse import OptionParser, Values # noqa: F401
from pip._internal.cli.parser import ConfigOptionParser # noqa: F401
def raise_option_error(parser, option, msg):
"""
Raise an option parsing error using parser.error().
Args:
parser: an OptionParser instance.
option: an Option instance.
msg: the error text.
"""
msg = '{} error: {}'.format(option, msg)
msg = textwrap.fill(' '.join(msg.split()))
parser.error(msg)
def make_option_group(group, parser):
# type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option())
return option_group
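# Hedged usage sketch (assumed, based only on the dict shape documented in
# make_option_group's docstring): each module-level name defined below is a
# ``partial(Option, ...)`` factory, so a command builds its parser by
# *calling* the factory, which is what keeps ``action='append'`` options from
# sharing state between parses.
#
#     from optparse import OptionParser
#     parser = OptionParser()
#     general_group = {
#         'name': 'General Options',
#         'options': [verbose, quiet, no_color, proxy, retries, timeout],
#     }
#     parser.add_option_group(make_option_group(general_group, parser))
#     opts, args = parser.parse_args(['-v', '--retries', '3'])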
def check_install_build_global(options, check_options=None):
# type: (Values, Optional[Values]) -> None
"""Disable wheels if per-setup.py call options are set.
:param options: The OptionParser options to update.
:param check_options: The options to check, if not supplied defaults to
options.
"""
if check_options is None:
check_options = options
def getname(n):
return getattr(check_options, n, None)
names = ["build_options", "global_options", "install_options"]
if any(map(getname, names)):
control = options.format_control
control.disallow_binaries()
warnings.warn(
'Disabling all use of wheels due to the use of --build-options '
'/ --global-options / --install-options.', stacklevel=2,
)
def check_dist_restriction(options, check_target=False):
# type: (Values, bool) -> None
"""Function for determining if custom platform options are allowed.
:param options: The OptionParser options.
:param check_target: Whether or not to check if --target is being used.
"""
dist_restriction_set = any([
options.python_version,
options.platform,
options.abi,
options.implementation,
])
binary_only = FormatControl(set(), {':all:'})
sdist_dependencies_allowed = (
options.format_control != binary_only and
not options.ignore_dependencies
)
# Installations or downloads using dist restrictions must not combine
# source distributions and dist-specific wheels, as they are not
    # guaranteed to be locally compatible.
if dist_restriction_set and sdist_dependencies_allowed:
raise CommandError(
"When restricting platform and interpreter constraints using "
"--python-version, --platform, --abi, or --implementation, "
"either --no-deps must be set, or --only-binary=:all: must be "
"set and --no-binary must not be set (or must be set to "
":none:)."
)
if check_target:
if dist_restriction_set and not options.target_dir:
raise CommandError(
"Can not use any platform or abi specific options unless "
"installing via '--target'"
)
###########
# options #
###########
help_ = partial(
Option,
'-h', '--help',
dest='help',
action='help',
help='Show help.',
) # type: Callable[..., Option]
isolated_mode = partial(
Option,
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
) # type: Callable[..., Option]
require_virtualenv = partial(
Option,
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP
) # type: Callable[..., Option]
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
) # type: Callable[..., Option]
no_color = partial(
Option,
'--no-color',
dest='no_color',
action='store_true',
default=False,
help="Suppress colored output",
) # type: Callable[..., Option]
version = partial(
Option,
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.',
) # type: Callable[..., Option]
quiet = partial(
Option,
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help=(
'Give less output. Option is additive, and can be used up to 3'
' times (corresponding to WARNING, ERROR, and CRITICAL logging'
' levels).'
),
) # type: Callable[..., Option]
progress_bar = partial(
Option,
'--progress-bar',
dest='progress_bar',
type='choice',
choices=list(BAR_TYPES.keys()),
default='on',
help=(
'Specify type of progress to be displayed [' +
'|'.join(BAR_TYPES.keys()) + '] (default: %default)'
),
) # type: Callable[..., Option]
log = partial(
Option,
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
) # type: Callable[..., Option]
no_input = partial(
Option,
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP
) # type: Callable[..., Option]
proxy = partial(
Option,
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port."
) # type: Callable[..., Option]
retries = partial(
Option,
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).",
) # type: Callable[..., Option]
timeout = partial(
Option,
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).',
) # type: Callable[..., Option]
skip_requirements_regex = partial(
Option,
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP,
) # type: Callable[..., Option]
def exists_action():
# type: () -> Option
return Option(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b', 'a'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort).",
)
cert = partial(
Option,
'--cert',
dest='cert',
type='str',
metavar='path',
help="Path to alternate CA bundle.",
) # type: Callable[..., Option]
client_cert = partial(
Option,
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.",
) # type: Callable[..., Option]
index_url = partial(
Option,
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help="Base URL of Python Package Index (default %default). "
"This should point to a repository compliant with PEP 503 "
"(the simple repository API) or a local directory laid out "
"in the same format.",
) # type: Callable[..., Option]
def extra_index_url():
return Option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help="Extra URLs of package indexes to use in addition to "
"--index-url. Should follow the same rules as "
"--index-url.",
)
no_index = partial(
Option,
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).',
) # type: Callable[..., Option]
def find_links():
# type: () -> Option
return Option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to "
"archives. If a local path or file:// url that's a directory, "
"then look for archives in the directory listing.",
)
def trusted_host():
# type: () -> Option
return Option(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid "
"or any HTTPS.",
)
def constraints():
# type: () -> Option
return Option(
'-c', '--constraint',
dest='constraints',
action='append',
default=[],
metavar='file',
help='Constrain versions using the given constraints file. '
'This option can be used multiple times.'
)
def requirements():
# type: () -> Option
return Option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.'
)
def editable():
# type: () -> Option
return Option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = partial(
Option,
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
) # type: Callable[..., Option]
def _get_format_control(values, option):
# type: (Values, Option) -> Any
"""Get a format_control object."""
return getattr(values, option.dest)
def _handle_no_binary(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value, existing.no_binary, existing.only_binary,
)
def _handle_only_binary(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
existing = _get_format_control(parser.values, option)
FormatControl.handle_mutual_excludes(
value, existing.only_binary, existing.no_binary,
)
def no_binary():
# type: () -> Option
format_control = FormatControl(set(), set())
return Option(
"--no-binary", dest="format_control", action="callback",
callback=_handle_no_binary, type="str",
default=format_control,
help="Do not use binary packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all binary packages, :none: to empty the set, or one or "
"more package names with commas between them. Note that some "
"packages are tricky to compile and may fail to install when "
"this option is used on them.",
)
def only_binary():
# type: () -> Option
format_control = FormatControl(set(), set())
return Option(
"--only-binary", dest="format_control", action="callback",
callback=_handle_only_binary, type="str",
default=format_control,
help="Do not use source packages. Can be supplied multiple times, and "
"each time adds to the existing value. Accepts either :all: to "
"disable all source packages, :none: to empty the set, or one or "
"more package names with commas between them. Packages without "
"binary distributions will fail to install when this option is "
"used on them.",
)
platform = partial(
Option,
'--platform',
dest='platform',
metavar='platform',
default=None,
help=("Only use wheels compatible with <platform>. "
"Defaults to the platform of the running system."),
) # type: Callable[..., Option]
python_version = partial(
Option,
'--python-version',
dest='python_version',
metavar='python_version',
default=None,
help=("Only use wheels compatible with Python "
"interpreter version <version>. If not specified, then the "
"current system interpreter minor version is used. A major "
"version (e.g. '2') can be specified to match all "
"minor revs of that major version. A minor version "
"(e.g. '34') can also be specified."),
) # type: Callable[..., Option]
implementation = partial(
Option,
'--implementation',
dest='implementation',
metavar='implementation',
default=None,
help=("Only use wheels compatible with Python "
"implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
" or 'ip'. If not specified, then the current "
"interpreter implementation is used. Use 'py' to force "
"implementation-agnostic wheels."),
) # type: Callable[..., Option]
abi = partial(
Option,
'--abi',
dest='abi',
metavar='abi',
default=None,
help=("Only use wheels compatible with Python "
"abi <abi>, e.g. 'pypy_41'. If not specified, then the "
"current interpreter abi tag is used. Generally "
"you will need to specify --implementation, "
"--platform, and --python-version when using "
"this option."),
) # type: Callable[..., Option]
def prefer_binary():
# type: () -> Option
return Option(
"--prefer-binary",
dest="prefer_binary",
action="store_true",
default=False,
help="Prefer older binary packages over newer source packages."
)
cache_dir = partial(
Option,
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
) # type: Callable[..., Option]
def no_cache_dir_callback(option, opt, value, parser):
"""
Process a value provided for the --no-cache-dir option.
This is an optparse.Option callback for the --no-cache-dir option.
"""
# The value argument will be None if --no-cache-dir is passed via the
# command-line, since the option doesn't accept arguments. However,
# the value can be non-None if the option is triggered e.g. by an
# environment variable, like PIP_NO_CACHE_DIR=true.
if value is not None:
# Then parse the string value to get argument error-checking.
try:
strtobool(value)
except ValueError as exc:
raise_option_error(parser, option=option, msg=str(exc))
# Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
# converted to 0 (like "false" or "no") caused cache_dir to be disabled
# rather than enabled (logic would say the latter). Thus, we disable
# the cache directory not just on values that parse to True, but (for
# backwards compatibility reasons) also on values that parse to False.
# In other words, always set it to False if the option is provided in
# some (valid) form.
parser.values.cache_dir = False
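# Illustrative sketch (not part of pip) of the backwards-compatibility rule
# documented above: any valid boolean string supplied for --no-cache-dir,
# whether it parses to True or to False, ends up disabling the cache. The
# function name and the placeholder cache path are hypothetical.
def _sketch_no_cache_dir_callback():
    from optparse import OptionParser, Values
    parser = OptionParser()
    for env_value in ("true", "false"):  # both spellings disable the cache
        parser.values = Values({"cache_dir": "/tmp/example-pip-cache"})
        no_cache_dir_callback(None, "--no-cache-dir", env_value, parser)
        assert parser.values.cache_dir is False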
no_cache = partial(
Option,
"--no-cache-dir",
dest="cache_dir",
action="callback",
callback=no_cache_dir_callback,
help="Disable the cache.",
) # type: Callable[..., Option]
no_deps = partial(
Option,
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.",
) # type: Callable[..., Option]
build_dir = partial(
Option,
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in. Note that '
'an initial build still takes place in a temporary directory. '
'The location of temporary directories can be controlled by setting '
'the TMPDIR environment variable (TEMP on Windows) appropriately. '
'When passed, build directories are not cleaned in case of failures.'
) # type: Callable[..., Option]
ignore_requires_python = partial(
Option,
'--ignore-requires-python',
dest='ignore_requires_python',
action='store_true',
help='Ignore the Requires-Python information.'
) # type: Callable[..., Option]
no_build_isolation = partial(
Option,
'--no-build-isolation',
dest='build_isolation',
action='store_false',
default=True,
help='Disable isolation when building a modern source distribution. '
'Build dependencies specified by PEP 518 must be already installed '
'if this option is used.'
) # type: Callable[..., Option]
def no_use_pep517_callback(option, opt, value, parser):
"""
Process a value provided for the --no-use-pep517 option.
This is an optparse.Option callback for the no_use_pep517 option.
"""
# Since --no-use-pep517 doesn't accept arguments, the value argument
# will be None if --no-use-pep517 is passed via the command-line.
# However, the value can be non-None if the option is triggered e.g.
# by an environment variable, for example "PIP_NO_USE_PEP517=true".
if value is not None:
msg = """A value was passed for --no-use-pep517,
probably using either the PIP_NO_USE_PEP517 environment variable
or the "no-use-pep517" config file option. Use an appropriate value
of the PIP_USE_PEP517 environment variable or the "use-pep517"
config file option instead.
"""
raise_option_error(parser, option=option, msg=msg)
# Otherwise, --no-use-pep517 was passed via the command-line.
parser.values.use_pep517 = False
use_pep517 = partial(
Option,
'--use-pep517',
dest='use_pep517',
action='store_true',
default=None,
help='Use PEP 517 for building source distributions '
'(use --no-use-pep517 to force legacy behaviour).'
) # type: Any
no_use_pep517 = partial(
Option,
'--no-use-pep517',
dest='use_pep517',
action='callback',
callback=no_use_pep517_callback,
default=None,
help=SUPPRESS_HELP
) # type: Any
install_options = partial(
Option,
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.",
) # type: Callable[..., Option]
global_options = partial(
Option,
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.",
) # type: Callable[..., Option]
no_clean = partial(
Option,
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories."
) # type: Callable[..., Option]
pre = partial(
Option,
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.",
) # type: Callable[..., Option]
disable_pip_version_check = partial(
Option,
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.",
) # type: Callable[..., Option]
# Deprecated, Remove later
always_unzip = partial(
Option,
'-Z', '--always-unzip',
dest='always_unzip',
action='store_true',
help=SUPPRESS_HELP,
) # type: Callable[..., Option]
def _merge_hash(option, opt_str, value, parser):
# type: (Option, str, str, OptionParser) -> None
"""Given a value spelled "algo:digest", append the digest to a list
pointed to in a dict by the algo name."""
if not parser.values.hashes:
parser.values.hashes = {} # type: ignore
try:
algo, digest = value.split(':', 1)
except ValueError:
parser.error('Arguments to %s must be a hash name '
'followed by a value, like --hash=sha256:abcde...' %
opt_str)
if algo not in STRONG_HASHES:
parser.error('Allowed hash algorithms for %s are %s.' %
(opt_str, ', '.join(STRONG_HASHES)))
parser.values.hashes.setdefault(algo, []).append(digest)
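# Illustrative sketch (not part of pip): repeated --hash options accumulate in
# parser.values.hashes as {algorithm: [digests]} via _merge_hash above. The
# function name and the digest strings are made-up placeholders, not real hashes.
def _sketch_merge_hash():
    from optparse import OptionParser, Values
    parser = OptionParser()
    parser.values = Values({"hashes": None})
    for spec in ("sha256:aaaa", "sha256:bbbb", "sha512:cccc"):
        _merge_hash(None, "--hash", spec, parser)
    return parser.values.hashes  # {'sha256': ['aaaa', 'bbbb'], 'sha512': ['cccc']}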
hash = partial(
Option,
'--hash',
# Hash values eventually end up in InstallRequirement.hashes due to
# __dict__ copying in process_line().
dest='hashes',
action='callback',
callback=_merge_hash,
type='string',
help="Verify that the package's archive matches this "
'hash before installing. Example: --hash=sha256:abcdef...',
) # type: Callable[..., Option]
require_hashes = partial(
Option,
'--require-hashes',
dest='require_hashes',
action='store_true',
default=False,
help='Require a hash to check each requirement against, for '
'repeatable installs. This option is implied when any package in a '
'requirements file has a --hash option.',
) # type: Callable[..., Option]
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
no_input,
proxy,
retries,
timeout,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
no_color,
]
} # type: Dict[str, Any]
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
]
} # type: Dict[str, Any]
| 29.693168
| 79
| 0.623897
|
78454eb0706d5cab92f58509223e3675604201f6
| 3,048
|
py
|
Python
|
openprocurement/auction/texas/forms.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auction/texas/forms.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auction/texas/forms.py
|
oleksiyVeretiuk/openprocurement.auction.gong
|
783ea355c1633e084aaf26a1d6128cc77ae6f642
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
import wtforms_json
from flask import request, session, current_app as app
from wtforms import Form, FloatField, StringField
from wtforms.validators import InputRequired, ValidationError
from openprocurement.auction.utils import prepare_extra_journal_fields
from openprocurement.auction.worker_core.constants import TIMEZONE
from openprocurement.auction.texas.constants import MAIN_ROUND
from openprocurement.auction.texas.utils import lock_server
wtforms_json.init()
def validate_bid_value(form, field):
"""
    Bid must be higher than or equal to the previous bidder's bid amount plus
    the minimalStep amount. The bid amount should also be a multiple of the
    minimalStep amount.
"""
stage_id = form.document['current_stage'] if form.document['current_stage'] >= 0 else 0
minimal_step = form.document['minimalStep']['amount']
current_amount = form.document['stages'][stage_id].get('amount')
if form.document['stages'][stage_id]['type'] != MAIN_ROUND:
raise ValidationError(u'Current stage does not allow bidding')
if field.data < current_amount:
raise ValidationError(u'Too low value')
if field.data % minimal_step:
raise ValidationError(
u'Value should be a multiplier of '
u'a minimalStep amount ({})'.format(minimal_step)
)
class BidsForm(Form):
bidder_id = StringField(
'bidder_id',
validators=[
InputRequired(message=u'No bidder id'),
]
)
bid = FloatField(
'bid',
validators=[
InputRequired(message=u'Bid amount is required'),
validate_bid_value
]
)
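# Minimal self-contained sketch (not part of the worker) of how BidsForm and
# validate_bid_value above fit together: the form is built from JSON, a fake
# auction document is attached, and validation is run. The document contents,
# the function name and the bid values are made-up examples.
def _sketch_validate_bid():
    form = BidsForm.from_json({'bidder_id': 'example-bidder', 'bid': 110000.0})
    form.document = {
        'current_stage': 0,
        'minimalStep': {'amount': 5000.0},
        'stages': [{'type': MAIN_ROUND, 'amount': 100000.0}],
    }
    # 110000 >= 100000 and 110000 % 5000 == 0, so validation passes
    return form.validate()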
def form_handler():
form = app.bids_form.from_json(request.json)
form.document = app.context['auction_document']
current_time = datetime.now(TIMEZONE)
if form.validate():
with lock_server(app.context['server_actions']):
app.bids_handler.add_bid(form.document['current_stage'],
{'amount': form.data['bid'],
'bidder_id': form.data['bidder_id'],
'time': current_time.isoformat()})
app.logger.info(
"Bidder {} with client_id {} placed bid {} in {}".format(
form.data['bidder_id'], session['client_id'],
form.data['bid'], current_time.isoformat()
), extra=prepare_extra_journal_fields(request.headers)
)
return {'status': 'ok', 'data': form.data}
else:
app.logger.info(
"Bidder {} with client_id {} wants place "
"bid {} in {} with errors {}".format(
request.json.get('bidder_id', 'None'), session['client_id'],
request.json.get('bid', 'None'), current_time.isoformat(),
repr(form.errors)
), extra=prepare_extra_journal_fields(request.headers)
)
return {'status': 'failed', 'errors': form.errors}
| 37.62963
| 91
| 0.623031
|
78b52faabc06fd4b363b2e1fb2b09e11319af62e
| 2,640
|
py
|
Python
|
python/ymt_components/ymt_face_eyebrow_01/settingsUI.py
|
yamahigashi/mgear_shifter_components
|
c4e4c19d8a972e4d78df46f4bdf0b3319da5a792
|
[
"MIT"
] | 10
|
2020-01-24T10:10:39.000Z
|
2021-09-16T06:20:55.000Z
|
python/ymt_components/ymt_face_eyebrow_01/settingsUI.py
|
yamahigashi/mgear_shifter_components
|
c4e4c19d8a972e4d78df46f4bdf0b3319da5a792
|
[
"MIT"
] | null | null | null |
python/ymt_components/ymt_face_eyebrow_01/settingsUI.py
|
yamahigashi/mgear_shifter_components
|
c4e4c19d8a972e4d78df46f4bdf0b3319da5a792
|
[
"MIT"
] | 2
|
2020-01-24T10:11:07.000Z
|
2020-04-21T18:17:09.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:/Projects/Pipeline/rez-packages/third/github.com/yamahigashi/ymtshiftercomponents/mgear_shifter_components/python/ymt_components/ymt_face_eyebrow_01/settingsUI.ui'
#
# Created: Sun Apr 11 12:55:04 2021
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(419, 737)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.groupBox_3 = QtWidgets.QGroupBox(Form)
self.groupBox_3.setObjectName("groupBox_3")
self.gridLayout_4 = QtWidgets.QGridLayout(self.groupBox_3)
self.gridLayout_4.setObjectName("gridLayout_4")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.gridLayout_4.addLayout(self.horizontalLayout_2, 0, 0, 1, 1)
self.isSlidingSurface = QtWidgets.QCheckBox(self.groupBox_3)
self.isSlidingSurface.setChecked(True)
self.isSlidingSurface.setTristate(False)
self.isSlidingSurface.setObjectName("isSlidingSurface")
self.gridLayout_4.addWidget(self.isSlidingSurface, 1, 0, 1, 1)
self.verticalLayout.addWidget(self.groupBox_3)
self.overrideNegate_checkBox = QtWidgets.QCheckBox(Form)
self.overrideNegate_checkBox.setText("Override Negate Axis Direction For \"R\" Side")
self.overrideNegate_checkBox.setObjectName("overrideNegate_checkBox")
self.verticalLayout.addWidget(self.overrideNegate_checkBox)
self.addJoints_checkBox = QtWidgets.QCheckBox(Form)
self.addJoints_checkBox.setText("Add Joints")
self.addJoints_checkBox.setChecked(True)
self.addJoints_checkBox.setObjectName("addJoints_checkBox")
self.verticalLayout.addWidget(self.addJoints_checkBox)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtWidgets.QApplication.translate("Form", "Form", None, -1))
self.groupBox_3.setTitle(QtWidgets.QApplication.translate("Form", "Surface", None, -1))
self.isSlidingSurface.setText(QtWidgets.QApplication.translate("Form", "Sliding on Surface", None, -1))
| 51.764706
| 219
| 0.737879
|
471f79c791043c367a2345faf3570defe0835229
| 21,815
|
py
|
Python
|
tests/main_test.py
|
flyingbarron/detect-secrets
|
5f9887179794ce037d97c1b343623eb5937ce800
|
[
"Apache-2.0"
] | null | null | null |
tests/main_test.py
|
flyingbarron/detect-secrets
|
5f9887179794ce037d97c1b343623eb5937ce800
|
[
"Apache-2.0"
] | null | null | null |
tests/main_test.py
|
flyingbarron/detect-secrets
|
5f9887179794ce037d97c1b343623eb5937ce800
|
[
"Apache-2.0"
] | null | null | null |
import json
import shlex
import textwrap
from contextlib import contextmanager
import mock
import pytest
from detect_secrets import main as main_module
from detect_secrets import VERSION
from detect_secrets.core import audit as audit_module
from detect_secrets.main import main
from detect_secrets.plugins.common.util import import_plugins
from testing.factories import secrets_collection_factory
from testing.mocks import Any
from testing.mocks import mock_printer
from testing.util import uncolor
try:
FileNotFoundError
except NameError: # pragma: no cover
# support python 2.x
FileNotFoundError = IOError
def get_list_of_plugins(include=None, exclude=None):
"""
:type include: List[Dict[str, Any]]
:type exclude: Iterable[str]
:rtype: List[Dict[str, Any]]
"""
included_plugins = []
if include:
included_plugins = [
config['name']
for config in include
]
output = []
for name, plugin in import_plugins().items():
if (
name in included_plugins or
exclude and name in exclude
):
continue
payload = {
'name': name,
}
payload.update(plugin.default_options)
output.append(payload)
if include:
output.extend(include)
return sorted(output, key=lambda x: x['name'])
def get_plugin_report(extra=None):
"""
:type extra: Dict[str, str]
"""
if not extra: # pragma: no cover
extra = {}
longest_name_length = max([
len(name)
for name in import_plugins()
])
return '\n'.join(
sorted([
'{name}: {result}'.format(
name=name + ' ' * (longest_name_length - len(name)),
result='False' if name not in extra else extra[name],
)
for name in import_plugins()
]),
) + '\n'
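# Standalone sketch (not part of detect-secrets) of the padding scheme used by
# get_plugin_report above: plugin names are right-padded to the longest name so
# the results column lines up. The plugin names and the result string here are
# a hypothetical subset; the real report iterates over import_plugins().
def _sketch_report_shape():
    names = ['Base64HighEntropyString', 'HexHighEntropyString', 'PrivateKeyDetector']
    extra = {'HexHighEntropyString': 'True (3.459)'}
    longest = max(len(name) for name in names)
    return '\n'.join(sorted(
        '{}: {}'.format(name + ' ' * (longest - len(name)), extra.get(name, 'False'))
        for name in names
    )) + '\n'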
class TestMain(object):
"""These are smoke tests for the console usage of detect_secrets.
Most of the functional test cases should be within their own module tests.
"""
def test_scan_basic(self, mock_baseline_initialize):
with mock_stdin():
assert main(['scan']) == 0
mock_baseline_initialize.assert_called_once_with(
plugins=Any(tuple),
exclude_files_regex=None,
exclude_lines_regex=None,
path='.',
should_scan_all_files=False,
output_raw=False,
output_verified_false=False,
word_list_file=None,
word_list_hash=None,
)
def test_scan_with_rootdir(self, mock_baseline_initialize):
with mock_stdin():
assert main('scan test_data'.split()) == 0
mock_baseline_initialize.assert_called_once_with(
plugins=Any(tuple),
exclude_files_regex=None,
exclude_lines_regex=None,
path=['test_data'],
should_scan_all_files=False,
output_raw=False,
output_verified_false=False,
word_list_file=None,
word_list_hash=None,
)
def test_scan_with_exclude_args(self, mock_baseline_initialize):
with mock_stdin():
assert main(
'scan --exclude-files some_pattern_here --exclude-lines other_patt'.split(),
) == 0
mock_baseline_initialize.assert_called_once_with(
plugins=Any(tuple),
exclude_files_regex='some_pattern_here',
exclude_lines_regex='other_patt',
path='.',
should_scan_all_files=False,
output_raw=False,
output_verified_false=False,
word_list_file=None,
word_list_hash=None,
)
@pytest.mark.parametrize(
'string, expected_base64_result, expected_hex_result',
[
(
'012345678ab',
'False (3.459)',
'True (3.459)',
),
(
'Benign',
'False (2.252)',
'False',
),
],
)
def test_scan_string_basic(
self,
mock_baseline_initialize,
string,
expected_base64_result,
expected_hex_result,
):
with mock_stdin(
string,
), mock_printer(
main_module,
) as printer_shim:
assert main('scan --string'.split()) == 0
assert uncolor(printer_shim.message) == get_plugin_report({
'Base64HighEntropyString': expected_base64_result,
'HexHighEntropyString': expected_hex_result,
})
mock_baseline_initialize.assert_not_called()
def test_scan_string_cli_overrides_stdin(self):
with mock_stdin(
'012345678ab',
), mock_printer(
main_module,
) as printer_shim:
assert main('scan --string 012345'.split()) == 0
assert uncolor(printer_shim.message) == get_plugin_report({
'Base64HighEntropyString': 'False (2.585)',
'HexHighEntropyString': 'False (2.121)',
})
def test_scan_with_all_files_flag(self, mock_baseline_initialize):
with mock_stdin():
assert main('scan --all-files'.split()) == 0
mock_baseline_initialize.assert_called_once_with(
plugins=Any(tuple),
exclude_files_regex=None,
exclude_lines_regex=None,
path='.',
should_scan_all_files=True,
output_raw=False,
output_verified_false=False,
word_list_file=None,
word_list_hash=None,
)
def test_reads_from_stdin(self, mock_merge_baseline):
with mock_stdin(json.dumps({'key': 'value'})):
assert main(['scan']) == 0
mock_merge_baseline.assert_called_once_with(
{'key': 'value'},
Any(dict),
)
def test_reads_old_baseline_from_file(self, mock_merge_baseline):
with mock_stdin(), mock.patch(
'detect_secrets.main._read_from_file',
return_value={'key': 'value'},
) as m_read, mock.patch(
'detect_secrets.main.write_baseline_to_file',
) as m_write:
assert main('scan --update old_baseline_file'.split()) == 0
assert m_read.call_args[0][0] == 'old_baseline_file'
assert m_write.call_args[1]['filename'] == 'old_baseline_file'
assert m_write.call_args[1]['data'] == Any(dict)
mock_merge_baseline.assert_called_once_with(
{'key': 'value'},
Any(dict),
)
def test_reads_non_existed_baseline_from_file(
self,
mock_merge_baseline,
mock_baseline_initialize,
):
fnf_error = FileNotFoundError()
fnf_error.errno = 2
with mock_stdin(), mock.patch(
'detect_secrets.main._read_from_file',
side_effect=fnf_error,
) as m_read, mock.patch(
'detect_secrets.main.write_baseline_to_file',
) as m_write:
assert main('scan --update non_existed_baseline_file'.split()) == 0
assert m_read.call_args[0][0] == 'non_existed_baseline_file'
assert m_write.call_args[1]['filename'] == 'non_existed_baseline_file'
assert m_write.call_args[1]['data'] == Any(dict)
mock_baseline_initialize.assert_called_once_with(
plugins=Any(tuple),
exclude_files_regex='^non_existed_baseline_file$',
exclude_lines_regex=None,
path='.',
should_scan_all_files=False,
output_raw=False,
output_verified_false=False,
word_list_file=None,
word_list_hash=None,
)
mock_merge_baseline.assert_not_called()
def test_reads_baseline_from_file_with_other_ioerror(
self,
):
io_error = IOError()
with mock_stdin(), mock.patch(
'detect_secrets.main._read_from_file',
side_effect=io_error,
) as m_read:
with pytest.raises(IOError):
main('scan --update non_existed_baseline_file'.split()) == 0
assert m_read.call_args[0][0] == 'non_existed_baseline_file'
@pytest.mark.parametrize(
'exclude_files_arg, expected_regex',
[
(
'',
'^old_baseline_file$',
),
(
'--exclude-files "secrets/.*"',
'secrets/.*|^old_baseline_file$',
),
(
'--exclude-files "^old_baseline_file$"',
'^old_baseline_file$',
),
],
)
def test_old_baseline_ignored_with_update_flag(
self,
mock_baseline_initialize,
exclude_files_arg,
expected_regex,
):
with mock_stdin(), mock.patch(
'detect_secrets.main._read_from_file',
return_value={},
), mock.patch(
# We don't want to be creating a file during test
'detect_secrets.main.write_baseline_to_file',
) as file_writer:
assert main(
shlex.split(
'scan --update old_baseline_file {}'.format(
exclude_files_arg,
),
),
) == 0
assert (
file_writer.call_args[1]['data']['exclude']['files']
== expected_regex
)
@pytest.mark.parametrize(
'plugins_used, plugins_overwriten, plugins_wrote',
[
( # Remove some plugins from baseline
[
{
'base64_limit': 4.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
'--no-base64-string-scan --no-keyword-scan',
[
{
'name': 'PrivateKeyDetector',
},
],
),
( # All plugins
[
{
'base64_limit': 1.5,
'name': 'Base64HighEntropyString',
},
],
'--use-all-plugins',
get_list_of_plugins(
include=[
{
'base64_limit': 1.5,
'name': 'Base64HighEntropyString',
},
],
),
),
( # Remove some plugins from all plugins
[
{
'base64_limit': 4.5,
'name': 'Base64HighEntropyString',
},
],
'--use-all-plugins --no-base64-string-scan --no-private-key-scan',
get_list_of_plugins(
exclude=(
'Base64HighEntropyString',
'PrivateKeyDetector',
),
),
),
( # Use same plugin list from baseline
[
{
'base64_limit': 3.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
'',
[
{
'base64_limit': 3.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
),
( # Overwrite base limit from CLI
[
{
'base64_limit': 3.5,
'name': 'Base64HighEntropyString',
}, {
'name': 'PrivateKeyDetector',
},
],
'--base64-limit=5.5',
[
{
'base64_limit': 5.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
),
( # Does not overwrite base limit from CLI if baseline not using the plugin
[
{
'name': 'PrivateKeyDetector',
},
],
'--base64-limit=4.5',
[
{
'name': 'PrivateKeyDetector',
},
],
),
            (  # Use overwritten option from CLI only when using --use-all-plugins
[
{
'base64_limit': 3.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
'--use-all-plugins --base64-limit=5.5 --no-hex-string-scan --no-keyword-scan',
get_list_of_plugins(
include=[
{
'base64_limit': 5.5,
'name': 'Base64HighEntropyString',
},
],
exclude=(
'HexHighEntropyString',
'KeywordDetector',
),
),
),
( # Use plugin limit from baseline when using --use-all-plugins and no input limit
[
{
'base64_limit': 2.5,
'name': 'Base64HighEntropyString',
},
{
'name': 'PrivateKeyDetector',
},
],
'--use-all-plugins --no-hex-string-scan --no-keyword-scan',
get_list_of_plugins(
include=[
{
'base64_limit': 2.5,
'name': 'Base64HighEntropyString',
},
],
exclude=(
'HexHighEntropyString',
'KeywordDetector',
),
),
),
],
)
def test_plugin_from_old_baseline_respected_with_update_flag(
self,
mock_baseline_initialize,
plugins_used, plugins_overwriten, plugins_wrote,
):
with mock_stdin(), mock.patch(
'detect_secrets.main._read_from_file',
return_value={
'plugins_used': plugins_used,
'results': {},
'version': VERSION,
'exclude': {
'files': '',
'lines': '',
},
},
), mock.patch(
# We don't want to be creating a file during test
'detect_secrets.main.write_baseline_to_file',
) as file_writer:
assert main(
shlex.split(
'scan --update old_baseline_file {}'.format(
plugins_overwriten,
),
),
) == 0
assert (
file_writer.call_args[1]['data']['plugins_used']
==
plugins_wrote
)
@pytest.mark.parametrize(
'filename, expected_output',
[
(
'test_data/short_files/first_line.php',
textwrap.dedent("""
1:secret = 'notHighEnoughEntropy'
2:skipped_sequential_false_positive = '0123456789a'
3:print('second line')
4:var = 'third line'
""")[1:-1],
),
(
'test_data/short_files/middle_line.yml',
textwrap.dedent("""
1:deploy:
2: user: aaronloo
3: password:
4: secure: thequickbrownfoxjumpsoverthelazydog
5: on:
6: repo: Yelp/detect-secrets
""")[1:-1],
),
(
'test_data/short_files/last_line.ini',
textwrap.dedent("""
1:[some section]
2:secrets_for_no_one_to_find =
3: hunter2
4: password123
5: BEEF0123456789a
""")[1:-1],
),
],
)
def test_audit_short_file(self, filename, expected_output):
with mock_stdin(), mock_printer(
# To extract the baseline output
main_module,
) as printer_shim:
main(['scan', filename])
baseline = printer_shim.message
baseline_dict = json.loads(baseline)
with mock_stdin(), mock.patch(
# To pipe in printer_shim
'detect_secrets.core.audit._get_baseline_from_file',
return_value=baseline_dict,
), mock.patch(
# We don't want to clear the pytest testing screen
'detect_secrets.core.audit._clear_screen',
), mock.patch(
# Gotta mock it, because tests aren't interactive
'detect_secrets.core.audit._get_user_decision',
return_value='s',
), mock.patch(
# We don't want to write an actual file
'detect_secrets.core.audit.write_baseline_to_file',
), mock_printer(
audit_module,
) as printer_shim:
main('audit will_be_mocked'.split())
assert uncolor(printer_shim.message) == textwrap.dedent("""
Secret: 1 of 1
Filename: {}
Secret Type: {}
----------
{}
----------
Saving progress...
""")[1:].format(
filename,
baseline_dict['results'][filename][0]['type'],
expected_output,
)
@pytest.mark.parametrize(
'filename, expected_output',
[
(
'test_data/short_files/first_line.php',
{
'KeywordDetector': {
'config': {
'name': 'KeywordDetector',
'keyword_exclude': None,
},
'results': {
'false-positives': {},
'true-positives': {},
'unknowns': {
'test_data/short_files/first_line.php': [{
'line': "secret = 'notHighEnoughEntropy'",
'plaintext': 'nothighenoughentropy',
}],
},
},
},
},
),
],
)
def test_audit_display_results(self, filename, expected_output):
with mock_stdin(), mock_printer(
main_module,
) as printer_shim:
main(['scan', filename])
baseline = printer_shim.message
baseline_dict = json.loads(baseline)
with mock.patch(
'detect_secrets.core.audit._get_baseline_from_file',
return_value=baseline_dict,
), mock_printer(
audit_module,
) as printer_shim:
main(['audit', '--display-results', 'MOCKED'])
assert json.loads(uncolor(printer_shim.message))['plugins'] == expected_output
def test_audit_diff_not_enough_files(self):
assert main('audit --diff fileA'.split()) == 1
def test_audit_same_file(self):
with mock_printer(main_module) as printer_shim:
assert main('audit --diff .secrets.baseline .secrets.baseline'.split()) == 0
assert printer_shim.message.strip() == (
'No difference, because it\'s the same file!'
)
@contextmanager
def mock_stdin(response=None):
if not response:
with mock.patch('detect_secrets.main.sys') as m:
m.stdin.isatty.return_value = True
yield
else:
with mock.patch('detect_secrets.main.sys') as m:
m.stdin.isatty.return_value = False
m.stdin.read.return_value = response
yield
@pytest.fixture
def mock_baseline_initialize():
def mock_initialize_function(plugins, exclude_files_regex, *args, **kwargs):
return secrets_collection_factory(
plugins=plugins,
exclude_files_regex=exclude_files_regex,
)
with mock.patch(
'detect_secrets.main.baseline.initialize',
side_effect=mock_initialize_function,
) as mock_initialize:
yield mock_initialize
@pytest.fixture
def mock_merge_baseline():
with mock.patch(
'detect_secrets.main.baseline.merge_baseline',
) as m:
# This return value needs to have the `results` key, so that it can
# formatted appropriately for output.
m.return_value = {'results': {}}
yield m
| 32.318519
| 95
| 0.476461
|
56ed1a1c49244c3131011bab0f49446d07144709
| 6,760
|
py
|
Python
|
python/ray/rllib/optimizers/multi_gpu_optimizer.py
|
matthew-z/ray
|
f37c260bdb3cbaafc783c6274f2c4b929fce0f9a
|
[
"Apache-2.0"
] | null | null | null |
python/ray/rllib/optimizers/multi_gpu_optimizer.py
|
matthew-z/ray
|
f37c260bdb3cbaafc783c6274f2c4b929fce0f9a
|
[
"Apache-2.0"
] | null | null | null |
python/ray/rllib/optimizers/multi_gpu_optimizer.py
|
matthew-z/ray
|
f37c260bdb3cbaafc783c6274f2c4b929fce0f9a
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from collections import defaultdict
import os
import tensorflow as tf
import ray
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.utils.timer import TimerStat
class LocalMultiGPUOptimizer(PolicyOptimizer):
"""A synchronous optimizer that uses multiple local GPUs.
Samples are pulled synchronously from multiple remote evaluators,
concatenated, and then split across the memory of multiple local GPUs.
A number of SGD passes are then taken over the in-memory data. For more
details, see `multi_gpu_impl.LocalSyncParallelOptimizer`.
    This optimizer is TensorFlow-specific and requires the underlying
    PolicyGraph to be a TFPolicyGraph instance that supports `.copy()`.
Note that all replicas of the TFPolicyGraph will merge their
extra_compute_grad and apply_grad feed_dicts and fetches. This
may result in unexpected behavior.
"""
def _init(self,
sgd_batch_size=128,
num_sgd_iter=10,
timesteps_per_batch=1024,
num_gpus=0,
standardize_fields=[]):
self.batch_size = sgd_batch_size
self.num_sgd_iter = num_sgd_iter
self.timesteps_per_batch = timesteps_per_batch
if not num_gpus:
self.devices = ["/cpu:0"]
else:
self.devices = ["/gpu:{}".format(i) for i in range(num_gpus)]
self.batch_size = int(sgd_batch_size / len(self.devices)) * len(
self.devices)
assert self.batch_size % len(self.devices) == 0
assert self.batch_size >= len(self.devices), "batch size too small"
self.per_device_batch_size = int(self.batch_size / len(self.devices))
self.sample_timer = TimerStat()
self.load_timer = TimerStat()
self.grad_timer = TimerStat()
self.update_weights_timer = TimerStat()
self.standardize_fields = standardize_fields
print("LocalMultiGPUOptimizer devices", self.devices)
if set(self.local_evaluator.policy_map.keys()) != {"default"}:
raise ValueError(
"Multi-agent is not supported with multi-GPU. Try using the "
"simple optimizer instead.")
self.policy = self.local_evaluator.policy_map["default"]
if not isinstance(self.policy, TFPolicyGraph):
raise ValueError(
"Only TF policies are supported with multi-GPU. Try using the "
"simple optimizer instead.")
# per-GPU graph copies created below must share vars with the policy
# reuse is set to AUTO_REUSE because Adam nodes are created after
# all of the device copies are created.
with self.local_evaluator.tf_sess.graph.as_default():
with self.local_evaluator.tf_sess.as_default():
with tf.variable_scope("default", reuse=tf.AUTO_REUSE):
if self.policy._state_inputs:
rnn_inputs = self.policy._state_inputs + [
self.policy._seq_lens
]
else:
rnn_inputs = []
self.par_opt = LocalSyncParallelOptimizer(
self.policy.optimizer(), self.devices,
[v for _, v in self.policy.loss_inputs()], rnn_inputs,
self.per_device_batch_size, self.policy.copy,
os.getcwd())
self.sess = self.local_evaluator.tf_sess
self.sess.run(tf.global_variables_initializer())
def step(self):
with self.update_weights_timer:
if self.remote_evaluators:
weights = ray.put(self.local_evaluator.get_weights())
for e in self.remote_evaluators:
e.set_weights.remote(weights)
with self.sample_timer:
if self.remote_evaluators:
# TODO(rliaw): remove when refactoring
from ray.rllib.agents.ppo.rollout import collect_samples
samples = collect_samples(self.remote_evaluators,
self.timesteps_per_batch)
else:
samples = self.local_evaluator.sample()
self._check_not_multiagent(samples)
for field in self.standardize_fields:
value = samples[field]
standardized = (value - value.mean()) / max(1e-4, value.std())
samples[field] = standardized
samples.shuffle()
with self.load_timer:
tuples = self.policy._get_loss_inputs_dict(samples)
data_keys = [ph for _, ph in self.policy.loss_inputs()]
if self.policy._state_inputs:
state_keys = (
self.policy._state_inputs + [self.policy._seq_lens])
else:
state_keys = []
tuples_per_device = self.par_opt.load_data(
self.sess, [tuples[k] for k in data_keys],
[tuples[k] for k in state_keys])
with self.grad_timer:
num_batches = (
int(tuples_per_device) // int(self.per_device_batch_size))
print("== sgd epochs ==")
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = self.par_opt.optimize(
self.sess,
permutation[batch_index] * self.per_device_batch_size)
for k, v in batch_fetches.items():
iter_extra_fetches[k].append(v)
print(i, _averaged(iter_extra_fetches))
self.num_steps_sampled += samples.count
self.num_steps_trained += samples.count
return _averaged(iter_extra_fetches)
def stats(self):
return dict(
PolicyOptimizer.stats(self), **{
"sample_time_ms": round(1000 * self.sample_timer.mean, 3),
"load_time_ms": round(1000 * self.load_timer.mean, 3),
"grad_time_ms": round(1000 * self.grad_timer.mean, 3),
"update_time_ms": round(1000 * self.update_weights_timer.mean,
3),
})
def _averaged(kv):
out = {}
for k, v in kv.items():
if v[0] is not None:
out[k] = np.mean(v)
return out
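# Illustrative sketch (not part of RLlib): _averaged above reduces per-batch
# fetches to their mean and drops any key whose first recorded value is None.
# The function name, metric names and numbers are made-up examples.
def _sketch_averaged():
    fetches = {"policy_loss": [0.5, 0.7], "kl": [None, None]}
    return _averaged(fetches)  # -> {"policy_loss": ~0.6}; "kl" is dropped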
| 41.987578
| 79
| 0.605621
|
b230783fc9834d84bc3a06121faa4bb99b564d0f
| 17,951
|
py
|
Python
|
netbox/ipam/views.py
|
pkorovin/netbox
|
bf35b4121fdf51908be3d8df625db828261eecaa
|
[
"Apache-2.0"
] | 1
|
2021-09-06T05:12:45.000Z
|
2021-09-06T05:12:45.000Z
|
netbox/ipam/views.py
|
kovarus/netbox
|
db72a64ef7639a34ebd96038e05443222566a790
|
[
"Apache-2.0"
] | null | null | null |
netbox/ipam/views.py
|
kovarus/netbox
|
db72a64ef7639a34ebd96038e05443222566a790
|
[
"Apache-2.0"
] | 1
|
2020-10-15T07:41:44.000Z
|
2020-10-15T07:41:44.000Z
|
from netaddr import IPSet
from django_tables2 import RequestConfig
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db.models import Count
from django.shortcuts import get_object_or_404, render
from dcim.models import Device
from utilities.paginator import EnhancedPaginator
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from . import filters, forms, tables
from .models import Aggregate, IPAddress, Prefix, RIR, Role, VLAN, VRF
def add_available_prefixes(parent, prefix_list):
"""
Create fake Prefix objects for all unallocated space within a prefix.
"""
# Find all unallocated space
available_prefixes = IPSet(parent) ^ IPSet([p.prefix for p in prefix_list])
available_prefixes = [Prefix(prefix=p) for p in available_prefixes.iter_cidrs()]
# Concatenate and sort complete list of children
prefix_list = list(prefix_list) + available_prefixes
prefix_list.sort(key=lambda p: p.prefix)
return prefix_list
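# Minimal sketch (not part of NetBox) of the netaddr set arithmetic used above:
# XOR-ing the parent network with its allocated children yields the free blocks.
# The function name and the prefixes below are made-up examples.
def _sketch_available_space():
    parent = IPSet(['10.0.0.0/24'])
    allocated = IPSet(['10.0.0.0/26', '10.0.0.128/26'])
    free = parent ^ allocated
    # -> ['10.0.0.64/26', '10.0.0.192/26']
    return [str(cidr) for cidr in free.iter_cidrs()]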
#
# VRFs
#
class VRFListView(ObjectListView):
queryset = VRF.objects.all()
filter = filters.VRFFilter
table = tables.VRFTable
edit_permissions = ['ipam.change_vrf', 'ipam.delete_vrf']
template_name = 'ipam/vrf_list.html'
def vrf(request, pk):
vrf = get_object_or_404(VRF.objects.all(), pk=pk)
prefixes = Prefix.objects.filter(vrf=vrf)
return render(request, 'ipam/vrf.html', {
'vrf': vrf,
'prefixes': prefixes,
})
class VRFEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_vrf'
model = VRF
form_class = forms.VRFForm
cancel_url = 'ipam:vrf_list'
class VRFDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'ipam.delete_vrf'
model = VRF
redirect_url = 'ipam:vrf_list'
class VRFBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'ipam.add_vrf'
form = forms.VRFImportForm
table = tables.VRFTable
template_name = 'ipam/vrf_import.html'
obj_list_url = 'ipam:vrf_list'
class VRFBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'ipam.change_vrf'
cls = VRF
form = forms.VRFBulkEditForm
template_name = 'ipam/vrf_bulk_edit.html'
default_redirect_url = 'ipam:vrf_list'
def update_objects(self, pk_list, form):
fields_to_update = {}
for field in ['description']:
if form.cleaned_data[field]:
fields_to_update[field] = form.cleaned_data[field]
return self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
class VRFBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_vrf'
cls = VRF
form = forms.VRFBulkDeleteForm
default_redirect_url = 'ipam:vrf_list'
#
# RIRs
#
class RIRListView(ObjectListView):
queryset = RIR.objects.annotate(aggregate_count=Count('aggregates'))
table = tables.RIRTable
edit_permissions = ['ipam.change_rir', 'ipam.delete_rir']
template_name = 'ipam/rir_list.html'
class RIREditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_rir'
model = RIR
form_class = forms.RIRForm
success_url = 'ipam:rir_list'
cancel_url = 'ipam:rir_list'
class RIRBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_rir'
cls = RIR
form = forms.RIRBulkDeleteForm
default_redirect_url = 'ipam:rir_list'
#
# Aggregates
#
class AggregateListView(ObjectListView):
queryset = Aggregate.objects.select_related('rir').extra(select={
'child_count': 'SELECT COUNT(*) FROM ipam_prefix WHERE ipam_prefix.prefix <<= ipam_aggregate.prefix',
})
filter = filters.AggregateFilter
filter_form = forms.AggregateFilterForm
table = tables.AggregateTable
edit_permissions = ['ipam.change_aggregate', 'ipam.delete_aggregate']
template_name = 'ipam/aggregate_list.html'
def extra_context(self):
ipv4_total = 0
ipv6_total = 0
for a in self.queryset:
if a.prefix.version == 4:
ipv4_total += a.prefix.size
elif a.prefix.version == 6:
ipv6_total += a.prefix.size / 2**64
return {
'ipv4_total': ipv4_total,
'ipv6_total': ipv6_total,
}
def aggregate(request, pk):
aggregate = get_object_or_404(Aggregate, pk=pk)
# Find all child prefixes contained by this aggregate
child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(aggregate.prefix))\
.select_related('site', 'role').annotate_depth(limit=0)
child_prefixes = add_available_prefixes(aggregate.prefix, child_prefixes)
prefix_table = tables.PrefixTable(child_prefixes)
prefix_table.model = Prefix
if request.user.has_perm('ipam.change_prefix') or request.user.has_perm('ipam.delete_prefix'):
prefix_table.base_columns['pk'].visible = True
RequestConfig(request, paginate={'klass': EnhancedPaginator}).configure(prefix_table)
return render(request, 'ipam/aggregate.html', {
'aggregate': aggregate,
'prefix_table': prefix_table,
})
class AggregateEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_aggregate'
model = Aggregate
form_class = forms.AggregateForm
cancel_url = 'ipam:aggregate_list'
class AggregateDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'ipam.delete_aggregate'
model = Aggregate
redirect_url = 'ipam:aggregate_list'
class AggregateBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'ipam.add_aggregate'
form = forms.AggregateImportForm
table = tables.AggregateTable
template_name = 'ipam/aggregate_import.html'
obj_list_url = 'ipam:aggregate_list'
class AggregateBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'ipam.change_aggregate'
cls = Aggregate
form = forms.AggregateBulkEditForm
template_name = 'ipam/aggregate_bulk_edit.html'
default_redirect_url = 'ipam:aggregate_list'
def update_objects(self, pk_list, form):
fields_to_update = {}
for field in ['rir', 'date_added', 'description']:
if form.cleaned_data[field]:
fields_to_update[field] = form.cleaned_data[field]
return self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
class AggregateBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_aggregate'
cls = Aggregate
form = forms.AggregateBulkDeleteForm
default_redirect_url = 'ipam:aggregate_list'
#
# Prefix/VLAN roles
#
class RoleListView(ObjectListView):
queryset = Role.objects.all()
table = tables.RoleTable
edit_permissions = ['ipam.change_role', 'ipam.delete_role']
template_name = 'ipam/role_list.html'
class RoleEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_role'
model = Role
form_class = forms.RoleForm
success_url = 'ipam:role_list'
cancel_url = 'ipam:role_list'
class RoleBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_role'
cls = Role
form = forms.RoleBulkDeleteForm
default_redirect_url = 'ipam:role_list'
#
# Prefixes
#
class PrefixListView(ObjectListView):
queryset = Prefix.objects.select_related('site', 'role')
filter = filters.PrefixFilter
filter_form = forms.PrefixFilterForm
table = tables.PrefixTable
edit_permissions = ['ipam.change_prefix', 'ipam.delete_prefix']
template_name = 'ipam/prefix_list.html'
def alter_queryset(self, request):
# Show only top-level prefixes by default (unless searching)
limit = None if request.GET.get('expand') or request.GET.get('q') else 0
return self.queryset.annotate_depth(limit=limit)
def prefix(request, pk):
prefix = get_object_or_404(Prefix.objects.select_related('site', 'vlan', 'role'), pk=pk)
try:
aggregate = Aggregate.objects.get(prefix__net_contains_or_equals=str(prefix.prefix))
except Aggregate.DoesNotExist:
aggregate = None
# Count child IP addresses
ipaddress_count = IPAddress.objects.filter(address__net_contained_or_equal=str(prefix.prefix)).count()
# Parent prefixes table
parent_prefixes = Prefix.objects.filter(vrf=prefix.vrf, prefix__net_contains=str(prefix.prefix))\
.select_related('site', 'role').annotate_depth()
parent_prefix_table = tables.PrefixBriefTable(parent_prefixes)
# Duplicate prefixes table
duplicate_prefixes = Prefix.objects.filter(vrf=prefix.vrf, prefix=str(prefix.prefix)).exclude(pk=prefix.pk)\
.select_related('site', 'role')
duplicate_prefix_table = tables.PrefixBriefTable(duplicate_prefixes)
# Child prefixes table
child_prefixes = Prefix.objects.filter(vrf=prefix.vrf, prefix__net_contained=str(prefix.prefix))\
.select_related('site', 'role').annotate_depth(limit=0)
if child_prefixes:
child_prefixes = add_available_prefixes(prefix.prefix, child_prefixes)
child_prefix_table = tables.PrefixTable(child_prefixes)
child_prefix_table.model = Prefix
if request.user.has_perm('ipam.change_prefix') or request.user.has_perm('ipam.delete_prefix'):
child_prefix_table.base_columns['pk'].visible = True
RequestConfig(request, paginate={'klass': EnhancedPaginator}).configure(child_prefix_table)
return render(request, 'ipam/prefix.html', {
'prefix': prefix,
'aggregate': aggregate,
'ipaddress_count': ipaddress_count,
'parent_prefix_table': parent_prefix_table,
'child_prefix_table': child_prefix_table,
'duplicate_prefix_table': duplicate_prefix_table,
})
class PrefixEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_prefix'
model = Prefix
form_class = forms.PrefixForm
fields_initial = ['site', 'vrf', 'prefix']
cancel_url = 'ipam:prefix_list'
class PrefixDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'ipam.delete_prefix'
model = Prefix
redirect_url = 'ipam:prefix_list'
class PrefixBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'ipam.add_prefix'
form = forms.PrefixImportForm
table = tables.PrefixTable
template_name = 'ipam/prefix_import.html'
obj_list_url = 'ipam:prefix_list'
class PrefixBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'ipam.change_prefix'
cls = Prefix
form = forms.PrefixBulkEditForm
template_name = 'ipam/prefix_bulk_edit.html'
default_redirect_url = 'ipam:prefix_list'
def update_objects(self, pk_list, form):
fields_to_update = {}
if form.cleaned_data['vrf']:
fields_to_update['vrf'] = form.cleaned_data['vrf']
elif form.cleaned_data['vrf_global']:
fields_to_update['vrf'] = None
for field in ['site', 'status', 'role', 'description']:
if form.cleaned_data[field]:
fields_to_update[field] = form.cleaned_data[field]
return self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
class PrefixBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_prefix'
cls = Prefix
form = forms.PrefixBulkDeleteForm
default_redirect_url = 'ipam:prefix_list'
def prefix_ipaddresses(request, pk):
prefix = get_object_or_404(Prefix.objects.all(), pk=pk)
# Find all IPAddresses belonging to this Prefix
ipaddresses = IPAddress.objects.filter(address__net_contained_or_equal=str(prefix.prefix))\
.select_related('vrf', 'interface__device', 'primary_for')
ip_table = tables.IPAddressTable(ipaddresses)
ip_table.model = IPAddress
if request.user.has_perm('ipam.change_ipaddress') or request.user.has_perm('ipam.delete_ipaddress'):
ip_table.base_columns['pk'].visible = True
RequestConfig(request, paginate={'klass': EnhancedPaginator}).configure(ip_table)
return render(request, 'ipam/prefix_ipaddresses.html', {
'prefix': prefix,
'ip_table': ip_table,
})
#
# IP addresses
#
class IPAddressListView(ObjectListView):
queryset = IPAddress.objects.select_related('vrf', 'interface__device', 'primary_for')
filter = filters.IPAddressFilter
filter_form = forms.IPAddressFilterForm
table = tables.IPAddressTable
edit_permissions = ['ipam.change_ipaddress', 'ipam.delete_ipaddress']
template_name = 'ipam/ipaddress_list.html'
def ipaddress(request, pk):
ipaddress = get_object_or_404(IPAddress.objects.select_related('interface__device'), pk=pk)
# Parent prefixes table
parent_prefixes = Prefix.objects.filter(vrf=ipaddress.vrf, prefix__net_contains=str(ipaddress.address.ip))
parent_prefixes_table = tables.PrefixBriefTable(parent_prefixes)
# Duplicate IPs table
duplicate_ips = IPAddress.objects.filter(vrf=ipaddress.vrf, address=str(ipaddress.address))\
.exclude(pk=ipaddress.pk).select_related('interface__device', 'nat_inside')
duplicate_ips_table = tables.IPAddressBriefTable(duplicate_ips)
# Related IP table
related_ips = IPAddress.objects.select_related('interface__device').exclude(address=str(ipaddress.address))\
.filter(vrf=ipaddress.vrf, address__net_contained_or_equal=str(ipaddress.address))
related_ips_table = tables.IPAddressBriefTable(related_ips)
return render(request, 'ipam/ipaddress.html', {
'ipaddress': ipaddress,
'parent_prefixes_table': parent_prefixes_table,
'duplicate_ips_table': duplicate_ips_table,
'related_ips_table': related_ips_table,
})
class IPAddressEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_ipaddress'
model = IPAddress
form_class = forms.IPAddressForm
fields_initial = ['address', 'vrf']
template_name = 'ipam/ipaddress_edit.html'
cancel_url = 'ipam:ipaddress_list'
class IPAddressDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'ipam.delete_ipaddress'
model = IPAddress
redirect_url = 'ipam:ipaddress_list'
class IPAddressBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'ipam.add_ipaddress'
form = forms.IPAddressImportForm
table = tables.IPAddressTable
template_name = 'ipam/ipaddress_import.html'
obj_list_url = 'ipam:ipaddress_list'
def save_obj(self, obj):
obj.save()
# Update primary IP for device if needed
try:
device = obj.primary_for
device.primary_ip = obj
device.save()
except Device.DoesNotExist:
pass
class IPAddressBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'ipam.change_ipaddress'
cls = IPAddress
form = forms.IPAddressBulkEditForm
template_name = 'ipam/ipaddress_bulk_edit.html'
default_redirect_url = 'ipam:ipaddress_list'
def update_objects(self, pk_list, form):
fields_to_update = {}
if form.cleaned_data['vrf']:
fields_to_update['vrf'] = form.cleaned_data['vrf']
elif form.cleaned_data['vrf_global']:
fields_to_update['vrf'] = None
for field in ['description']:
if form.cleaned_data[field]:
fields_to_update[field] = form.cleaned_data[field]
return self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
class IPAddressBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_ipaddress'
cls = IPAddress
form = forms.IPAddressBulkDeleteForm
default_redirect_url = 'ipam:ipaddress_list'
#
# VLANs
#
class VLANListView(ObjectListView):
queryset = VLAN.objects.select_related('site', 'role')
filter = filters.VLANFilter
filter_form = forms.VLANFilterForm
table = tables.VLANTable
edit_permissions = ['ipam.change_vlan', 'ipam.delete_vlan']
template_name = 'ipam/vlan_list.html'
def vlan(request, pk):
vlan = get_object_or_404(VLAN.objects.select_related('site', 'role'), pk=pk)
prefixes = Prefix.objects.filter(vlan=vlan)
return render(request, 'ipam/vlan.html', {
'vlan': vlan,
'prefixes': prefixes,
})
class VLANEditView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'ipam.change_vlan'
model = VLAN
form_class = forms.VLANForm
cancel_url = 'ipam:vlan_list'
class VLANDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'ipam.delete_vlan'
model = VLAN
redirect_url = 'ipam:vlan_list'
class VLANBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'ipam.add_vlan'
form = forms.VLANImportForm
table = tables.VLANTable
template_name = 'ipam/vlan_import.html'
obj_list_url = 'ipam:vlan_list'
class VLANBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'ipam.change_vlan'
cls = VLAN
form = forms.VLANBulkEditForm
template_name = 'ipam/vlan_bulk_edit.html'
default_redirect_url = 'ipam:vlan_list'
def update_objects(self, pk_list, form):
fields_to_update = {}
for field in ['site', 'status', 'role']:
if form.cleaned_data[field]:
fields_to_update[field] = form.cleaned_data[field]
return self.cls.objects.filter(pk__in=pk_list).update(**fields_to_update)
class VLANBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'ipam.delete_vlan'
cls = VLAN
form = forms.VLANBulkDeleteForm
default_redirect_url = 'ipam:vlan_list'
| 32.757299
| 112
| 0.724584
|
e9668243db28416da2da26a5fbe0bd86fd17d4e4
| 9,504
|
py
|
Python
|
my_tools.py
|
Nykaus/my_tools
|
a23045c8f0d91844c9db2f82a5694da5cc9e4ae1
|
[
"MIT"
] | null | null | null |
my_tools.py
|
Nykaus/my_tools
|
a23045c8f0d91844c9db2f82a5694da5cc9e4ae1
|
[
"MIT"
] | null | null | null |
my_tools.py
|
Nykaus/my_tools
|
a23045c8f0d91844c9db2f82a5694da5cc9e4ae1
|
[
"MIT"
] | null | null | null |
import sublime
import sublime_plugin
import os
import re
import html
import webbrowser
import urllib.request
#import requests
#package_dir = os.path.abspath(os.path.dirname(__file__))
#class GetContentHtmlCommand(sublime_plugin.TextCommand):
# def run(self,edit):
#
# #get the selections made in the editor
# tabRegion=self.view.sel()
#
# for laRegion in tabRegion:
# lUrl=self.view.substr(laRegion)
# lUrl=re.sub("http[s]?://","",lUrl)
#
# if "/" in lUrl:
# Url=lUrl.split("/",2)
# nomDomaine=Url[0].replace("/","")
# Uri=Url[1]
# else:
# nomDomaine=lUrl
# Uri=""
#
# checkUrl="http://"+lUrl
# statusError=0
#
# # open the selected page
# try:
# #local_filename, headers = urllib.request.urlretrieve(checkUrl,package_dir+"/html/copy.html")
# #print(headers)
# #html = open(local_filename)
# #html.close()
# #urllib.request.urlretrieve(checkUrl,package_dir+"\html\index.html")
# #print(package_dir+"\html\index.html")
# headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
#
# req = urllib.request.Request(checkUrl, None, headers)
# sock = urllib.request.urlopen(req)
#
# htmlSource = sock.read().decode("utf-8", 'ignore')
# sock.close()
# except Exception as e:
# statusError=1
# msgErreur ="\n\n================ERREUR====================\n\t"
# msgErreur +=str(e)+"\n\t"+self.view.substr(laRegion)
# msgErreur +="\n==========================================\n\n"
# self.view.replace(edit, laRegion,msgErreur)
#
# # information was retrieved without error
# if(statusError==0):
# # check whether the site content was retrieved
# if(htmlSource==""):
# self.view.replace(edit, laRegion,"ERREUR !! Auncun contenu pour URL: '"+checkUrl+"'")
# else:
# self.view.replace(edit, laRegion,htmlSource)
#ctrl+alt+g select a URL
class ListMethodCommand(sublime_plugin.TextCommand):
def run(self,edit):
listMethod=[];
exportContent="";
tabRegion=self.view.sel()
for laRegion in tabRegion:
regex = self.view.substr(laRegion);
if(regex==""):
regex="function ([a-zA-Z\_\-]+)\("
print(regex)
listMethod=re.findall(regex,sublime.get_clipboard())
exportContent = "regex : "+regex+"\n"
for nameMethod in listMethod:
exportContent += str(nameMethod+"\n")
self.view.replace(edit,laRegion,exportContent)
#ctrl+alt+g select a URL
class FrToEnCommand(sublime_plugin.TextCommand):
def run(self,edit):
tabRegion=self.view.sel()
for laRegion in tabRegion:
laRecherche=self.view.substr(laRegion).replace(" ","+")
checkUrl="https://translate.google.fr/?hl=fr#fr/en/"+laRecherche
webbrowser.open(checkUrl)
#ctrl+alt+g select a URL
class EnToFrCommand(sublime_plugin.TextCommand):
def run(self,edit):
tabRegion=self.view.sel()
for laRegion in tabRegion:
laRecherche=self.view.substr(laRegion).replace(" ","+")
checkUrl="https://translate.google.fr/?hl=en#en/fr/"+laRecherche
webbrowser.open(checkUrl)
#ctrl+alt+c
class NumerotationCommand(sublime_plugin.TextCommand):
def run(self, edit):
tabRegion=self.view.sel()
compteur=1
for laRegion in tabRegion:
self.view.replace(edit,laRegion,str(compteur))
compteur=compteur+1
#ctrl+alt+v
class ClearConsoleCommand(sublime_plugin.ApplicationCommand):
def run(self):
print('\n' * 50)
#ctrl+alt+space selection 2 region
class SwitchCommand(sublime_plugin.TextCommand):
def run(self , edit):
tabRegion=self.view.sel()
tabContent=[self.view.substr(tabRegion[0]),self.view.substr(tabRegion[1])]
self.view.replace(edit, tabRegion[0], tabContent[1])
self.view.replace(edit, tabRegion[1], tabContent[0])
#ctrl+alt+keypad_plus
class PlusOneCommand(sublime_plugin.TextCommand):
def run(self,edit):
tabRegion=self.view.sel()
for laRegion in tabRegion:
numRegion=int(self.view.substr(laRegion))+1
self.view.replace(edit, laRegion,str(numRegion))
#ctrl+alt+keypad_minus
class MinusOneCommand(sublime_plugin.TextCommand):
def run(self,edit):
tabRegion=self.view.sel()
for laRegion in tabRegion:
numRegion=int(self.view.substr(laRegion))-1
self.view.replace(edit, laRegion,str(numRegion))
#ctrl+alt+f
class GetPathCommand(sublime_plugin.TextCommand):
def run(self , edit):
tabRegion=self.view.sel()
self.view.replace(edit, tabRegion[0], str(self.view.file_name()))
#ctrl+alt+x
class StampHeureCommand(sublime_plugin.TextCommand):
def run(self,edit):
tabRegion=self.view.sel()
laRegion = tabRegion[-1]
self.view.sel().clear()
self.view.sel().add(laRegion)
self.view.show(laRegion)
from datetime import datetime
FMT = '%H:%M'
self.view.replace(edit, laRegion,str(datetime.now().strftime(FMT)))
print(laRegion)
#ctrl+alt+keypad_divide
class DiffHeureCommand(sublime_plugin.TextCommand):
def run(self,edit):
		#get the selections made in the editor
from datetime import datetime
FMT = '%H:%M'
tabRegion=self.view.sel()
for laRegion in tabRegion:
Selection=self.view.substr(laRegion);
Resultat=Selection.replace(" ","").split("-",2)
tdelta = datetime.strptime(Resultat[1], FMT) - datetime.strptime(Resultat[0], FMT)
Res=str(tdelta).replace("-1 day, ","").split(":",2)
self.view.replace(edit, laRegion,Res[0]+":"+Res[1]+"\t"+str(Selection))
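#Standalone sketch of the time arithmetic used by DiffHeureCommand above,
#without the Sublime Text plumbing. It assumes the same "HH:MM - HH:MM"
#selection format; the function name and example values are made up.
def _sketch_diff_heure(selection="08:30 - 17:15"):
    from datetime import datetime
    fmt = '%H:%M'
    start, end = selection.replace(" ", "").split("-", 2)
    delta = datetime.strptime(end, fmt) - datetime.strptime(start, fmt)
    hours, minutes = str(delta).replace("-1 day, ", "").split(":", 2)[:2]
    return hours + ":" + minutes  # -> "8:45"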
#ctrl+alt+keypad_multiply
class SumHeureCommand(sublime_plugin.TextCommand):
def run(self,edit):
		#get the selections made in the editor
tabRegion=self.view.sel()
sumMinute=0
sumHours=0
cpt=0
for laRegion in tabRegion:
Selection=self.view.substr(laRegion);
select=Selection.split(":",2)
sumMinute+=int(select[1])
sumHours+=int(select[0])
if sumMinute>=60:
sumMinute=sumMinute-60
sumHours+=1
if cpt==0:
self.view.replace(edit, laRegion,"\t\t"+str(Selection))
else:
self.view.replace(edit, laRegion,"= "+str(sumHours)+":"+str(sumMinute)+"\t"+str(Selection))
cpt+=1
#ctrl+alt+s
class MergeMultiSelectionCommand(sublime_plugin.TextCommand):
def run(self, edit):
tabRegion=self.view.sel()
laRegion = tabRegion[-1]
self.view.sel().clear()
self.view.sel().add(laRegion)
self.view.show(laRegion)
#ctrl+alt+s
class GetCalendarCommand(sublime_plugin.TextCommand):
def run(self, edit):
from math import floor
from math import ceil
from datetime import datetime
import calendar
arrMonthFr=["Janvier","Février","Mars","Avril","Mai","Juin","Juillet","Août","Septembre","Octobre","Novembre","Décembre"]
tabRegion=self.view.sel()
for laRegion in tabRegion:
espaceTitreTotal = 36
calendarFormat="------------------------------------\n|%_space1_%%_mois_annee_%%_space2_%|\n------------------------------------\n| Lu | Ma | Me | Je | Ve | Sa | Di |\n------------------------------------\n"
Selection=self.view.substr(laRegion);
cal = calendar.Calendar()
Resultat=[]
			# check whether the selection is of the form M-YYYY
if re.match(r"[0-9]{1,2}-[0-9]{4}", Selection):
Resultat=Selection.replace(" ","").split("-",2)
arrCalendar = cal.monthdayscalendar(int(Resultat[1]), int(Resultat[0]))
Resultat[0]=arrMonthFr[int(Resultat[0])-1]
else:
objDate = datetime.now()
arrCalendar = cal.monthdayscalendar(objDate.year, objDate.month)
Resultat=[arrMonthFr[objDate.month-1]]
Resultat+=[objDate.year]
			# Put the days on each row
tabCalendar = ""
for rowCalendar in arrCalendar:
for dateCalendar in rowCalendar:
                    # if the day does not belong to the month, leave the cell empty
if dateCalendar==0:
tabCalendar+=" "
continue
if dateCalendar<=9:
tabCalendar+="| 0"+str(dateCalendar)+" "
else:
tabCalendar+="| "+str(dateCalendar)+" "
tabCalendar+="|\n------------------------------------\n"
            # build the title: month followed by year
titreMoisAnnée = str(Resultat[0])+" "+str(Resultat[1])
espaceVideTotal= espaceTitreTotal- len(titreMoisAnnée)
            # compute the padding around the title
espaceVide1=""
espaceVide2=""
for i in range(1,floor(espaceVideTotal/2)):
espaceVide1+=" "
for i in range(1,ceil(espaceVideTotal/2)):
espaceVide2+=" "
            # assemble the table header
calendarFormat = calendarFormat.replace("%_mois_annee_%",titreMoisAnnée)
calendarFormat = calendarFormat.replace("%_space1_%",espaceVide1)
calendarFormat = calendarFormat.replace("%_space2_%",espaceVide2)
            # render the calendar in place of the selection
self.view.replace(edit, laRegion,calendarFormat+tabCalendar)
        # clear the table selection and move the cursor to the end
rowRegion = self.view.rowcol(tabRegion[-1].b)[0]
pointEnd = self.view.text_point(rowRegion, 0)
self.view.sel().clear()
self.view.sel().add(pointEnd)
self.view.show(pointEnd)
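# Example (hypothetical): selecting "2-2021" and running GetCalendar replaces the selection
# with an ASCII calendar for February 2021; if the selection does not match M-YYYY, the
# current month is rendered instead.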
#ctrl+alt+q
#TODO: improve so the command can list files across several folders
class GetFileCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        import os  # may already be imported at module level; imported locally so the command is self-contained
tabRegion=self.view.sel()
folder_path = self.view.substr(tabRegion[0])
filesname="Liste des fichiers:\n"+folder_path+"\n\n"
for path, dirs, files in os.walk(folder_path):
for filename in files:
filesname=filesname+"\n"+filename
self.view.replace(edit, tabRegion[0], filesname)
| 32.108108
| 356
| 0.686553
|
e383dc832d93a413bd49aba2e76d9722897a50a9
| 3,808
|
py
|
Python
|
tests/test_assets_api.py
|
oceanprotocol/provider-backend
|
f9e36e3d6b880de548c6b92c38d10d76daf369ba
|
[
"Apache-2.0"
] | null | null | null |
tests/test_assets_api.py
|
oceanprotocol/provider-backend
|
f9e36e3d6b880de548c6b92c38d10d76daf369ba
|
[
"Apache-2.0"
] | 1
|
2018-08-15T09:57:01.000Z
|
2018-08-15T09:57:01.000Z
|
tests/test_assets_api.py
|
oceanprotocol/provider-backend
|
f9e36e3d6b880de548c6b92c38d10d76daf369ba
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import json
from aquarius.constants import BaseURLs
from aquarius.events.constants import EventTypes
from tests.ddo_samples_invalid import json_dict_no_valid_metadata
from tests.ddos.ddo_sample1_v4 import json_dict
from tests.helpers import (
get_web3,
new_ddo,
run_request,
run_request_get_data,
send_create_update_tx,
test_account1,
)
def get_ddo(client, base_ddo_url, did):
rv = client.get(base_ddo_url + f"/{did}", content_type="application/json")
fetched_ddo = json.loads(rv.data.decode("utf-8"))
return fetched_ddo
def add_assets(_events_object, name, total=5):
block = get_web3().eth.block_number
assets = []
txs = []
for i in range(total):
ddo = new_ddo(test_account1, get_web3(), f"{name}.{i+block}", json_dict)
assets.append(ddo)
txs.append(send_create_update_tx("create", ddo, bytes([1]), test_account1)[0])
block = txs[0].blockNumber
_events_object.store_last_processed_block(block)
for ddo in assets:
_events_object.process_current_blocks()
return assets
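# Usage sketch (hypothetical test, reusing the fixtures above): add_assets publishes
# `total` DDOs and lets the events object index them, e.g.
#   assets = add_assets(events_object, "dt_index", total=2)
#   for ddo in assets:
#       assert get_ddo(client, base_ddo_url, ddo.id)["id"] == ddo.id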
def test_post_with_no_valid_ddo(client, base_ddo_url, events_object):
block = get_web3().eth.block_number
ddo = new_ddo(test_account1, get_web3(), f"dt.{block}", json_dict_no_valid_metadata)
_ = send_create_update_tx("create", ddo, bytes([1]), test_account1)
events_object.process_current_blocks()
try:
published_ddo = get_ddo(client, base_ddo_url, ddo.id)
assert not published_ddo, (
"publish should fail, Aquarius validation "
"should have failed and skipped the "
f"{EventTypes.EVENT_METADATA_CREATED} event."
)
except Exception:
pass
def test_resolveByDtAddress(client_with_no_data, query_url, events_object):
client = client_with_no_data
block = get_web3().eth.block_number
_ddo = json_dict.copy()
ddo = new_ddo(test_account1, get_web3(), f"dt.{block}", _ddo)
did = ddo["id"]
dt_address = ddo["nftAddress"]
send_create_update_tx("create", ddo, bytes([1]), test_account1)
events_object.process_current_blocks()
result = run_request_get_data(
client.post,
query_url,
{
"query": {
"query_string": {"query": dt_address, "default_field": "nft.address"}
}
},
)
assert len(result["hits"]["hits"]) > 0
base_url = BaseURLs.BASE_AQUARIUS_URL + "/assets"
response = client.get(
base_url + f"/metadata/{did}", content_type="application/json"
)
assert response.status_code == 200
def test_get_assets_names(client, events_object):
base_url = BaseURLs.BASE_AQUARIUS_URL + "/assets"
response = run_request(client.post, base_url + "/names", {"notTheDidList": ["a"]})
assert response.status == "400 BAD REQUEST"
response = run_request(client.post, base_url + "/names", {"didList": []})
assert response.status == "400 BAD REQUEST"
response = run_request(client.post, base_url + "/names", {"didList": "notadict"})
assert response.status == "400 BAD REQUEST"
response = run_request(client.post, base_url + "/names", "notadict")
assert response.status == "400 BAD REQUEST"
assets = add_assets(events_object, "dt_name", 3)
dids = [ddo["id"] for ddo in assets]
did_to_name = run_request_get_data(
client.post, base_url + "/names", {"didList": dids}
)
for did in dids:
assert did in did_to_name, "did not found in response."
assert did_to_name[did], "did name not found."
def test_asset_metadata_not_found(client):
result = run_request(client.get, "api/aquarius/assets/metadata/missing")
assert result.status == "404 NOT FOUND"
| 31.471074
| 88
| 0.678309
|
c063f999257490dccc1087c48d2f3616c4814b83
| 3,225
|
py
|
Python
|
venv/lib/python3.7/site-packages/PyInstaller/hooks/hook-xml.dom.html.HTMLDocument.py
|
alexzacher/BMI-Body-Mass-Index-Calculator-APP
|
f54473757992568b73b066d507059e1053357174
|
[
"MIT"
] | 5
|
2020-08-24T23:29:58.000Z
|
2022-02-07T19:58:07.000Z
|
PyInstaller/hooks/hook-xml.dom.html.HTMLDocument.py
|
samuelhwilliams/pyinstaller
|
8714423aa56803027b5a5585257392024ea9f7a0
|
[
"Apache-2.0"
] | 12
|
2020-02-15T04:04:55.000Z
|
2022-02-18T20:29:49.000Z
|
pyinstaller-develop/PyInstaller/hooks/hook-xml.dom.html.HTMLDocument.py
|
onecklam/ethereum-graphviz
|
6993accf0cb85e23013bf7ae6b04145724a6dbd2
|
[
"Apache-2.0"
] | 2
|
2020-08-24T23:30:06.000Z
|
2021-12-23T18:23:38.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
# xml.dom.html.HTMLDocument
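# hiddenimports lists submodules that PyInstaller's static analysis cannot detect because
# xml.dom.html.HTMLDocument loads them dynamically; they are bundled into the frozen build.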
hiddenimports = ['xml.dom.html.HTMLAnchorElement',
'xml.dom.html.HTMLAppletElement',
'xml.dom.html.HTMLAreaElement',
'xml.dom.html.HTMLBaseElement',
'xml.dom.html.HTMLBaseFontElement',
'xml.dom.html.HTMLBodyElement',
'xml.dom.html.HTMLBRElement',
'xml.dom.html.HTMLButtonElement',
'xml.dom.html.HTMLDirectoryElement',
'xml.dom.html.HTMLDivElement',
'xml.dom.html.HTMLDListElement',
'xml.dom.html.HTMLElement',
'xml.dom.html.HTMLFieldSetElement',
'xml.dom.html.HTMLFontElement',
'xml.dom.html.HTMLFormElement',
'xml.dom.html.HTMLFrameElement',
'xml.dom.html.HTMLFrameSetElement',
'xml.dom.html.HTMLHeadElement',
'xml.dom.html.HTMLHeadingElement',
'xml.dom.html.HTMLHRElement',
'xml.dom.html.HTMLHtmlElement',
'xml.dom.html.HTMLIFrameElement',
'xml.dom.html.HTMLImageElement',
'xml.dom.html.HTMLInputElement',
'xml.dom.html.HTMLIsIndexElement',
'xml.dom.html.HTMLLabelElement',
'xml.dom.html.HTMLLegendElement',
'xml.dom.html.HTMLLIElement',
'xml.dom.html.HTMLLinkElement',
'xml.dom.html.HTMLMapElement',
'xml.dom.html.HTMLMenuElement',
'xml.dom.html.HTMLMetaElement',
'xml.dom.html.HTMLModElement',
'xml.dom.html.HTMLObjectElement',
'xml.dom.html.HTMLOListElement',
'xml.dom.html.HTMLOptGroupElement',
'xml.dom.html.HTMLOptionElement',
'xml.dom.html.HTMLParagraphElement',
'xml.dom.html.HTMLParamElement',
'xml.dom.html.HTMLPreElement',
'xml.dom.html.HTMLQuoteElement',
'xml.dom.html.HTMLScriptElement',
'xml.dom.html.HTMLSelectElement',
'xml.dom.html.HTMLStyleElement',
'xml.dom.html.HTMLTableCaptionElement',
'xml.dom.html.HTMLTableCellElement',
'xml.dom.html.HTMLTableColElement',
'xml.dom.html.HTMLTableElement',
'xml.dom.html.HTMLTableRowElement',
'xml.dom.html.HTMLTableSectionElement',
'xml.dom.html.HTMLTextAreaElement',
'xml.dom.html.HTMLTitleElement',
'xml.dom.html.HTMLUListElement',
]
| 47.426471
| 78
| 0.530543
|
50474356d153dd3aa53c88c3e7ffd7b5945fb5d2
| 4,988
|
py
|
Python
|
third_party/unidecode/x0bb.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 39
|
2015-06-10T23:18:07.000Z
|
2021-10-21T04:29:06.000Z
|
third_party/unidecode/x0bb.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 2
|
2016-08-22T12:38:10.000Z
|
2017-01-26T18:37:33.000Z
|
third_party/unidecode/x0bb.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 26
|
2015-06-10T22:09:15.000Z
|
2021-06-27T15:45:15.000Z
|
data = (
'moen', # 0x00
'moenj', # 0x01
'moenh', # 0x02
'moed', # 0x03
'moel', # 0x04
'moelg', # 0x05
'moelm', # 0x06
'moelb', # 0x07
'moels', # 0x08
'moelt', # 0x09
'moelp', # 0x0a
'moelh', # 0x0b
'moem', # 0x0c
'moeb', # 0x0d
'moebs', # 0x0e
'moes', # 0x0f
'moess', # 0x10
'moeng', # 0x11
'moej', # 0x12
'moec', # 0x13
'moek', # 0x14
'moet', # 0x15
'moep', # 0x16
'moeh', # 0x17
'myo', # 0x18
'myog', # 0x19
'myogg', # 0x1a
'myogs', # 0x1b
'myon', # 0x1c
'myonj', # 0x1d
'myonh', # 0x1e
'myod', # 0x1f
'myol', # 0x20
'myolg', # 0x21
'myolm', # 0x22
'myolb', # 0x23
'myols', # 0x24
'myolt', # 0x25
'myolp', # 0x26
'myolh', # 0x27
'myom', # 0x28
'myob', # 0x29
'myobs', # 0x2a
'myos', # 0x2b
'myoss', # 0x2c
'myong', # 0x2d
'myoj', # 0x2e
'myoc', # 0x2f
'myok', # 0x30
'myot', # 0x31
'myop', # 0x32
'myoh', # 0x33
'mu', # 0x34
'mug', # 0x35
'mugg', # 0x36
'mugs', # 0x37
'mun', # 0x38
'munj', # 0x39
'munh', # 0x3a
'mud', # 0x3b
'mul', # 0x3c
'mulg', # 0x3d
'mulm', # 0x3e
'mulb', # 0x3f
'muls', # 0x40
'mult', # 0x41
'mulp', # 0x42
'mulh', # 0x43
'mum', # 0x44
'mub', # 0x45
'mubs', # 0x46
'mus', # 0x47
'muss', # 0x48
'mung', # 0x49
'muj', # 0x4a
'muc', # 0x4b
'muk', # 0x4c
'mut', # 0x4d
'mup', # 0x4e
'muh', # 0x4f
'mweo', # 0x50
'mweog', # 0x51
'mweogg', # 0x52
'mweogs', # 0x53
'mweon', # 0x54
'mweonj', # 0x55
'mweonh', # 0x56
'mweod', # 0x57
'mweol', # 0x58
'mweolg', # 0x59
'mweolm', # 0x5a
'mweolb', # 0x5b
'mweols', # 0x5c
'mweolt', # 0x5d
'mweolp', # 0x5e
'mweolh', # 0x5f
'mweom', # 0x60
'mweob', # 0x61
'mweobs', # 0x62
'mweos', # 0x63
'mweoss', # 0x64
'mweong', # 0x65
'mweoj', # 0x66
'mweoc', # 0x67
'mweok', # 0x68
'mweot', # 0x69
'mweop', # 0x6a
'mweoh', # 0x6b
'mwe', # 0x6c
'mweg', # 0x6d
'mwegg', # 0x6e
'mwegs', # 0x6f
'mwen', # 0x70
'mwenj', # 0x71
'mwenh', # 0x72
'mwed', # 0x73
'mwel', # 0x74
'mwelg', # 0x75
'mwelm', # 0x76
'mwelb', # 0x77
'mwels', # 0x78
'mwelt', # 0x79
'mwelp', # 0x7a
'mwelh', # 0x7b
'mwem', # 0x7c
'mweb', # 0x7d
'mwebs', # 0x7e
'mwes', # 0x7f
'mwess', # 0x80
'mweng', # 0x81
'mwej', # 0x82
'mwec', # 0x83
'mwek', # 0x84
'mwet', # 0x85
'mwep', # 0x86
'mweh', # 0x87
'mwi', # 0x88
'mwig', # 0x89
'mwigg', # 0x8a
'mwigs', # 0x8b
'mwin', # 0x8c
'mwinj', # 0x8d
'mwinh', # 0x8e
'mwid', # 0x8f
'mwil', # 0x90
'mwilg', # 0x91
'mwilm', # 0x92
'mwilb', # 0x93
'mwils', # 0x94
'mwilt', # 0x95
'mwilp', # 0x96
'mwilh', # 0x97
'mwim', # 0x98
'mwib', # 0x99
'mwibs', # 0x9a
'mwis', # 0x9b
'mwiss', # 0x9c
'mwing', # 0x9d
'mwij', # 0x9e
'mwic', # 0x9f
'mwik', # 0xa0
'mwit', # 0xa1
'mwip', # 0xa2
'mwih', # 0xa3
'myu', # 0xa4
'myug', # 0xa5
'myugg', # 0xa6
'myugs', # 0xa7
'myun', # 0xa8
'myunj', # 0xa9
'myunh', # 0xaa
'myud', # 0xab
'myul', # 0xac
'myulg', # 0xad
'myulm', # 0xae
'myulb', # 0xaf
'myuls', # 0xb0
'myult', # 0xb1
'myulp', # 0xb2
'myulh', # 0xb3
'myum', # 0xb4
'myub', # 0xb5
'myubs', # 0xb6
'myus', # 0xb7
'myuss', # 0xb8
'myung', # 0xb9
'myuj', # 0xba
'myuc', # 0xbb
'myuk', # 0xbc
'myut', # 0xbd
'myup', # 0xbe
'myuh', # 0xbf
'meu', # 0xc0
'meug', # 0xc1
'meugg', # 0xc2
'meugs', # 0xc3
'meun', # 0xc4
'meunj', # 0xc5
'meunh', # 0xc6
'meud', # 0xc7
'meul', # 0xc8
'meulg', # 0xc9
'meulm', # 0xca
'meulb', # 0xcb
'meuls', # 0xcc
'meult', # 0xcd
'meulp', # 0xce
'meulh', # 0xcf
'meum', # 0xd0
'meub', # 0xd1
'meubs', # 0xd2
'meus', # 0xd3
'meuss', # 0xd4
'meung', # 0xd5
'meuj', # 0xd6
'meuc', # 0xd7
'meuk', # 0xd8
'meut', # 0xd9
'meup', # 0xda
'meuh', # 0xdb
'myi', # 0xdc
'myig', # 0xdd
'myigg', # 0xde
'myigs', # 0xdf
'myin', # 0xe0
'myinj', # 0xe1
'myinh', # 0xe2
'myid', # 0xe3
'myil', # 0xe4
'myilg', # 0xe5
'myilm', # 0xe6
'myilb', # 0xe7
'myils', # 0xe8
'myilt', # 0xe9
'myilp', # 0xea
'myilh', # 0xeb
'myim', # 0xec
'myib', # 0xed
'myibs', # 0xee
'myis', # 0xef
'myiss', # 0xf0
'mying', # 0xf1
'myij', # 0xf2
'myic', # 0xf3
'myik', # 0xf4
'myit', # 0xf5
'myip', # 0xf6
'myih', # 0xf7
'mi', # 0xf8
'mig', # 0xf9
'migg', # 0xfa
'migs', # 0xfb
'min', # 0xfc
'minj', # 0xfd
'minh', # 0xfe
'mid', # 0xff
)
| 19.258687
| 20
| 0.433641
|
4044fd554accc710808183904c7905a1a811f040
| 7,574
|
py
|
Python
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/ndmp_sessions_node.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/ndmp_sessions_node.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/isi_sdk_9_0_0/models/ndmp_sessions_node.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.ndmp_sessions_node_session import NdmpSessionsNodeSession # noqa: F401,E501
class NdmpSessionsNode(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str',
'id': 'int',
'lnn': 'int',
'sessions': 'list[NdmpSessionsNodeSession]',
'status': 'int'
}
attribute_map = {
'error': 'error',
'id': 'id',
'lnn': 'lnn',
'sessions': 'sessions',
'status': 'status'
}
def __init__(self, error=None, id=None, lnn=None, sessions=None, status=None): # noqa: E501
"""NdmpSessionsNode - a model defined in Swagger""" # noqa: E501
self._error = None
self._id = None
self._lnn = None
self._sessions = None
self._status = None
self.discriminator = None
if error is not None:
self.error = error
if id is not None:
self.id = id
if lnn is not None:
self.lnn = lnn
if sessions is not None:
self.sessions = sessions
if status is not None:
self.status = status
@property
def error(self):
"""Gets the error of this NdmpSessionsNode. # noqa: E501
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:return: The error of this NdmpSessionsNode. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this NdmpSessionsNode.
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:param error: The error of this NdmpSessionsNode. # noqa: E501
:type: str
"""
if error is not None and len(error) > 8192:
raise ValueError("Invalid value for `error`, length must be less than or equal to `8192`") # noqa: E501
if error is not None and len(error) < 0:
raise ValueError("Invalid value for `error`, length must be greater than or equal to `0`") # noqa: E501
self._error = error
@property
def id(self):
"""Gets the id of this NdmpSessionsNode. # noqa: E501
Node ID (Device Number) of a node. # noqa: E501
:return: The id of this NdmpSessionsNode. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this NdmpSessionsNode.
Node ID (Device Number) of a node. # noqa: E501
:param id: The id of this NdmpSessionsNode. # noqa: E501
:type: int
"""
if id is not None and id > 2147483647: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value less than or equal to `2147483647`") # noqa: E501
if id is not None and id < 0: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`") # noqa: E501
self._id = id
@property
def lnn(self):
"""Gets the lnn of this NdmpSessionsNode. # noqa: E501
Logical Node Number (LNN) of a node. # noqa: E501
:return: The lnn of this NdmpSessionsNode. # noqa: E501
:rtype: int
"""
return self._lnn
@lnn.setter
def lnn(self, lnn):
"""Sets the lnn of this NdmpSessionsNode.
Logical Node Number (LNN) of a node. # noqa: E501
:param lnn: The lnn of this NdmpSessionsNode. # noqa: E501
:type: int
"""
if lnn is not None and lnn > 65535: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value less than or equal to `65535`") # noqa: E501
if lnn is not None and lnn < 1: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value greater than or equal to `1`") # noqa: E501
self._lnn = lnn
@property
def sessions(self):
"""Gets the sessions of this NdmpSessionsNode. # noqa: E501
:return: The sessions of this NdmpSessionsNode. # noqa: E501
:rtype: list[NdmpSessionsNodeSession]
"""
return self._sessions
@sessions.setter
def sessions(self, sessions):
"""Sets the sessions of this NdmpSessionsNode.
:param sessions: The sessions of this NdmpSessionsNode. # noqa: E501
:type: list[NdmpSessionsNodeSession]
"""
self._sessions = sessions
@property
def status(self):
"""Gets the status of this NdmpSessionsNode. # noqa: E501
Status of the HTTP response from this node if not 200. If 200, this field does not appear. # noqa: E501
:return: The status of this NdmpSessionsNode. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this NdmpSessionsNode.
Status of the HTTP response from this node if not 200. If 200, this field does not appear. # noqa: E501
:param status: The status of this NdmpSessionsNode. # noqa: E501
:type: int
"""
if status is not None and status > 4294967295: # noqa: E501
raise ValueError("Invalid value for `status`, must be a value less than or equal to `4294967295`") # noqa: E501
if status is not None and status < 0: # noqa: E501
raise ValueError("Invalid value for `status`, must be a value greater than or equal to `0`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NdmpSessionsNode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
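# Usage sketch (hypothetical values): the generated model validates its setters and can be
# serialised back to a plain dict, e.g.
#   node = NdmpSessionsNode(id=1, lnn=1, status=200)
#   node.to_dict()  # -> {'error': None, 'id': 1, 'lnn': 1, 'sessions': None, 'status': 200}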
| 31.168724
| 124
| 0.577238
|
54d1c3d06301c20e74cabc64b6760c098a746bd4
| 932
|
py
|
Python
|
test/proj4/proj-regression-meteosat_145.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 7
|
2019-03-19T09:32:41.000Z
|
2022-02-07T13:20:33.000Z
|
test/proj4/proj-regression-meteosat_145.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 2
|
2021-03-30T05:37:20.000Z
|
2021-08-17T13:58:04.000Z
|
test/proj4/proj-regression-meteosat_145.py
|
dvuckovic/magics-test
|
bd8baf97b0db986f6adf63700d3cf77bbcbad2f2
|
[
"Apache-2.0"
] | 5
|
2019-03-19T10:43:46.000Z
|
2021-09-09T14:28:39.000Z
|
from Magics.macro import *
import os
def plot_area(epsg):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} ".format(epsg)
#Setting output
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
#Setting the geographical area
area = mmap(
subpage_map_projection = epsg,
subpage_map_area_definition = "full"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Picking the grib metadata
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("meteosat_145")
| 23.3
| 54
| 0.60515
|
fcba2d280840edea0e2e62a1021eb18085fe6c38
| 132,479
|
py
|
Python
|
gui/qt/main_window.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
gui/qt/main_window.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
gui/qt/main_window.py
|
quietnan/electrum-ftc
|
5f72cadd777d80a7235e4860589c425287a67fe9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum import keystore, simple_config
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum import constants
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword)
from electrum import Transaction
from electrum import util, bitcoin, commands, coinchooser
from electrum import paymentrequest
from electrum.wallet import Multisig_Wallet, AddTransactionException
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum_light.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
# todo: update only unconfirmed tx
self.history_list.update()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-FTC Testnet" if constants.net.TESTNET else "Electrum-FTC"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Feathercoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Feathercoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
        # "Settings" / "Preferences" are reserved menu keywords on macOS; use this as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://www.feathercoin.com/"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('feathercoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Feathercoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Feathercoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Feathercoin-Foundation/electrum-ftc/issues\">https://github.com/Feathercoin-Foundation/electrum-ftc/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum-FTC (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-FTC - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-FTC", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-FTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'uFTC'
if self.decimal_point == 5:
return 'mFTC'
if self.decimal_point == 8:
return 'FTC'
raise Exception('Unknown base unit')
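    # For reference: decimal_point 8 shows whole FTC, 5 shows mFTC (thousandths of an FTC)
    # and 2 shows uFTC (millionths of an FTC).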
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Feathercoin address where the payment should be received. Note that each payment request uses a different Feathercoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Feathercoin addresses.'),
_('The Feathercoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Feathercoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Feathercoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Feathercoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
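            # The absolute-fee box and the fee-rate box are mutually exclusive:
            # whichever one was edited last keeps its "modified" (frozen) state, the
            # other has its modified flag cleared so do_update_fee() recomputes it.
            # Blanking a box returns that value to automatic calculation.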
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
                text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
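        # Overall flow: pick outputs (or a dummy output while the Pay-to field is
        # empty), build an unsigned transaction with the chosen fee estimator to
        # learn its size and actual fee, then mirror those values back into the
        # size / fee / feerate widgets. NotEnoughFunds and NoDynamicFeeEstimates
        # are caught and reflected in the UI instead of raising.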
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
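            # Two display modes below: if the fee *rate* is what the user pinned (or
            # the slider is active), the rate drives the displayed absolute fee;
            # otherwise the absolute fee (frozen or actual) drives the displayed rate.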
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate:
displayed_feerate = displayed_feerate // 1000
else:
# fallback to actual fee
displayed_feerate = fee // size if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = displayed_feerate * size if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(feerounding)
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(bool(feerounding))
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
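        # Intended use (sketch): decorate password-requiring actions, e.g.
        #     @protected
        #     def do_sign(self, ..., password):
        # The wrapper keeps prompting until the password verifies or the user cancels.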
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
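        # Returns one of three things: a fixed fee in satoshis (the fee box is
        # frozen), a callable built from the frozen fee rate, or None to let the
        # wallet fall back to the config's own fee estimation.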
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
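        # Validates the send form and returns (outputs, fee_estimator, label, coins),
        # or None on any error (expired payment request, malformed lines, missing or
        # invalid address/amount, or a rejected unvalidated-alias warning).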
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Feathercoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Feathercoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
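        # broadcast_thread() runs off the GUI thread (via the WaitingDialog below).
        # For BIP70 payment requests it also marks the invoice as paid and sends the
        # payment ACK with a refund address; broadcast_done() then updates the GUI.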
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
            self.show_message(_('Invoice already paid'))
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
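        # Parses a feathercoin: URI. If it carries a payment request ('r', or a
        # signed name/sig pair), the request is fetched in the background and
        # on_pr() runs when it arrives; otherwise the address/amount/message fields
        # are filled in directly from the URI parameters.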
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid Feathercoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
        if self.question(_("Do you want to remove") + " %s " % addr + _("from your wallet?")):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
            with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
        self.show_error("Wallet removed: " + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Feathercoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Feathercoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
        # if the user scanned a feathercoin URI
if str(data).startswith("feathercoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(e))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-ftc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
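            # Worker/GUI split: this thread exports one key at a time and emits
            # computing_privkeys_signal for progress; show_privkeys_signal fills the
            # text box when finished. The nonlocal done/cancelled flags let the
            # dialog-close handler stop the loop and detach the signals.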
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
                _("Electrum was unable to produce a private key export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
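        # Sweep flow: collect the pasted private keys, let sweep_preparations() find
        # the coins they control, stash the keypairs in self.tx_external_keypairs,
        # and prefill the send tab (spend_max to the chosen wallet address) so the
        # user only has to confirm the transaction.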
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
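        # Rows for the Preferences dialog: each (label, widget) pair appended to one
        # of these lists is later laid out on the corresponding tab of the `tabs`
        # QTabWidget created above.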
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['FTC', 'mFTC', 'uFTC']
msg = (_('Base unit of your wallet.')
+ '\n1 FTC = 1000 mFTC. 1 mFTC = 1000 uFTC.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'FTC':
self.decimal_point = 8
elif unit_result == 'mFTC':
self.decimal_point = 5
elif unit_result == 'uFTC':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_list.refresh_headers()
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
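        # Worked example (illustrative numbers only): with fee_per_kb() returning 100000
        # and a combined size of 400 bytes, the proposed fee is 100000 * 400 / 1000 = 40000
        # base units.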
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
try:
if not self.wallet.add_transaction(tx.txid(), tx):
self.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
self.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
self.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), _("Transaction added to wallet history"))
return True
| 41.56856
| 411
| 0.609319
|
33370113c567c884153457d2ed20d816e08647fc
| 3,617
|
py
|
Python
|
bridge/pearl.py
|
joshdabosh/dhbridge
|
dfef6fe37d5dda45af65880875bb9914c31b5d11
|
[
"MIT"
] | 6
|
2020-03-30T22:55:51.000Z
|
2021-12-25T21:17:29.000Z
|
bridge/pearl.py
|
joshdabosh/dhbridge
|
dfef6fe37d5dda45af65880875bb9914c31b5d11
|
[
"MIT"
] | 1
|
2021-12-25T17:10:41.000Z
|
2021-12-25T17:10:41.000Z
|
bridge/pearl.py
|
joshdabosh/dhbridge
|
dfef6fe37d5dda45af65880875bb9914c31b5d11
|
[
"MIT"
] | null | null | null |
import asyncio, importlib, json, os
from threading import Thread
import hangups
import discord
import nacre
import lockfile
class Pearl:
def __init__(self, auth, config):
self.auth = auth
self.config = config
self.client = hangups.client.Client(self.authenticate())
self.admins = json.load(open("admins.json"))
self.DH = json.load(open("DH.json"))
self.HD = json.load(open("HD.json"))
self.hangouts = nacre.hangouts.Hangouts(self.client)
self.updateEvent = nacre.event.Event()
self.discordClient = discord.Client()
self.load()
def authenticate(self):
authenticator = nacre.auth.Authenticator(self.auth['email'], self.auth['password'], self.auth['secret'])
token = hangups.RefreshTokenCache(os.path.join(os.getcwd(), self.auth['token']))
return hangups.get_auth(authenticator, token)
def load(self):
self.plugins = {}
plugins = self.config['plugins']
for name in plugins:
path = os.path.join(os.getcwd(), plugins[name]['path'])
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
self.plugins[name] = module.load(self, plugins[name])
self.plugins[name].build()
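    # Note (inferred from the calls above and in on_message below, not stated elsewhere):
    # each plugin module is expected to expose load(pearl, config) returning an object
    # with a build() method and an async respond(message, caller=...) coroutine.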
def run2(self):
self.client.on_connect.add_observer(self.hangouts.start)
self.client.on_state_update.add_observer(self.updateEvent.fire)
self.loop = asyncio.new_event_loop()
self.loop.run_until_complete(self.client.connect())
def run(self):
t = Thread(target=self.run2)
t.daemon = True
t.start()
print("started hangouts")
self.startDiscord()
def save(self):
with lockfile.LockFile("DH.json"):
f = open("DH.json", 'w')
f.write(json.dumps(self.DH))
f.close()
with lockfile.LockFile("HD.json"):
f = open("HD.json", "w")
f.write(json.dumps(self.HD))
f.close()
def saveAdmins(self):
with lockfile.LockFile("admins.json"):
f = open("admins.json", "w")
f.write(json.dumps(self.admins))
f.close()
def startDiscord(self):
@self.discordClient.event
async def on_ready():
print("started discord")
@self.discordClient.event
async def on_message(message):
if message.author == self.discordClient.user:
return
#print("Message received from %s: %s" %( message.author, message.content))
c = message.content
# this loop is terrible but it's only a few plugins
# so it SHOULD be fine
for _, p in self.plugins.items():
await p.respond(message, caller='d')
self.discordClient.run(open(self.auth["disc_token"]).read())
async def send(self, message, channel):
await channel.send(message)
async def embed(self, embed, channel):
await channel.send(embed=embed)
async def getChannels(self):
text_channel_list = {}
for server in self.discordClient.guilds:
text_channel_list[server] = server.text_channels
return text_channel_list
async def getServers(self):
return self.discordClient.guilds
def main():
config = json.load(open('config.json'))
auth = json.load(open(config['auth']))
bridge = Pearl(auth, config)
bridge.run()
if __name__ == '__main__':
main()
| 28.480315
| 112
| 0.601051
|
b61f4be37da644bea54dddb50d24749981da2ced
| 294
|
py
|
Python
|
leet/slidingwindow/longestOnes.py
|
monishshah18/python-cp-cheatsheet
|
a5514b08816959de1198156f7764c54a7a585f20
|
[
"Apache-2.0"
] | 1
|
2021-12-16T10:31:50.000Z
|
2021-12-16T10:31:50.000Z
|
leet/slidingwindow/longestOnes.py
|
Rahul-k25/python-cp-cheatsheet
|
17ea967f669e4992fba51b26accdd44a01aaac32
|
[
"Apache-2.0"
] | null | null | null |
leet/slidingwindow/longestOnes.py
|
Rahul-k25/python-cp-cheatsheet
|
17ea967f669e4992fba51b26accdd44a01aaac32
|
[
"Apache-2.0"
] | 1
|
2021-09-22T04:41:47.000Z
|
2021-09-22T04:41:47.000Z
|
from typing import List
class Solution:
def longestOnes(self, A: List[int], K: int) -> int:
l = 0
cnt = 0
for r in range(len(A)):
K -= 1 - A[r]
if K < 0:
K += 1 - A[l]
l += 1
return r-l+1
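# Worked example (illustrative, not part of the original file):
# A = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0], K = 2
# The window [l, r] only grows or slides, never shrinks, so the final r - l + 1 equals the
# largest window seen that needed at most K flips of zeros; here the answer is 6
# (for instance, flip the zeros at indices 4 and 5).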
| 22.615385
| 55
| 0.306122
|
2cfb7a294a361e72983473a4d12b0585548611c6
| 1,980
|
py
|
Python
|
fundamental_blockchain.py
|
AnandaRauf/Fundamental-Blockchain
|
583f70bbc75b703a953e5fb6a857cc15aaf9ff0a
|
[
"MIT"
] | 2
|
2021-05-21T12:59:13.000Z
|
2021-07-21T02:50:28.000Z
|
fundamental_blockchain.py
|
AnandaRauf/Fundamental-Blockchain
|
583f70bbc75b703a953e5fb6a857cc15aaf9ff0a
|
[
"MIT"
] | null | null | null |
fundamental_blockchain.py
|
AnandaRauf/Fundamental-Blockchain
|
583f70bbc75b703a953e5fb6a857cc15aaf9ff0a
|
[
"MIT"
] | null | null | null |
def lightning_hash(data): # toy example hash function
    return data + '*' # appends an asterisk so every input maps to a distinct, human-readable "hash" value
class Block: # primary storage unit of the chain
    # Constructor: takes the block data plus this block's hash and the previous block's hash
    def __init__(self,data,hash,last_hash):
        self.data = data # payload stored in the block
        self.hash = hash # unique value generated for this block from its data and the previous hash
        self.last_hash = last_hash # hash of the preceding block, which links the chain together
foo_block = Block('foo_data','foo_hash','foo_last_hash') # example Block instance
#print(foo_block.data) # would print foo_data
#print(foo_block.hash)
#print(foo_block.last_hash)
class Blockchain():
    def __init__(self):
        # start the chain with a hard-coded genesis block
        genesis = Block('gen_data','gen_hash','gen_last_hash')
        # the chain itself is simply a Python list of Block objects
        self.chain = [genesis]
    def add_block(self,data):
        # fetch the hash of the most recent block in the chain
        last_hash = self.chain[-1].hash
        hash = lightning_hash(data + last_hash)
        block = Block(data,hash,last_hash)
        self.chain.append(block) # append the newly linked block to the chain
foo_blockchain = Blockchain()
foo_blockchain.add_block('one') # block data is a plain string
foo_blockchain.add_block('two')
foo_blockchain.add_block('three')
# loop over the blockchain one block at a time
for block in foo_blockchain.chain:
    # print each block as a dictionary rather than three separate lines,
    # turning the block into a key/value view of all its attributes
    print(block.__dict__)
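# Illustrative chain contents, derived from the toy hash above:
# genesis : data='gen_data', hash='gen_hash',               last_hash='gen_last_hash'
# block 1 : data='one',      hash='onegen_hash*',           last_hash='gen_hash'
# block 2 : data='two',      hash='twoonegen_hash**',       last_hash='onegen_hash*'
# block 3 : data='three',    hash='threetwoonegen_hash***', last_hash='twoonegen_hash**'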
| 42.12766
| 160
| 0.699495
|
d3af5e3685a63ddd5def106bf8ae3fd309056715
| 1,683
|
py
|
Python
|
Python/areas.py
|
chernyshov-dev/ideal-octo-waffle
|
c50f29795352766752dbbbcd46693ff54f23369b
|
[
"WTFPL"
] | 3
|
2021-08-29T15:22:08.000Z
|
2021-08-29T17:12:01.000Z
|
Python/areas.py
|
chernyshov-dev/ideal-octo-waffle
|
c50f29795352766752dbbbcd46693ff54f23369b
|
[
"WTFPL"
] | 11
|
2021-09-07T19:24:15.000Z
|
2022-01-13T19:51:25.000Z
|
Python/areas.py
|
chernyshov-dev/university-practice-heap
|
c50f29795352766752dbbbcd46693ff54f23369b
|
[
"WTFPL"
] | null | null | null |
import math
print('This program calculates the area of a figure of your choice')
print('Available figures: rectangle, triangle, trapezoid, circle, ellipse')
f = input('Enter the name of the figure -> ')
def rec():
    a = float(input('Enter the length: '))
    b = float(input('Enter the width: '))
    sr = a * b
    return sr
def tri():
    a = float(input('Enter the base length: '))
    h = float(input('Enter the height: '))
    st = 1/2 * a * h
    return st
def tra():
    a = float(input('Enter the first base length: '))
    b = float(input('Enter the second base length: '))
    h = float(input('Enter the height: '))
    stra = (1/2 * (a + b)) * h
    return stra
def cir():
    r = float(input('Enter the radius of the circle: '))
    scir = math.pi * r**2
    return scir
def ell():
    a = float(input('Enter the first radius of the ellipse: '))
    b = float(input('Enter the second radius of the ellipse: '))
    sell = math.pi * a * b
    return sell
if f.lower() == 'rectangle':
    sr = rec()
    print()
    print('Area of the rectangle -> ',round(sr,2))
elif f.lower() == 'triangle':
    st = tri()
    print()
    print('Area of the triangle -> ',round(st,2))
elif f.lower() == 'trapezoid':
    stra = tra()
    print()
    print('Area of the trapezoid -> ',round(stra,2))
elif f.lower() == 'circle':
    scir = cir()
    print()
    print('Area of the circle -> ',round(scir,2))
elif f.lower() == 'ellipse':
    sell = ell()
    print()
    print('Area of the ellipse -> ',round(sell,2))
else:
    print()
    print('AN INVALID VALUE WAS ENTERED!')
| 21.857143
| 78
| 0.545455
|
1a8d4dfbd1caf130cf1077c8115198124f6c4fb3
| 1,244
|
py
|
Python
|
jscodestyle/common/lintrunner.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
jscodestyle/common/lintrunner.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
jscodestyle/common/lintrunner.py
|
zeth/jscodestyle
|
43c98de7b544bf2203b23792677a7cefb5daf1d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2018 The JsCodeStyle Authors.
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for a lint running wrapper."""
class LintRunner(object):
"""Interface for a lint running wrapper."""
def __init__(self):
if self.__class__ == LintRunner:
raise NotImplementedError('class LintRunner is abstract')
def Run(self, filenames, error_handler):
"""Run a linter on the given filenames.
Args:
filenames: The filenames to check
error_handler: An ErrorHandler object
Returns:
The error handler, which may have been used to collect error info.
"""
| 32.736842
| 76
| 0.70418
|
07d6502cf18b334835952d168a77e6acc301facb
| 11,866
|
py
|
Python
|
step1_generate_dataset_IndexImage.py
|
imiled/DL_Tools_For_Finance
|
7b1d3246a4271170af0a99a7ab6790b7377249fd
|
[
"Apache-2.0"
] | 1
|
2020-09-04T18:04:02.000Z
|
2020-09-04T18:04:02.000Z
|
step1_generate_dataset_IndexImage.py
|
imiled/DL_Tools_For_Finance
|
7b1d3246a4271170af0a99a7ab6790b7377249fd
|
[
"Apache-2.0"
] | 3
|
2020-09-19T15:23:38.000Z
|
2021-08-25T16:13:04.000Z
|
step1_generate_dataset_IndexImage.py
|
imiled/DL_Tools_For_Finance
|
7b1d3246a4271170af0a99a7ab6790b7377249fd
|
[
"Apache-2.0"
] | 1
|
2020-11-26T00:42:58.000Z
|
2020-11-26T00:42:58.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import bs4 as bs
import requests
import yfinance as yf
#import fix_yahoo_finance as yf
import datetime
import io
import cv2
import skimage
import datetime
from PIL import Image
from pandas_datareader import data as pdr
from skimage import measure
from skimage.measure import block_reduce
from datetime import datetime
'''
Functions to be used for data generation
'''
def get_img_from_fig(fig, dpi=180):
    # get_img_from_fig is a helper that returns a numpy array image rendered from a matplotlib figure
buf = io.BytesIO()
fig.savefig(buf, format="png", dpi=dpi)
buf.seek(0)
img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
buf.close()
img = cv2.imdecode(img_arr, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def build_image(stockindex, idate=10, pastlag=10, futlag=3,nb_dates=1000):
    # Build an image from the stock index price series
    # returns a (32,32,3) np.array representing the chart in colour
    # using idate as the end point of the plotted window
    # pastlag and futlag are the numbers of days to look back and ahead
sp500close=stockindex
nb_days=nb_dates
x_datas=[]
x_datas=np.zeros((32,32,3))
i=idate
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(sp500close[(i-pastlag):i])
plot_img_np = get_img_from_fig(fig)
x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
(x_datas[1:-1])[:,1:-1][:]=x_tmp
fig.clear()
plt.close(fig)
x_datas=x_datas/255
return x_datas
'''
MAIN CLASSIFICATION FUNCTIONS
build the future-state label y and the future value y_fut
together with the input x
'''
def class_shortterm_returnfut(x, yfut, indexforpast,tpastlag):
    # classify the future state from where the future value sits relative to the past range:
    # the past window defines two "boxes", (min, max) and (open, close), and we check whether
    # the future value stays inside them, drops below or rises above them
    # the function returns 5 states for the future value position plus one state for error cases
xpast_min=np.min(x[(indexforpast-tpastlag):indexforpast])
xpast_max=np.max(x[(indexforpast-tpastlag):indexforpast])
x_open=x[int(indexforpast-tpastlag)]
x_close=x[indexforpast]
if (yfut < xpast_min ): return 0
elif (yfut < min(x_open,x_close)): return 1
elif (yfut < max(x_open,x_close)): return 2
elif (yfut < xpast_max): return 3
elif (yfut > xpast_max): return 4
else : return -1
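# Worked example (illustrative): if over the past window the index ranged from 0.90 (min)
# to 1.10 (max) with open 0.95 and close 1.05, then a future value of 0.85 -> state 0,
# 0.92 -> 1, 1.00 -> 2, 1.08 -> 3 and 1.20 -> 4.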
def main_class_shortterm_returnfut(iterable):
    # NOTE: relies on module-level sp500close, pastlag and futlag being defined before this is called
    return class_shortterm_returnfut(sp500close, iterable, pastlag,futlag)
def normalise_df_image(xdf):
    # scale the equity index by its maximum so all values fall in the (0, 1] range
df_tmp=xdf
maxval=np.max(df_tmp)
df_tmp=df_tmp/maxval
return df_tmp, maxval
def build_image(stockindex, idate=10, pastlag=10, futlag=3):
    # another version that builds an image from a DataFrame index series
    # using pastlag as the range of the plotted window
    # using idate as the end point of that window
    # returns a (32,32,3) np array
    # NOTE: this definition overrides the build_image defined earlier in this file
sp500close=stockindex
x_datas=[]
x_datas=np.zeros((32,32,3))
i=idate
fig=plt.figure()
ax=fig.add_subplot(111)
ax.plot(sp500close[(i-pastlag):i])
plot_img_np = get_img_from_fig(fig)
x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
(x_datas[1:-1])[:,1:-1][:]=x_tmp
fig.clear()
plt.close(fig)
x_datas=x_datas/255
return x_datas
def build_image_optimfig(fig, stockindex, idate=10, pastlag=10, futlag=3):
    # variant that builds an image from a DataFrame index series
    # using pastlag as the range of the plotted window
    # using idate as the end point of that window
    # returns a (32,32,3) np array
    # this version reuses the caller's figure to reduce RAM usage
    # pastlag and futlag are the numbers of days to look back and ahead
sp500close=stockindex
x_datas=[]
x_datas=np.zeros((32,32,3))
i=idate
plt.plot(sp500close[(i-pastlag):i])
plot_img_np = get_img_from_fig(fig)
x_tmp= skimage.measure.block_reduce(plot_img_np[90:620,140:970], (18,28,1), np.mean)
(x_datas[1:-1])[:,1:-1][:]=x_tmp
x_datas=x_datas/255
return x_datas
def build_image_df(xdf, past_step,fut_step) :
    '''
    Returns a dictionary of time-series DataFrames used by setup_input_NN_image to generate
    the input X and the targets Y_StateClass, Y_FutPredict.
    past_step is the look-back range of the chart image;
    fut_step is the forward lag of the value / market state to predict.
    # time series derived from the stock index value
    'stock_value': the index normalised over the whole period
    'moving_average': rolling moving average of the index over past_step days
    "max": rolling maximum of the index over past_step days
    "min": rolling minimum of the index over past_step days
    'volatility': rolling volatility of the index over past_step days
    'df_x_image': time series of flattened (32, 32, 3) chart images
    # flattened because pandas cannot store a table of 3-D arrays directly
    'market_state': future market state to be predicted, with time lag fut_step
    'future_value': future value of the stock price to predict, with time lag fut_step
    'future_volatility': future volatility of the index, with time lag fut_step
    '''
df_stockvaluecorrected=xdf
df_stockvaluecorrected, _ = normalise_df_image(df_stockvaluecorrected)
df_pctchge = df_stockvaluecorrected.pct_change(periods=past_step)
df_movave = df_stockvaluecorrected.rolling(window=past_step).mean()
df_volaty = np.sqrt(252)*df_pctchge.rolling(window=past_step).std()
df_max =df_stockvaluecorrected.rolling(window=past_step).max()
df_min =df_stockvaluecorrected.rolling(window=past_step).min()
df_Fut_value =df_stockvaluecorrected.shift(periods=-fut_step)
df_Fut_value.name='future_value'
df_Fut_volaty =df_volaty.shift(periods=-fut_step)
df_market_state=pd.DataFrame(index=df_stockvaluecorrected.index,columns=['market_state'],dtype=np.float64)
tmpimage=build_image(df_stockvaluecorrected,past_step+1,pastlag=past_step,futlag=fut_step)
flatten_image=np.reshape(tmpimage,(1,-1))
colname_d_x_image_flattened = ['Image Col'+str(j) for j in range(flatten_image.shape[1])]
np_x_image=np.zeros((len(df_stockvaluecorrected.index),flatten_image.shape[1]))
for i in range(len(df_stockvaluecorrected.index)):
yfut=df_Fut_value.iloc[i]
df_market_state.iloc[i]=class_shortterm_returnfut(df_stockvaluecorrected,yfut, i,tpastlag=past_step)
print("loop 1 market state :", "step ",i,"market state fut", df_market_state.iloc[i]," future value",df_Fut_value.iloc[i] )
df_market_state.index=df_Fut_value.index
fig=plt.figure()
for i in range(len(df_stockvaluecorrected.index)):
try:
tmpimage=build_image_optimfig(fig, df_stockvaluecorrected,i,pastlag=past_step,futlag=fut_step)
np_x_image[i,:]=np.reshape(tmpimage,(1,-1))
print("loop 2 image :", "step ",i,"market state fut", df_market_state.iloc[i]," future value",df_Fut_value.iloc[i] )
except:
print("error at index", i)
df_x_image=pd.DataFrame(data=np_x_image,columns=colname_d_x_image_flattened, index=df_stockvaluecorrected.index)
    fig.clear()
plt.close(fig)
df_data= {
'stock_value': df_stockvaluecorrected,
'moving_average': df_movave,
"max": df_max,
"min": df_max,
'volatility': df_volaty,
'future_volatility': df_Fut_volaty,
'df_x_image':df_x_image,
'market_state':df_market_state,
'future_value': df_Fut_value,
}
return df_data
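# Hypothetical usage sketch (the names below are illustrative and not defined at this point):
# frames = build_image_df(sp500['Close'], past_step=25, fut_step=5)
# frames['market_state'].value_counts()                     # class balance of the 5 states
# frames['df_x_image'].iloc[100].values.reshape(32, 32, 3)  # recover one chart image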
def build_image_clean(stockindex_ohlcv, ret_image_size=(32,32,3), idate=10, pastlag=32):
'''
TO BE COMPLETED
NOT USED NOW
    turns one date into a (32,32,3) array
    each pixel column on the x-axis is one day
    on the y-axis the minimum OHLC value of the window maps to 0 (volume goes on the third image plane)
    on the y-axis the maximum OHLC value of the window maps to 255 (volume goes on the third image plane)
1st image: 32 x32
based on each day we place the open and close point
in ordinate int (255 * price /max ohlc)
with value of 255 for close and 127 for open
2nd image: 32 x32
based on each day we place the high low point
in ordinate int (255 * price /max ohlc)
with 64 for high and 32 for low
3rd image: 32 x32
    each column value is equal to int(255 * volume of the day / max volume over the period)
'''
    # number of days in the look-back window
tsindexstock=stockindex_ohlcv.iloc[(idate-pastlag):idate]
valmax=np.max(np.array(tsindexstock[tsindexstock.columns[:-1]]))
valmin=np.min(np.array(tsindexstock[tsindexstock.columns[:-1]]))
vol=tsindexstock[tsindexstock.columns[-1]]
x_datas=np.zeros(ret_image_size)
return x_datas
def setup_input_NN_image(xdf, past_step=25,fut_step=5, split=0.8):
'''
    this function takes the time series of the index price
    and generates a randomised train/test split of the whole series using the split ratio
    X is a time series of the flattened (32, 32, 3) chart images
    Y_StateClass is the time series of future states to predict, classified with class_shortterm_returnfut
    Y_FutPredict is the time series of the stock index shifted forward in time, to be predicted
    the dates are shuffled and two sets of dataframes are returned
'''
xdf_data=build_image_df(xdf,past_step,fut_step)
tmp_data=pd.concat([xdf_data['market_state'],xdf_data['future_value'],xdf_data['df_x_image']],axis=1)
tmp_data=tmp_data.dropna()
Y_StateClass= tmp_data['market_state']
Y_FutPredict= tmp_data['future_value']
X=tmp_data.drop(columns=['market_state','future_value'])
nb_dates=len(Y_StateClass.index)
rng = np.random.default_rng()
list_shuffle = np.arange(nb_dates)
rng.shuffle(list_shuffle)
split_index=int(split*nb_dates)
train_split=list_shuffle[:split_index]
test_split=list_shuffle[(split_index+1):]
X_train=(X.iloc[train_split])
Y_train_StateClass=(Y_StateClass.iloc[train_split])
Y_train_FutPredict=(Y_FutPredict.iloc[train_split])
X_test=(X.iloc[test_split])
Y_test_StateClass=(Y_StateClass.iloc[test_split])
Y_test_FutPredict=(Y_FutPredict.iloc[test_split])
return (X_train, Y_train_StateClass, Y_train_FutPredict), (X_test, Y_test_StateClass, Y_test_FutPredict)
def change_X_df__nparray_image(df_X_train_image_flattened ):
'''
    setup_input_NN_image returns dataframes of flattened images for X_train and X_test;
    this function turns each date back into an entry of an ndarray of (32, 32, 3) images
'''
X_train_image=df_X_train_image_flattened
nb_train=len(X_train_image.index)
x_train=np.zeros((nb_train,32,32,3))
for i in range(nb_train):
tmp=np.array(X_train_image.iloc[i])
tmp=tmp.reshape(32,32,3)
x_train[i]=tmp
return x_train
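# Hypothetical usage sketch: given the flattened frames returned by setup_input_NN_image,
# x_train = change_X_df__nparray_image(X_train_image)
# x_train.shape  # -> (number of training dates, 32, 32, 3), ready for an image model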
'''
COMMANDS FOR THE DATASET GENERATION
'''
#retrieve the long S&P 500 price history from Yahoo Finance
start = datetime(1920,1,1)
end = datetime(2020,7,31)
yf.pdr_override() # <== that's all it takes :-)
sp500 = pdr.get_data_yahoo('^GSPC',
start,
end)
#generate the dataset; it can take 6 - 8 hours
#needs to be optimised when more time is available
testsp500=(sp500['Close'])[1000:2000]
(X_train_image, Y_train_StateClass_image, Y_train_FutPredict_image) , (X_test_image, Y_test_StateClass_image, Y_test_FutPredict_image) = setup_input_NN_image(testsp500)
#save the dataframe dataset in CSV format to be reused later
#dateTimeObj = datetime.now()
#timeStr = dateTimeObj.strftime("%Y_%m_%d_%H_%M_%S_%f")
X_train_image.to_csv('datas/X_train_image.csv')
Y_train_StateClass_image.to_csv('datas/Y_train_StateClass_image.csv')
Y_train_FutPredict_image.to_csv('datas/Y_train_FutPredict_image.csv')
X_test_image.to_csv('datas/X_test_image.csv')
Y_test_StateClass_image.to_csv('datas/Y_test_StateClass_image.csv')
Y_test_FutPredict_image.to_csv('datas/Y_test_FutPredict_image.csv')
| 36.398773
| 168
| 0.740182
|
a57dedc50984afe55e13d67dcf5bf6e6ddbfd537
| 26,805
|
py
|
Python
|
tricircle-6.0.0/tricircle/db/api.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 1
|
2021-03-19T16:48:54.000Z
|
2021-03-19T16:48:54.000Z
|
tricircle-6.0.0/tricircle/db/api.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
tricircle-6.0.0/tricircle/db/api.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import sqlalchemy as sql
from sqlalchemy import or_
import time
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from tricircle.common import constants
from tricircle.common.context import is_admin_context as _is_admin_context
from tricircle.common import exceptions
from tricircle.common.i18n import _
from tricircle.db import core
from tricircle.db import models
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def db_test_stub(*args):
pass
def create_pod(context, pod_dict):
with context.session.begin():
return core.create_resource(context, models.Pod, pod_dict)
def delete_pod(context, pod_id):
with context.session.begin():
return core.delete_resource(context, models.Pod, pod_id)
def get_pod(context, pod_id):
with context.session.begin():
return core.get_resource(context, models.Pod, pod_id)
def list_pods(context, filters=None, sorts=None):
return core.query_resource(context, models.Pod, filters or [],
sorts or [])
def update_pod(context, pod_id, update_dict):
with context.session.begin():
return core.update_resource(context, models.Pod, pod_id, update_dict)
def create_cached_endpoints(context, config_dict):
with context.session.begin():
return core.create_resource(context, models.CachedEndpoint,
config_dict)
def delete_cached_endpoints(context, config_id):
with context.session.begin():
return core.delete_resource(context, models.CachedEndpoint,
config_id)
def get_cached_endpoints(context, config_id):
with context.session.begin():
return core.get_resource(context, models.CachedEndpoint,
config_id)
def list_cached_endpoints(context, filters=None, sorts=None):
return core.query_resource(context, models.CachedEndpoint,
filters or [], sorts or [])
def update_cached_endpoints(context, config_id, update_dict):
with context.session.begin():
return core.update_resource(
context, models.CachedEndpoint, config_id, update_dict)
def create_resource_mapping(context, top_id, bottom_id, pod_id, project_id,
resource_type):
try:
context.session.begin()
route = core.create_resource(context, models.ResourceRouting,
{'top_id': top_id,
'bottom_id': bottom_id,
'pod_id': pod_id,
'project_id': project_id,
'resource_type': resource_type})
context.session.commit()
return route
except db_exc.DBDuplicateEntry:
# entry has already been created
context.session.rollback()
return None
finally:
context.session.close()
def list_resource_routings(context, filters=None, limit=None, marker=None,
sorts=None):
"""Return a list of limited number of resource routings
:param context:
:param filters: list of filter dict with key 'key', 'comparator', 'value'
:param limit: an integer that limits the maximum number of items
returned in a single response
:param marker: id of the last item in the previous list
:param sorts: a list of (sort_key, sort_dir) pair,
for example, [('id', 'desc')]
:return: a list of limited number of items
"""
with context.session.begin():
return core.paginate_query(context, models.ResourceRouting,
limit,
models.ResourceRouting(
id=marker) if marker else None,
filters or [], sorts or [])
def get_resource_routing(context, id):
with context.session.begin():
return core.get_resource(context, models.ResourceRouting, id)
def delete_resource_routing(context, id):
with context.session.begin():
return core.delete_resource(context, models.ResourceRouting, id)
def update_resource_routing(context, id, update_dict):
with context.session.begin():
return core.update_resource(context, models.ResourceRouting, id,
update_dict)
def get_bottom_mappings_by_top_id(context, top_id, resource_type):
"""Get resource id and pod name on bottom
:param context: context object
:param top_id: resource id on top
:param resource_type: resource type
:return: a list of tuple (pod dict, bottom_id)
"""
route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id},
{'key': 'resource_type',
'comparator': 'eq',
'value': resource_type}]
mappings = []
with context.session.begin():
routes = core.query_resource(
context, models.ResourceRouting, route_filters, [])
for route in routes:
if not route['bottom_id']:
continue
pod = core.get_resource(context, models.Pod, route['pod_id'])
mappings.append((pod, route['bottom_id']))
return mappings
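# Illustrative usage (the resource type string 'network' is an assumption, not taken from this file):
# for pod, bottom_id in get_bottom_mappings_by_top_id(ctx, top_net_id, 'network'):
#     print(pod['region_name'], bottom_id)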
def delete_pre_created_resource_mapping(context, name):
with context.session.begin():
entries = core.query_resource(
context, models.ResourceRouting,
filters=[{'key': 'top_id', 'comparator': 'eq',
'value': name}], sorts=[])
if entries:
core.delete_resources(
context, models.ResourceRouting,
filters=[{'key': 'top_id', 'comparator': 'eq',
'value': entries[0]['bottom_id']}])
core.delete_resource(context, models.ResourceRouting,
entries[0]['id'])
def get_pod_by_top_id(context, _id):
"""Get pod resource from pod table by top id of resource
:param context: context object
:param _id: the top id of resource
:returns: pod resource
"""
route_filters = [{'key': 'top_id', 'comparator': 'eq', 'value': _id}]
with context.session.begin():
routes = core.query_resource(
context, models.ResourceRouting, route_filters, [])
if not routes or len(routes) != 1:
return None
route = routes[0]
if not route['bottom_id']:
return None
return core.get_resource(context, models.Pod, route['pod_id'])
def get_bottom_id_by_top_id_region_name(context, top_id,
region_name, resource_type):
"""Get resource bottom id by top id and bottom pod name
:param context: context object
:param top_id: resource id on top
:param region_name: name of bottom pod
:param resource_type: resource type
:return:
"""
mappings = get_bottom_mappings_by_top_id(context, top_id, resource_type)
for pod, bottom_id in mappings:
if pod['region_name'] == region_name:
return bottom_id
return None
def get_bottom_mappings_by_tenant_pod(context,
tenant_id,
pod_id,
resource_type):
"""Get resource routing for specific tenant and pod
:param context: context object
:param tenant_id: tenant id to look up
:param pod_id: pod to look up
:param resource_type: specific resource
:return: a dic {top_id : route}
"""
route_filters = [{'key': 'pod_id',
'comparator': 'eq',
'value': pod_id},
{'key': 'project_id',
'comparator': 'eq',
'value': tenant_id},
{'key': 'resource_type',
'comparator': 'eq',
'value': resource_type}]
routings = {}
with context.session.begin():
routes = core.query_resource(
context, models.ResourceRouting, route_filters, [])
for _route in routes:
if not _route['bottom_id']:
continue
routings[_route['top_id']] = _route
return routings
def delete_mappings_by_top_id(context, top_id, pod_id=None):
"""Delete resource routing entry based on top resource ID
If pod ID is also provided, only entry in the specific pod will be deleted
:param context: context object
:param top_id: top resource ID
:param pod_id: optional pod ID
:return: None
"""
filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id}]
if pod_id:
filters.append({'key': 'pod_id', 'comparator': 'eq', 'value': pod_id})
with context.session.begin():
core.delete_resources(context, models.ResourceRouting, filters=filters)
def delete_mappings_by_bottom_id(context, bottom_id):
with context.session.begin():
core.delete_resources(
context, models.ResourceRouting,
filters=[{'key': 'bottom_id', 'comparator': 'eq',
'value': bottom_id}])
def get_next_bottom_pod(context, current_pod_id=None):
pods = list_pods(context, sorts=[(models.Pod.pod_id, True)])
# NOTE(zhiyuan) number of pods is small, just traverse to filter top pod
pods = [pod for pod in pods if pod['az_name']]
for index, pod in enumerate(pods):
if not current_pod_id:
return pod
if pod['pod_id'] == current_pod_id and index < len(pods) - 1:
return pods[index + 1]
return None
def get_top_pod(context):
filters = [{'key': 'az_name', 'comparator': 'eq', 'value': ''}]
pods = list_pods(context, filters=filters)
# only one should be searched
for pod in pods:
if (pod['region_name'] != '') and \
(pod['az_name'] == ''):
return pod
return None
def get_pod_by_name(context, region_name):
filters = [{'key': 'region_name',
'comparator': 'eq', 'value': region_name}]
pods = list_pods(context, filters=filters)
# only one should be searched
for pod in pods:
if pod['region_name'] == region_name:
return pod
return None
def find_pods_by_az_or_region(context, az_or_region):
    # if az_or_region is None or empty, return None directly.
if not az_or_region:
return None
query = context.session.query(models.Pod)
query = query.filter(or_(models.Pod.region_name == az_or_region,
models.Pod.az_name == az_or_region))
return [obj.to_dict() for obj in query]
def find_pod_by_az_or_region(context, az_or_region):
pods = find_pods_by_az_or_region(context, az_or_region)
    # if pods is None, return None directly.
if pods is None:
return None
# if no pod is matched, then we will raise an exception
if len(pods) < 1:
raise exceptions.PodNotFound(az_or_region)
    # if the pods list contains exactly one pod, that pod is
    # returned
if len(pods) == 1:
return pods[0]
# if the pods list contains more than one pod, then we will raise an
# exception
if len(pods) > 1:
raise exceptions.InvalidInput(
reason='Multiple pods with the same az_name are found')
def new_job(context, project_id, _type, resource_id):
with context.session.begin():
job_dict = {'id': uuidutils.generate_uuid(),
'type': _type,
'status': constants.JS_New,
'project_id': project_id,
'resource_id': resource_id,
'extra_id': uuidutils.generate_uuid()}
job = core.create_resource(context,
models.AsyncJob, job_dict)
return job
def register_job(context, project_id, _type, resource_id):
try:
context.session.begin()
job_dict = {'id': uuidutils.generate_uuid(),
'type': _type,
'status': constants.JS_Running,
'project_id': project_id,
'resource_id': resource_id,
'extra_id': constants.SP_EXTRA_ID}
job = core.create_resource(context,
models.AsyncJob, job_dict)
context.session.commit()
return job
except db_exc.DBDuplicateEntry:
context.session.rollback()
return None
except db_exc.DBDeadlock:
context.session.rollback()
return None
finally:
context.session.close()
def get_latest_failed_or_new_jobs(context):
current_timestamp = timeutils.utcnow()
time_span = datetime.timedelta(seconds=CONF.redo_time_span)
latest_timestamp = current_timestamp - time_span
failed_jobs = []
new_jobs = []
# first we group the jobs by type and resource id, and in each group we
# pick the latest timestamp
stmt = context.session.query(
models.AsyncJob.type, models.AsyncJob.resource_id,
sql.func.max(models.AsyncJob.timestamp).label('timestamp'))
stmt = stmt.filter(models.AsyncJob.timestamp >= latest_timestamp)
stmt = stmt.group_by(models.AsyncJob.type,
models.AsyncJob.resource_id).subquery()
# then we join the result with the original table and group again, in each
# group, we pick the "minimum" of the status, for status, the ascendant
# sort sequence is "0_Fail", "1_Success", "2_Running", "3_New"
query = context.session.query(models.AsyncJob.type,
models.AsyncJob.resource_id,
models.AsyncJob.project_id,
sql.func.min(models.AsyncJob.status)).join(
stmt, sql.and_(models.AsyncJob.type == stmt.c.type,
models.AsyncJob.resource_id == stmt.c.resource_id,
models.AsyncJob.timestamp == stmt.c.timestamp))
query = query.group_by(models.AsyncJob.project_id,
models.AsyncJob.type,
models.AsyncJob.resource_id)
for job_type, resource_id, project_id, status in query:
if status == constants.JS_Fail:
failed_jobs.append({'type': job_type, 'resource_id': resource_id,
'project_id': project_id})
elif status == constants.JS_New:
new_jobs.append({'type': job_type, 'resource_id': resource_id,
'project_id': project_id})
return failed_jobs, new_jobs
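# Illustrative outcome: for each (type, resource_id) pair only the job records at the latest
# timestamp inside the redo window are considered; if the smallest status among them is
# '0_Fail' the pair goes into failed_jobs, if it is '3_New' it goes into new_jobs, and
# '1_Success' / '2_Running' pairs are skipped.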
def get_job(context, job_id):
with context.session.begin():
return core.get_resource(context, models.AsyncJob, job_id)
def get_job_from_log(context, job_id):
with context.session.begin():
return core.get_resource(context, models.AsyncJobLog, job_id)
def delete_job(context, job_id):
with context.session.begin():
return core.delete_resource(context, models.AsyncJob, job_id)
def list_jobs(context, filters=None, sorts=None, limit=None, marker=None):
with context.session.begin():
marker_obj = None
if marker is not None:
marker_obj = context.session.query(models.AsyncJob).filter(
models.AsyncJob.id == marker).first()
return core.paginate_query(
context, models.AsyncJob, limit, marker_obj,
filters or [], sorts or [])
def list_jobs_from_log(context, filters=None, sorts=None,
limit=None, marker=None):
with context.session.begin():
marker_obj = None
if marker is not None:
marker_obj = context.session.query(models.AsyncJobLog).filter(
models.AsyncJobLog.id == marker).first()
filter_is_success = True
if filters is not None and len(filters) > 0:
for filter in filters:
if filter.get('key') == 'status':
job_status = filter['value']
# job entry in job log table has no
# status attribute.
if job_status == constants.JS_Success:
filters.remove(filter)
else:
filter_is_success = False
break
if filter_is_success:
return core.paginate_query(context, models.AsyncJobLog, limit,
marker_obj,
filters or [], sorts or [])
return []
def get_latest_job(context, status, _type, resource_id):
jobs = core.query_resource(
context, models.AsyncJob,
[{'key': 'status', 'comparator': 'eq', 'value': status},
{'key': 'type', 'comparator': 'eq', 'value': _type},
{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}],
[('timestamp', False)])
if jobs:
return jobs[0]
else:
return None
def get_running_job(context, _type, resource_id):
jobs = core.query_resource(
context, models.AsyncJob,
[{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},
{'key': 'status', 'comparator': 'eq', 'value': constants.JS_Running},
{'key': 'type', 'comparator': 'eq', 'value': _type}], [])
if jobs:
return jobs[0]
else:
return None
def finish_job(context, job_id, successful, timestamp):
status = constants.JS_Success if successful else constants.JS_Fail
retries = 5
for i in range(retries + 1):
try:
with context.session.begin():
db_test_stub(i)
job_dict = {'status': status,
'timestamp': timestamp,
'extra_id': uuidutils.generate_uuid()}
job = core.update_resource(context, models.AsyncJob, job_id,
job_dict)
if status == constants.JS_Success:
log_dict = {'id': uuidutils.generate_uuid(),
'type': job['type'],
'project_id': job['project_id'],
'timestamp': timestamp,
'resource_id': job['resource_id']}
context.session.query(models.AsyncJob).filter(
sql.and_(
models.AsyncJob.type == job['type'],
models.AsyncJob.resource_id == job['resource_id'],
models.AsyncJob.timestamp <= timestamp)).delete(
synchronize_session=False)
core.create_resource(context, models.AsyncJobLog, log_dict)
else:
# sqlite has problem handling "<" operator on timestamp,
# so we slide the timestamp a bit and use "<="
timestamp = timestamp - datetime.timedelta(microseconds=1)
context.session.query(models.AsyncJob).filter(
sql.and_(
models.AsyncJob.type == job['type'],
models.AsyncJob.resource_id == job['resource_id'],
models.AsyncJob.timestamp <= timestamp)).delete(
synchronize_session=False)
except db_exc.DBDeadlock:
if i == retries:
raise
time.sleep(1)
continue
return
def ensure_agent_exists(context, pod_id, host, _type, tunnel_ip):
try:
context.session.begin()
agents = core.query_resource(
context, models.ShadowAgent,
[{'key': 'host', 'comparator': 'eq', 'value': host},
{'key': 'type', 'comparator': 'eq', 'value': _type}], [])
if agents:
return
core.create_resource(context, models.ShadowAgent,
{'id': uuidutils.generate_uuid(),
'pod_id': pod_id,
'host': host,
'type': _type,
'tunnel_ip': tunnel_ip})
context.session.commit()
except db_exc.DBDuplicateEntry:
# agent has already been created
context.session.rollback()
finally:
context.session.close()
def get_agent_by_host_type(context, host, _type):
agents = core.query_resource(
context, models.ShadowAgent,
[{'key': 'host', 'comparator': 'eq', 'value': host},
{'key': 'type', 'comparator': 'eq', 'value': _type}], [])
return agents[0] if agents else None
def _is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if _is_user_context(context):
if not context.project_id:
raise exceptions.NotAuthorized()
elif context.project_id != project_id:
raise exceptions.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if _is_user_context(context):
if not context.user_id:
raise exceptions.NotAuthorized()
elif context.user_id != user_id:
raise exceptions.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not _is_admin_context(args[0]):
raise exceptions.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not _is_admin_context(args[0]) and not _is_user_context(args[0]):
raise exceptions.NotAuthorized()
return f(*args, **kwargs)
return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
while True:
try:
return f(*args, **kwargs)
except db_exc.DBDeadlock:
LOG.warning("Deadlock detected when running "
"'%(func_name)s': Retrying...",
dict(func_name=f.__name__))
# Retry!
time.sleep(0.5)
continue
functools.update_wrapper(wrapped, f)
return wrapped
def handle_db_data_error(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except db_exc.DBDataError:
msg = _('Error writing field to database')
LOG.exception(msg)
raise exceptions.Invalid(msg)
except Exception as e:
LOG.exception(str(e))
raise
return wrapper
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or context.session
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
elif read_deleted == 'int_no':
query = query.filter_by(deleted=0)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and _is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
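# Hypothetical usage sketch (SoftDeletedModel is a placeholder for a model that actually has
# 'deleted' and 'project_id' columns, which is not true of every model in this module):
# rows = model_query(context, SoftDeletedModel, read_deleted='no', project_only=True).all()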
def is_valid_model_filters(model, filters):
"""Return True if filter values exist on the model
:param model: a Cinder model
:param filters: dictionary of filters
"""
for key in filters.keys():
if not hasattr(model, key):
return False
return True
def create_recycle_resource(context, resource_id, resource_type, project_id):
try:
context.session.begin()
route = core.create_resource(context, models.RecycleResources,
{'resource_id': resource_id,
'resource_type': resource_type,
'project_id': project_id})
context.session.commit()
return route
except db_exc.DBDuplicateEntry:
# entry has already been created
context.session.rollback()
return None
finally:
context.session.close()
def list_recycle_resources(context, filters=None, sorts=None):
with context.session.begin():
resources = core.query_resource(
context, models.RecycleResources, filters or [], sorts or [])
return resources
def delete_recycle_resource(context, resource_id):
with context.session.begin():
return core.delete_resource(
context, models.RecycleResources, resource_id)
| 35.550398
| 79
| 0.597426
|
b3334e92c4a8286f002e874a64be432e10b0e37f
| 93,885
|
py
|
Python
|
xarray/tests/test_variable.py
|
AlexBodner/xarray
|
39dc75d76b00bc60e567269a2dfd463fa482a3fd
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
xarray/tests/test_variable.py
|
AlexBodner/xarray
|
39dc75d76b00bc60e567269a2dfd463fa482a3fd
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
xarray/tests/test_variable.py
|
AlexBodner/xarray
|
39dc75d76b00bc60e567269a2dfd463fa482a3fd
|
[
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
import warnings
from copy import copy, deepcopy
from datetime import datetime, timedelta
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import pytz
from xarray import Coordinate, DataArray, Dataset, IndexVariable, Variable, set_options
from xarray.core import dtypes, duck_array_ops, indexing
from xarray.core.common import full_like, ones_like, zeros_like
from xarray.core.indexing import (
BasicIndexer,
CopyOnWriteArray,
DaskIndexingAdapter,
LazilyIndexedArray,
MemoryCachedArray,
NumpyIndexingAdapter,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
)
from xarray.core.pycompat import dask_array_type
from xarray.core.utils import NDArrayMixin
from xarray.core.variable import as_compatible_data, as_variable
from xarray.tests import requires_bottleneck
from . import (
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
raise_if_dask_computes,
requires_dask,
requires_sparse,
source_ndarray,
)
_PAD_XR_NP_ARGS = [
[{"x": (2, 1)}, ((2, 1), (0, 0), (0, 0))],
[{"x": 1}, ((1, 1), (0, 0), (0, 0))],
[{"y": (0, 3)}, ((0, 0), (0, 3), (0, 0))],
[{"x": (3, 1), "z": (2, 0)}, ((3, 1), (0, 0), (2, 0))],
[{"x": (3, 1), "z": 2}, ((3, 1), (0, 0), (2, 2))],
]
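# Reading of the table above (not an extra test case): each entry pairs an
# xarray-style pad argument with the equivalent numpy.pad width tuple for a
# 3-d ("x", "y", "z") array, e.g. pad(x=(2, 1)) corresponds to
# np.pad(data, ((2, 1), (0, 0), (0, 0))) -- 2 values before and 1 after
# along "x", nothing along "y" or "z".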
@pytest.fixture
def var():
return Variable(dims=list("xyz"), data=np.random.rand(3, 4, 5))
class VariableSubclassobjects:
def test_properties(self):
data = 0.5 * np.arange(10)
v = self.cls(["time"], data, {"foo": "bar"})
assert v.dims == ("time",)
assert_array_equal(v.values, data)
assert v.dtype == float
assert v.shape == (10,)
assert v.size == 10
assert v.sizes == {"time": 10}
assert v.nbytes == 80
assert v.ndim == 1
assert len(v) == 10
assert v.attrs == {"foo": "bar"}
def test_attrs(self):
v = self.cls(["time"], 0.5 * np.arange(10))
assert v.attrs == {}
attrs = {"foo": "bar"}
v.attrs = attrs
assert v.attrs == attrs
assert isinstance(v.attrs, dict)
v.attrs["foo"] = "baz"
assert v.attrs["foo"] == "baz"
def test_getitem_dict(self):
v = self.cls(["x"], np.random.randn(5))
actual = v[{"x": 0}]
expected = v[0]
assert_identical(expected, actual)
def test_getitem_1d(self):
data = np.array([0, 1, 2])
v = self.cls(["x"], data)
v_new = v[dict(x=[0, 1])]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=slice(None))]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
v_new = v[dict(x=Variable("a", [0, 1]))]
assert v_new.dims == ("a",)
assert_array_equal(v_new, data[[0, 1]])
v_new = v[dict(x=1)]
assert v_new.dims == ()
assert_array_equal(v_new, data[1])
# tuple argument
v_new = v[slice(None)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, data)
def test_getitem_1d_fancy(self):
v = self.cls(["x"], [0, 1, 2])
# 1d-variable should be indexable by multi-dimensional Variable
ind = Variable(("a", "b"), [[0, 1], [0, 1]])
v_new = v[ind]
assert v_new.dims == ("a", "b")
expected = np.array(v._data)[([0, 1], [0, 1]), ...]
assert_array_equal(v_new, expected)
# boolean indexing
ind = Variable(("x",), [True, False, True])
v_new = v[ind]
assert_identical(v[[0, 2]], v_new)
v_new = v[[True, False, True]]
assert_identical(v[[0, 2]], v_new)
with pytest.raises(IndexError, match=r"Boolean indexer should"):
ind = Variable(("a",), [True, False, True])
v[ind]
def test_getitem_with_mask(self):
v = self.cls(["x"], [0, 1, 2])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1])
)
assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1]))
assert_identical(
v._getitem_with_mask([0, -1, 1], fill_value=-99),
self.cls(["x"], [0, -99, 1]),
)
def test_getitem_with_mask_size_zero(self):
v = self.cls(["x"], [])
assert_identical(v._getitem_with_mask(-1), Variable((), np.nan))
assert_identical(
v._getitem_with_mask([-1, -1, -1]),
self.cls(["x"], [np.nan, np.nan, np.nan]),
)
def test_getitem_with_mask_nd_indexer(self):
v = self.cls(["x"], [0, 1, 2])
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer)
def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None):
"""Given a 1-dimensional variable, verify that the variable is indexed
like a numpy.ndarray.
"""
assert variable[0].shape == ()
assert variable[0].ndim == 0
assert variable[0].size == 1
# test identity
assert variable.equals(variable.copy())
assert variable.identical(variable.copy())
# check value is equal for both ndarray and Variable
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
np.testing.assert_equal(variable.values[0], expected_value0)
np.testing.assert_equal(variable[0].values, expected_value0)
# check type or dtype is consistent for both ndarray and Variable
if expected_dtype is None:
# check output type instead of array dtype
assert type(variable.values[0]) == type(expected_value0)
assert type(variable[0].values) == type(expected_value0)
elif expected_dtype is not False:
assert variable.values[0].dtype == expected_dtype
assert variable[0].values.dtype == expected_dtype
def test_index_0d_int(self):
for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_float(self):
for value, dtype in [(0.5, np.float_), (np.float32(0.5), np.float32)]:
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_string(self):
value = "foo"
dtype = np.dtype("U3")
x = self.cls(["x"], [value])
self._assertIndexedLikeNDArray(x, value, dtype)
def test_index_0d_datetime(self):
d = datetime(2000, 1, 1)
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, np.datetime64(d))
x = self.cls(["x"], [np.datetime64(d)])
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
x = self.cls(["x"], pd.DatetimeIndex([d]))
self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[ns]")
def test_index_0d_timedelta64(self):
td = timedelta(hours=1)
x = self.cls(["x"], [np.timedelta64(td)])
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
x = self.cls(["x"], pd.to_timedelta([td]))
self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]")
def test_index_0d_not_a_time(self):
d = np.datetime64("NaT", "ns")
x = self.cls(["x"], [d])
self._assertIndexedLikeNDArray(x, d)
def test_index_0d_object(self):
class HashableItemWrapper:
def __init__(self, item):
self.item = item
def __eq__(self, other):
return self.item == other.item
def __hash__(self):
return hash(self.item)
def __repr__(self):
return "{}(item={!r})".format(type(self).__name__, self.item)
item = HashableItemWrapper((1, 2, 3))
x = self.cls("x", [item])
self._assertIndexedLikeNDArray(x, item, expected_dtype=False)
def test_0d_object_array_with_list(self):
listarray = np.empty((1,), dtype=object)
listarray[0] = [1, 2, 3]
x = self.cls("x", listarray)
assert_array_equal(x.data, listarray)
assert_array_equal(x[0].data, listarray.squeeze())
assert_array_equal(x.squeeze().data, listarray.squeeze())
def test_index_and_concat_datetime(self):
# regression test for #125
date_range = pd.date_range("2011-09-01", periods=10)
for dates in [date_range, date_range.values, date_range.to_pydatetime()]:
expected = self.cls("t", dates)
for times in [
[expected[i] for i in range(10)],
[expected[i : (i + 1)] for i in range(10)],
[expected[[i]] for i in range(10)],
]:
actual = Variable.concat(times, "t")
assert expected.dtype == actual.dtype
assert_array_equal(expected, actual)
def test_0d_time_data(self):
# regression test for #105
x = self.cls("time", pd.date_range("2000-01-01", periods=5))
expected = np.datetime64("2000-01-01", "ns")
assert x[0].values == expected
def test_datetime64_conversion(self):
times = pd.date_range("2000-01-01", periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("datetime64[s]"), False),
(times.to_pydatetime(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("datetime64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_timedelta64_conversion(self):
times = pd.timedelta_range(start=0, periods=3)
for values, preserve_source in [
(times, True),
(times.values, True),
(times.values.astype("timedelta64[s]"), False),
(times.to_pytimedelta(), False),
]:
v = self.cls(["t"], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert_array_equal(v.values, times.values)
assert v.values.dtype == np.dtype("timedelta64[ns]")
same_source = source_ndarray(v.values) is source_ndarray(values)
assert preserve_source == same_source
def test_object_conversion(self):
data = np.arange(5).astype(str).astype(object)
actual = self.cls("x", data)
assert actual.dtype == data.dtype
def test_datetime64_valid_range(self):
data = np.datetime64("1250-01-01", "us")
pderror = pd.errors.OutOfBoundsDatetime
with pytest.raises(pderror, match=r"Out of bounds nanosecond"):
self.cls(["t"], [data])
@pytest.mark.xfail(reason="pandas issue 36615")
def test_timedelta64_valid_range(self):
data = np.timedelta64("200000", "D")
pderror = pd.errors.OutOfBoundsTimedelta
with pytest.raises(pderror, match=r"Out of bounds nanosecond"):
self.cls(["t"], [data])
def test_pandas_data(self):
v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1]))
assert_identical(v, v[[0, 1, 2]])
v = self.cls(["x"], pd.Index([0, 1, 2]))
assert v[0].values == v.values[0]
def test_pandas_period_index(self):
v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="B"))
v = v.load() # for dask-based Variable
assert v[0] == pd.Period("2000", freq="B")
assert "Period('2000-01-03', 'B')" in repr(v)
def test_1d_math(self):
x = 1.0 * np.arange(5)
y = np.ones(5)
        # do we need `.to_base_variable()` here?
        # it is probably a breaking change that `+v` changes the type
v = self.cls(["x"], x)
base_v = v.to_base_variable()
# unary ops
assert_identical(base_v, +v)
assert_identical(base_v, abs(v))
assert_array_equal((-v).values, -x)
# binary ops with numbers
assert_identical(base_v, v + 0)
assert_identical(base_v, 0 + v)
assert_identical(base_v, v * 1)
# binary ops with numpy arrays
assert_array_equal((v * x).values, x ** 2)
assert_array_equal((x * v).values, x ** 2)
assert_array_equal(v - y, v - 1)
assert_array_equal(y - v, 1 - v)
# verify attributes are dropped
v2 = self.cls(["x"], x, {"units": "meters"})
with set_options(keep_attrs=False):
assert_identical(base_v, +v2)
# binary ops with all variables
assert_array_equal(v + v, 2 * v)
w = self.cls(["x"], y, {"foo": "bar"})
assert_identical(v + w, self.cls(["x"], x + y).to_base_variable())
assert_array_equal((v * w).values, x * y)
# something complicated
assert_array_equal((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
# make sure dtype is preserved (for Index objects)
assert float == (+v).dtype
assert float == (+v).values.dtype
assert float == (0 + v).dtype
assert float == (0 + v).values.dtype
# check types of returned data
assert isinstance(+v, Variable)
assert not isinstance(+v, IndexVariable)
assert isinstance(0 + v, Variable)
assert not isinstance(0 + v, IndexVariable)
def test_1d_reduce(self):
x = np.arange(5)
v = self.cls(["x"], x)
actual = v.sum()
expected = Variable((), 10)
assert_identical(expected, actual)
assert type(actual) is Variable
def test_array_interface(self):
x = np.arange(5)
v = self.cls(["x"], x)
assert_array_equal(np.asarray(v), x)
# test patched in methods
assert_array_equal(v.astype(float), x.astype(float))
        # this may be a breaking change: argsort changes the type
assert_identical(v.argsort(), v.to_base_variable())
assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable())
# test ufuncs
assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable())
assert isinstance(np.sin(v), Variable)
assert not isinstance(np.sin(v), IndexVariable)
def example_1d_objects(self):
for data in [
range(3),
0.5 * np.arange(3),
0.5 * np.arange(3, dtype=np.float32),
pd.date_range("2000-01-01", periods=3),
np.array(["a", "b", "c"], dtype=object),
]:
yield (self.cls("x", data), data)
def test___array__(self):
for v, data in self.example_1d_objects():
assert_array_equal(v.values, np.asarray(data))
assert_array_equal(np.asarray(v), np.asarray(data))
assert v[0].values == np.asarray(data)[0]
assert np.asarray(v[0]) == np.asarray(data)[0]
def test_equals_all_dtypes(self):
for v, _ in self.example_1d_objects():
v2 = v.copy()
assert v.equals(v2)
assert v.identical(v2)
assert v.no_conflicts(v2)
assert v[0].equals(v2[0])
assert v[0].identical(v2[0])
assert v[0].no_conflicts(v2[0])
assert v[:2].equals(v2[:2])
assert v[:2].identical(v2[:2])
assert v[:2].no_conflicts(v2[:2])
def test_eq_all_dtypes(self):
# ensure that we don't choke on comparisons for which numpy returns
# scalars
expected = Variable("x", 3 * [False])
for v, _ in self.example_1d_objects():
actual = "z" == v
assert_identical(expected, actual)
actual = ~("z" != v)
assert_identical(expected, actual)
def test_encoding_preserved(self):
expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2})
for actual in [
expected.T,
expected[...],
expected.squeeze(),
expected.isel(x=slice(None)),
expected.set_dims({"x": 3}),
expected.copy(deep=True),
expected.copy(deep=False),
]:
assert_identical(expected.to_base_variable(), actual.to_base_variable())
assert expected.encoding == actual.encoding
def test_concat(self):
x = np.arange(5)
y = np.arange(5, 10)
v = self.cls(["a"], x)
w = self.cls(["a"], y)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
assert_identical(
Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b")
)
with pytest.raises(ValueError, match=r"Variable has dimensions"):
Variable.concat([v, Variable(["c"], y)], "b")
# test indexers
actual = Variable.concat(
[v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a"
)
expected = Variable("a", np.array([x, y]).ravel(order="F"))
assert_identical(expected, actual)
# test concatenating along a dimension
v = Variable(["time", "x"], np.random.random((10, 8)))
assert_identical(v, Variable.concat([v[:5], v[5:]], "time"))
assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time"))
assert_identical(v, Variable.concat([v[:1], v[1:]], "time"))
# test dimension order
assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x"))
with pytest.raises(ValueError, match=r"all input arrays must have"):
Variable.concat([v[:, 0], v[:, 1:]], "x")
def test_concat_attrs(self):
# always keep attrs from first variable
v = self.cls("a", np.arange(5), {"foo": "bar"})
w = self.cls("a", np.ones(5))
expected = self.cls(
"a", np.concatenate([np.arange(5), np.ones(5)])
).to_base_variable()
expected.attrs["foo"] = "bar"
assert_identical(expected, Variable.concat([v, w], "a"))
def test_concat_fixed_len_str(self):
# regression test for #217
for kind in ["S", "U"]:
x = self.cls("animal", np.array(["horse"], dtype=kind))
y = self.cls("animal", np.array(["aardvark"], dtype=kind))
actual = Variable.concat([x, y], "animal")
expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
assert_equal(expected, actual)
def test_concat_number_strings(self):
# regression test for #305
a = self.cls("x", ["0", "1", "2"])
b = self.cls("x", ["3", "4"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.arange(5).astype(str))
assert_identical(expected, actual)
assert actual.dtype.kind == expected.dtype.kind
def test_concat_mixed_dtypes(self):
a = self.cls("x", [0, 1])
b = self.cls("x", ["two"])
actual = Variable.concat([a, b], dim="x")
expected = Variable("x", np.array([0, 1, "two"], dtype=object))
assert_identical(expected, actual)
assert actual.dtype == object
@pytest.mark.parametrize("deep", [True, False])
@pytest.mark.parametrize("astype", [float, int, str])
def test_copy(self, deep, astype):
v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"})
w = v.copy(deep=deep)
assert type(v) is type(w)
assert_identical(v, w)
assert v.dtype == w.dtype
if self.cls is Variable:
if deep:
assert source_ndarray(v.values) is not source_ndarray(w.values)
else:
assert source_ndarray(v.values) is source_ndarray(w.values)
assert_identical(v, copy(v))
def test_copy_index(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
)
v = self.cls("x", midx)
for deep in [True, False]:
w = v.copy(deep=deep)
assert isinstance(w._data, PandasIndexAdapter)
assert isinstance(w.to_index(), pd.MultiIndex)
assert_array_equal(v._data.array, w._data.array)
def test_copy_with_data(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = np.array([[2.5, 5.0], [7.1, 43]])
actual = orig.copy(data=new_data)
expected = orig.copy()
expected.data = new_data
assert_identical(expected, actual)
def test_copy_with_data_errors(self):
orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
new_data = [2.5, 5.0]
with pytest.raises(ValueError, match=r"must match shape of object"):
orig.copy(data=new_data)
def test_copy_index_with_data(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 10)
actual = orig.copy(data=new_data)
expected = IndexVariable("x", np.arange(5, 10))
assert_identical(expected, actual)
def test_copy_index_with_data_errors(self):
orig = IndexVariable("x", np.arange(5))
new_data = np.arange(5, 20)
with pytest.raises(ValueError, match=r"must match shape of object"):
orig.copy(data=new_data)
with pytest.raises(ValueError, match=r"Cannot assign to the .data"):
orig.data = new_data
with pytest.raises(ValueError, match=r"Cannot assign to the .values"):
orig.values = new_data
def test_replace(self):
var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"})
result = var._replace()
assert_identical(result, var)
new_data = np.arange(4).reshape(2, 2)
result = var._replace(data=new_data)
assert_array_equal(result.data, new_data)
def test_real_and_imag(self):
v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"})
expected_re = self.cls("x", np.arange(3), {"foo": "bar"})
assert_identical(v.real, expected_re)
expected_im = self.cls("x", -np.arange(3), {"foo": "bar"})
assert_identical(v.imag, expected_im)
expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
assert_allclose(abs(v), expected_abs)
def test_aggregate_complex(self):
# should skip NaNs
v = self.cls("x", [1, 2j, np.nan])
expected = Variable((), 0.5 + 1j)
assert_allclose(v.mean(), expected)
    def test_pandas_categorical_dtype(self):
data = pd.Categorical(np.arange(10, dtype="int64"))
v = self.cls("x", data)
print(v) # should not error
assert v.dtype == "int64"
def test_pandas_datetime64_with_tz(self):
data = pd.date_range(
start="2000-01-01",
tz=pytz.timezone("America/New_York"),
periods=10,
freq="1h",
)
v = self.cls("x", data)
print(v) # should not error
if "America/New_York" in str(data.dtype):
# pandas is new enough that it has datetime64 with timezone dtype
assert v.dtype == "object"
def test_multiindex(self):
idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
v = self.cls("x", idx)
assert_identical(Variable((), ("a", 0)), v[0])
assert_identical(v, v[:])
def test_load(self):
array = self.cls("x", np.arange(5))
orig_data = array._data
copied = array.copy(deep=True)
if array.chunks is None:
array.load()
assert type(array._data) is type(orig_data)
assert type(copied._data) is type(orig_data)
assert_identical(array, copied)
def test_getitem_advanced(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
# orthogonal indexing
v_new = v[([0, 1], [1, 0])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]])
v_new = v[[0, 1]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[[0, 1]])
# with mixed arguments
ind = Variable(["a"], [0, 1])
v_new = v[dict(x=[0, 1], y=ind)]
assert v_new.dims == ("x", "a")
assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]])
# boolean indexing
v_new = v[dict(x=[True, False], y=[False, True, False])]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v_data[0][1])
# with scalar variable
ind = Variable((), 2)
v_new = v[dict(y=ind)]
expected = v[dict(y=2)]
assert_array_equal(v_new, expected)
# with boolean variable with wrong shape
ind = np.array([True, False])
with pytest.raises(IndexError, match=r"Boolean array size 2 is "):
v[Variable(("a", "b"), [[0, 1]]), ind]
# boolean indexing with different dimension
ind = Variable(["a"], [True, False, False])
with pytest.raises(IndexError, match=r"Boolean indexer should be"):
v[dict(y=ind)]
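    # Note (informal summary of the tests above): with plain lists/arrays,
    # Variable indexing is orthogonal -- v[([0, 1], [1, 0])] selects the outer
    # product of rows and columns rather than numpy-style pointwise pairs;
    # pointwise (vectorized) selection requires Variable indexers that share
    # a dimension, as in the "along diagonal" case in test_getitem_fancy.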
def test_getitem_uint_1d(self):
# regression test for #1405
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[0])
def test_getitem_uint(self):
# regression test for #1405
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
v_new = v[np.array([0])]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.array([0], dtype="uint64")]
assert_array_equal(v_new, v_data[[0], :])
v_new = v[np.uint64(0)]
assert_array_equal(v_new, v_data[0, :])
def test_getitem_0d_array(self):
# make sure 0d-np.array can be used as an indexer
v = self.cls(["x"], [0, 1, 2])
v_data = v.compute().data
v_new = v[np.array([0])[0]]
assert_array_equal(v_new, v_data[0])
v_new = v[np.array(0)]
assert_array_equal(v_new, v_data[0])
v_new = v[Variable((), np.array(0))]
assert_array_equal(v_new, v_data[0])
def test_getitem_fancy(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
v_data = v.compute().data
ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
        # Indexing with a multi-dimensional array that includes the same
        # dimension name is also allowed
ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]])
v_new = v[ind]
assert v_new.dims == ("x", "b", "y")
assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :])
ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]])
v_new = v[dict(y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=[1, 0], y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[[1, 0]][:, ind])
# along diagonal
ind = Variable(["a"], [0, 1])
v_new = v[ind, ind]
assert v_new.dims == ("a",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with integer
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=0, y=ind)]
assert v_new.dims == ("a", "b")
assert_array_equal(v_new[0], v_data[0][[0, 0]])
assert_array_equal(v_new[1], v_data[0][[1, 1]])
# with slice
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=slice(None), y=ind)]
assert v_new.dims == ("x", "a", "b")
assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :])
ind = Variable(["a", "b"], [[0, 0], [1, 1]])
v_new = v[dict(x=ind, y=slice(None, 1))]
assert v_new.dims == ("a", "b", "y")
assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)])
# slice matches explicit dimension
ind = Variable(["y"], [0, 1])
v_new = v[ind, :2]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v_data[[0, 1], [0, 1]])
# with multiple slices
v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]])
ind = Variable(["a", "b"], [[0]])
v_new = v[ind, :, :]
expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...])
assert_identical(v_new, expected)
v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]])
ind = Variable(["y"], [0])
v_new = v[ind, :, 1:2, 2]
expected = Variable(["y", "x"], [[6]])
assert_identical(v_new, expected)
# slice and vector mixed indexing resulting in the same dimension
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1, 2])
v_new = v[:, ind]
expected = Variable(("x", "z"), np.zeros((3, 5)))
expected[0] = v.data[0, 0]
expected[1] = v.data[1, 1]
expected[2] = v.data[2, 2]
assert_identical(v_new, expected)
v_new = v[:, ind.data]
assert v_new.shape == (3, 3, 5)
def test_getitem_error(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
with pytest.raises(IndexError, match=r"labeled multi-"):
v[[[0, 1], [1, 2]]]
ind_x = Variable(["a"], [0, 1, 1])
ind_y = Variable(["a"], [0, 1])
with pytest.raises(IndexError, match=r"Dimensions of indexers "):
v[ind_x, ind_y]
ind = Variable(["a", "b"], [[True, False], [False, True]])
with pytest.raises(IndexError, match=r"2-dimensional boolean"):
v[dict(x=ind)]
v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5))
ind = Variable(["x"], [0, 1])
with pytest.raises(IndexError, match=r"Dimensions of indexers mis"):
v[:, ind]
@pytest.mark.parametrize(
"mode",
[
"mean",
pytest.param(
"median",
marks=pytest.mark.xfail(reason="median is not implemented by Dask"),
),
pytest.param(
"reflect", marks=pytest.mark.xfail(reason="dask.array.pad bug")
),
"edge",
pytest.param(
"linear_ramp",
marks=pytest.mark.xfail(
reason="pint bug: https://github.com/hgrecco/pint/issues/1026"
),
),
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
@pytest.mark.filterwarnings(
r"ignore:dask.array.pad.+? converts integers to floats."
)
def test_pad(self, mode, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode=mode, **xr_arg)
expected = np.pad(data, np_arg, mode=mode)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad_constant_values(self, xr_arg, np_arg):
data = np.arange(4 * 3 * 2).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(**xr_arg)
expected = np.pad(
np.array(v.data.astype(float)),
np_arg,
mode="constant",
constant_values=np.nan,
)
assert_array_equal(actual, expected)
assert isinstance(actual._data, type(v._data))
# for the boolean array, we pad False
data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2)
v = self.cls(["x", "y", "z"], data)
actual = v.pad(mode="constant", constant_values=False, **xr_arg)
expected = np.pad(
np.array(v.data), np_arg, mode="constant", constant_values=False
)
assert_array_equal(actual, expected)
@pytest.mark.parametrize("d, w", (("x", 3), ("y", 5)))
def test_rolling_window(self, d, w):
# Just a working test. See test_nputils for the algorithm validation
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
v_rolling = v.rolling_window(d, w, d + "_window")
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
v_rolling = v.rolling_window(d, w, d + "_window", center=True)
assert v_rolling.dims == ("x", "y", "z", d + "_window")
assert v_rolling.shape == v.shape + (w,)
# dask and numpy result should be the same
v_loaded = v.load().rolling_window(d, w, d + "_window", center=True)
assert_array_equal(v_rolling, v_loaded)
        # the numpy backend should not be overwritten
if isinstance(v._data, np.ndarray):
with pytest.raises(ValueError):
v_loaded[0] = 1.0
def test_rolling_1d(self):
x = self.cls("x", np.array([1, 2, 3, 4], dtype=float))
kwargs = dict(dim="x", window=3, window_dim="xw")
actual = x.rolling_window(**kwargs, center=True, fill_value=np.nan)
expected = Variable(
("x", "xw"),
np.array(
[[np.nan, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, np.nan]], dtype=float
),
)
assert_equal(actual, expected)
actual = x.rolling_window(**kwargs, center=False, fill_value=0.0)
expected = self.cls(
("x", "xw"),
np.array([[0, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]], dtype=float),
)
assert_equal(actual, expected)
x = self.cls(("y", "x"), np.stack([x, x * 1.1]))
actual = x.rolling_window(**kwargs, center=False, fill_value=0.0)
expected = self.cls(
("y", "x", "xw"), np.stack([expected.data, expected.data * 1.1], axis=0)
)
assert_equal(actual, expected)
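    # Reading of the expected values above (informal): rolling_window(dim="x",
    # window=3, window_dim="xw", center=True) appends a new "xw" dimension of
    # length 3 holding, for each position along "x", the window of
    # neighbouring values centred on that position, padded with fill_value at
    # the edges.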
@pytest.mark.parametrize("center", [[True, True], [False, False]])
@pytest.mark.parametrize("dims", [("x", "y"), ("y", "z"), ("z", "x")])
def test_nd_rolling(self, center, dims):
x = self.cls(
("x", "y", "z"),
np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float),
)
window = [3, 3]
actual = x.rolling_window(
dim=dims,
window=window,
window_dim=[f"{k}w" for k in dims],
center=center,
fill_value=np.nan,
)
expected = x
for dim, win, cent in zip(dims, window, center):
expected = expected.rolling_window(
dim=dim,
window=win,
window_dim=f"{dim}w",
center=cent,
fill_value=np.nan,
)
assert_equal(actual, expected)
@pytest.mark.parametrize(
("dim, window, window_dim, center"),
[
("x", [3, 3], "x_w", True),
("x", 3, ("x_w", "x_w"), True),
("x", 3, "x_w", [True, True]),
],
)
def test_rolling_window_errors(self, dim, window, window_dim, center):
x = self.cls(
("x", "y", "z"),
np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float),
)
with pytest.raises(ValueError):
x.rolling_window(
dim=dim,
window=window,
window_dim=window_dim,
center=center,
)
class TestVariable(VariableSubclassobjects):
cls = staticmethod(Variable)
@pytest.fixture(autouse=True)
def setup(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def test_data_and_values(self):
v = Variable(["time", "x"], self.d)
assert_array_equal(v.data, self.d)
assert_array_equal(v.values, self.d)
assert source_ndarray(v.values) is self.d
with pytest.raises(ValueError):
# wrong size
v.values = np.random.random(5)
d2 = np.random.random((10, 3))
v.values = d2
assert source_ndarray(v.values) is d2
d3 = np.random.random((10, 3))
v.data = d3
assert source_ndarray(v.data) is d3
def test_numpy_same_methods(self):
v = Variable([], np.float32(0.0))
assert v.item() == 0
assert type(v.item()) is float
v = IndexVariable("x", np.arange(5))
assert 2 == v.searchsorted(2)
def test_datetime64_conversion_scalar(self):
expected = np.datetime64("2000-01-01", "ns")
for values in [
np.datetime64("2000-01-01"),
pd.Timestamp("2000-01-01T00"),
datetime(2000, 1, 1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("datetime64[ns]")
def test_timedelta64_conversion_scalar(self):
expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, "ns")
for values in [
np.timedelta64(1, "D"),
pd.Timedelta("1 day"),
timedelta(days=1),
]:
v = Variable([], values)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == expected
assert v.values.dtype == np.dtype("timedelta64[ns]")
def test_0d_str(self):
v = Variable([], "foo")
assert v.dtype == np.dtype("U3")
assert v.values == "foo"
v = Variable([], np.string_("foo"))
assert v.dtype == np.dtype("S3")
assert v.values == bytes("foo", "ascii")
def test_0d_datetime(self):
v = Variable([], pd.Timestamp("2000-01-01"))
assert v.dtype == np.dtype("datetime64[ns]")
assert v.values == np.datetime64("2000-01-01", "ns")
def test_0d_timedelta(self):
for td in [pd.to_timedelta("1s"), np.timedelta64(1, "s")]:
v = Variable([], td)
assert v.dtype == np.dtype("timedelta64[ns]")
assert v.values == np.timedelta64(10 ** 9, "ns")
def test_equals_and_identical(self):
d = np.random.rand(10, 3)
d[0, 0] = np.nan
v1 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
v2 = Variable(("dim1", "dim2"), data=d, attrs={"att1": 3, "att2": [1, 2, 3]})
assert v1.equals(v2)
assert v1.identical(v2)
v3 = Variable(("dim1", "dim3"), data=d)
assert not v1.equals(v3)
v4 = Variable(("dim1", "dim2"), data=d)
assert v1.equals(v4)
assert not v1.identical(v4)
v5 = deepcopy(v1)
v5.values[:] = np.random.rand(10, 3)
assert not v1.equals(v5)
assert not v1.equals(None)
assert not v1.equals(d)
assert not v1.identical(None)
assert not v1.identical(d)
def test_broadcast_equals(self):
v1 = Variable((), np.nan)
v2 = Variable(("x"), [np.nan, np.nan])
assert v1.broadcast_equals(v2)
assert not v1.equals(v2)
assert not v1.identical(v2)
v3 = Variable(("x"), [np.nan])
assert v1.broadcast_equals(v3)
assert not v1.equals(v3)
assert not v1.identical(v3)
assert not v1.broadcast_equals(None)
v4 = Variable(("x"), [np.nan] * 3)
assert not v2.broadcast_equals(v4)
def test_no_conflicts(self):
v1 = Variable(("x"), [1, 2, np.nan, np.nan])
v2 = Variable(("x"), [np.nan, 2, 3, np.nan])
assert v1.no_conflicts(v2)
assert not v1.equals(v2)
assert not v1.broadcast_equals(v2)
assert not v1.identical(v2)
assert not v1.no_conflicts(None)
v3 = Variable(("y"), [np.nan, 2, 3, np.nan])
assert not v3.no_conflicts(v1)
d = np.array([1, 2, np.nan, np.nan])
assert not v1.no_conflicts(d)
assert not v2.no_conflicts(d)
v4 = Variable(("w", "x"), [d])
assert v1.no_conflicts(v4)
def test_as_variable(self):
data = np.arange(10)
expected = Variable("x", data)
expected_extra = Variable(
"x", data, attrs={"myattr": "val"}, encoding={"scale_factor": 1}
)
assert_identical(expected, as_variable(expected))
ds = Dataset({"x": expected})
var = as_variable(ds["x"]).to_base_variable()
assert_identical(expected, var)
assert not isinstance(ds["x"], Variable)
assert isinstance(as_variable(ds["x"]), Variable)
xarray_tuple = (
expected_extra.dims,
expected_extra.values,
expected_extra.attrs,
expected_extra.encoding,
)
assert_identical(expected_extra, as_variable(xarray_tuple))
with pytest.raises(TypeError, match=r"tuple of form"):
as_variable(tuple(data))
with pytest.raises(ValueError, match=r"tuple of form"): # GH1016
as_variable(("five", "six", "seven"))
with pytest.raises(TypeError, match=r"without an explicit list of dimensions"):
as_variable(data)
actual = as_variable(data, name="x")
assert_identical(expected.to_index_variable(), actual)
actual = as_variable(0)
expected = Variable([], 0)
assert_identical(expected, actual)
data = np.arange(9).reshape((3, 3))
expected = Variable(("x", "y"), data)
with pytest.raises(ValueError, match=r"without explicit dimension names"):
as_variable(data, name="x")
with pytest.raises(ValueError, match=r"has more than 1-dimension"):
as_variable(expected, name="x")
# test datetime, timedelta conversion
dt = np.array([datetime(1999, 1, 1) + timedelta(days=x) for x in range(10)])
assert as_variable(dt, "time").dtype.kind == "M"
td = np.array([timedelta(days=x) for x in range(10)])
assert as_variable(td, "time").dtype.kind == "m"
with pytest.warns(DeprecationWarning):
as_variable(("x", DataArray([])))
def test_repr(self):
v = Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
expected = dedent(
"""
<xarray.Variable (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Attributes:
foo: bar
"""
).strip()
assert expected == repr(v)
def test_repr_lazy_data(self):
v = Variable("x", LazilyIndexedArray(np.arange(2e5)))
assert "200000 values with dtype" in repr(v)
assert isinstance(v._data, LazilyIndexedArray)
def test_detect_indexer_type(self):
"""Tests indexer type was correctly detected."""
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
_, ind, _ = v._broadcast_indexes((0, 1))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, slice(0, 8, 2)))
assert type(ind) == indexing.BasicIndexer
_, ind, _ = v._broadcast_indexes((0, [0, 1]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], 1))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], [1, 2]))
assert type(ind) == indexing.OuterIndexer
_, ind, _ = v._broadcast_indexes(([0, 1], slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, slice(0, 8, 2)))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("y",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.OuterIndexer
vind = Variable(("a",), [0, 1])
_, ind, _ = v._broadcast_indexes((vind, vind))
assert type(ind) == indexing.VectorizedIndexer
vind = Variable(("a", "b"), [[0, 2], [1, 3]])
_, ind, _ = v._broadcast_indexes((vind, 3))
assert type(ind) == indexing.VectorizedIndexer
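    # Rough rule inferred from the assertions above (informal, not an API
    # guarantee): scalar/slice-only keys yield a BasicIndexer; array-like
    # keys whose dimensions do not need to be broadcast against each other
    # yield an OuterIndexer; keys that share a dimension (or are themselves
    # multi-dimensional) yield a VectorizedIndexer.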
def test_indexer_type(self):
        # GH issue 1688: a wrong indexer type induces NotImplementedError
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
def assert_indexer_type(key, object_type):
dims, index_tuple, new_order = v._broadcast_indexes(key)
assert isinstance(index_tuple, object_type)
# should return BasicIndexer
assert_indexer_type((0, 1), BasicIndexer)
assert_indexer_type((0, slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), slice(None, None)), BasicIndexer)
assert_indexer_type((Variable([], 3), (Variable([], 6))), BasicIndexer)
# should return OuterIndexer
assert_indexer_type(([0, 1], 1), OuterIndexer)
assert_indexer_type(([0, 1], [1, 2]), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), 1), OuterIndexer)
assert_indexer_type((Variable(("x"), [0, 1]), slice(None, None)), OuterIndexer)
assert_indexer_type(
(Variable(("x"), [0, 1]), Variable(("y"), [0, 1])), OuterIndexer
)
# should return VectorizedIndexer
assert_indexer_type((Variable(("y"), [0, 1]), [0, 1]), VectorizedIndexer)
assert_indexer_type(
(Variable(("z"), [0, 1]), Variable(("z"), [0, 1])), VectorizedIndexer
)
assert_indexer_type(
(
Variable(("a", "b"), [[0, 1], [1, 2]]),
Variable(("a", "b"), [[0, 1], [1, 2]]),
),
VectorizedIndexer,
)
def test_items(self):
data = np.random.random((10, 11))
v = Variable(["x", "y"], data)
# test slicing
assert_identical(v, v[:])
assert_identical(v, v[...])
assert_identical(Variable(["y"], data[0]), v[0])
assert_identical(Variable(["x"], data[:, 0]), v[:, 0])
assert_identical(Variable(["x", "y"], data[:3, :2]), v[:3, :2])
# test array indexing
x = Variable(["x"], np.arange(10))
y = Variable(["y"], np.arange(11))
assert_identical(v, v[x.values])
assert_identical(v, v[x])
assert_identical(v[:3], v[x < 3])
assert_identical(v[:, 3:], v[:, y >= 3])
assert_identical(v[:3, 3:], v[x < 3, y >= 3])
assert_identical(v[:3, :2], v[x[:3], y[:2]])
assert_identical(v[:3, :2], v[range(3), range(2)])
# test iteration
for n, item in enumerate(v):
assert_identical(Variable(["y"], data[n]), item)
with pytest.raises(TypeError, match=r"iteration over a 0-d"):
iter(Variable([], 0))
# test setting
v.values[:] = 0
assert np.all(v.values == 0)
# test orthogonal setting
v[range(10), range(11)] = 1
assert_array_equal(v.values, np.ones((10, 11)))
def test_getitem_basic(self):
v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]])
# int argument
v_new = v[0]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
# slice argument
v_new = v[:2]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[:2])
# list arguments
v_new = v[[0]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[0]])
v_new = v[[]]
assert v_new.dims == ("x", "y")
assert_array_equal(v_new, v._data[[]])
# dict arguments
v_new = v[dict(x=0)]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=slice(None))]
assert v_new.dims == ("y",)
assert_array_equal(v_new, v._data[0])
v_new = v[dict(x=0, y=1)]
assert v_new.dims == ()
assert_array_equal(v_new, v._data[0, 1])
v_new = v[dict(y=1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# tuple argument
v_new = v[(slice(None), 1)]
assert v_new.dims == ("x",)
assert_array_equal(v_new, v._data[:, 1])
# test that we obtain a modifiable view when taking a 0d slice
v_new = v[0, 0]
v_new[...] += 99
assert_array_equal(v_new, v._data[0, 0])
def test_getitem_with_mask_2d_input(self):
v = Variable(("x", "y"), [[0, 1, 2], [3, 4, 5]])
assert_identical(
v._getitem_with_mask(([-1, 0], [1, -1])),
Variable(("x", "y"), [[np.nan, np.nan], [1, np.nan]]),
)
assert_identical(v._getitem_with_mask((slice(2), [0, 1, 2])), v)
def test_isel(self):
v = Variable(["time", "x"], self.d)
assert_identical(v.isel(time=slice(None)), v)
assert_identical(v.isel(time=0), v[0])
assert_identical(v.isel(time=slice(0, 3)), v[:3])
assert_identical(v.isel(x=0), v[:, 0])
assert_identical(v.isel(x=[0, 2]), v[:, [0, 2]])
assert_identical(v.isel(time=[]), v[[]])
with pytest.raises(
ValueError,
match=r"Dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0)
with pytest.warns(
UserWarning,
match=r"Dimensions {'not_a_dim'} do not exist. Expected one or more of "
r"\('time', 'x'\)",
):
v.isel(not_a_dim=0, missing_dims="warn")
assert_identical(v, v.isel(not_a_dim=0, missing_dims="ignore"))
def test_index_0d_numpy_string(self):
        # regression test to verify our workaround for indexing 0d strings
v = Variable([], np.string_("asdf"))
assert_identical(v[()], v)
v = Variable([], np.unicode_("asdf"))
assert_identical(v[()], v)
def test_indexing_0d_unicode(self):
# regression test for GH568
actual = Variable(("x"), ["tmax"])[0][()]
expected = Variable((), "tmax")
assert_identical(actual, expected)
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
def test_shift(self, fill_value):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.shift(x=0))
assert v is not v.shift(x=0)
expected = Variable("x", [np.nan, np.nan, 1, 2, 3])
assert_identical(expected, v.shift(x=2))
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_exp = np.nan
else:
fill_value_exp = fill_value
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4])
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
expected = Variable("x", [2, 3, 4, 5, fill_value_exp])
assert_identical(expected, v.shift(x=-1, fill_value=fill_value))
expected = Variable("x", [fill_value_exp] * 5)
assert_identical(expected, v.shift(x=5, fill_value=fill_value))
assert_identical(expected, v.shift(x=6, fill_value=fill_value))
with pytest.raises(ValueError, match=r"dimension"):
v.shift(z=0)
v = Variable("x", [1, 2, 3, 4, 5], {"foo": "bar"})
assert_identical(v, v.shift(x=0))
expected = Variable("x", [fill_value_exp, 1, 2, 3, 4], {"foo": "bar"})
assert_identical(expected, v.shift(x=1, fill_value=fill_value))
def test_shift2d(self):
v = Variable(("x", "y"), [[1, 2], [3, 4]])
expected = Variable(("x", "y"), [[np.nan, np.nan], [np.nan, 1]])
assert_identical(expected, v.shift(x=1, y=1))
def test_roll(self):
v = Variable("x", [1, 2, 3, 4, 5])
assert_identical(v, v.roll(x=0))
assert v is not v.roll(x=0)
expected = Variable("x", [5, 1, 2, 3, 4])
assert_identical(expected, v.roll(x=1))
assert_identical(expected, v.roll(x=-4))
assert_identical(expected, v.roll(x=6))
expected = Variable("x", [4, 5, 1, 2, 3])
assert_identical(expected, v.roll(x=2))
assert_identical(expected, v.roll(x=-3))
with pytest.raises(ValueError, match=r"dimension"):
v.roll(z=0)
def test_roll_consistency(self):
v = Variable(("x", "y"), np.random.randn(5, 6))
for axis, dim in [(0, "x"), (1, "y")]:
for shift in [-3, 0, 1, 7, 11]:
expected = np.roll(v.values, shift, axis=axis)
actual = v.roll(**{dim: shift}).values
assert_array_equal(expected, actual)
def test_transpose(self):
v = Variable(["time", "x"], self.d)
v2 = Variable(["x", "time"], self.d.T)
assert_identical(v, v2.transpose())
assert_identical(v.transpose(), v.T)
x = np.random.randn(2, 3, 4, 5)
w = Variable(["a", "b", "c", "d"], x)
w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
assert w2.shape == (5, 3, 4, 2)
assert_identical(w2, w.transpose("d", "b", "c", "a"))
assert_identical(w2, w.transpose("d", ..., "a"))
assert_identical(w2, w.transpose("d", "b", "c", ...))
assert_identical(w2, w.transpose(..., "b", "c", "a"))
assert_identical(w, w2.transpose("a", "b", "c", "d"))
w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
assert_identical(w, w3.transpose("a", "b", "c", "d"))
def test_transpose_0d(self):
for value in [
3.5,
("a", 1),
np.datetime64("2000-01-01"),
np.timedelta64(1, "h"),
None,
object(),
]:
variable = Variable([], value)
actual = variable.transpose()
assert_identical(actual, variable)
def test_squeeze(self):
v = Variable(["x", "y"], [[1]])
assert_identical(Variable([], 1), v.squeeze())
assert_identical(Variable(["y"], [1]), v.squeeze("x"))
assert_identical(Variable(["y"], [1]), v.squeeze(["x"]))
assert_identical(Variable(["x"], [1]), v.squeeze("y"))
assert_identical(Variable([], 1), v.squeeze(["x", "y"]))
v = Variable(["x", "y"], [[1, 2]])
assert_identical(Variable(["y"], [1, 2]), v.squeeze())
assert_identical(Variable(["y"], [1, 2]), v.squeeze("x"))
with pytest.raises(ValueError, match=r"cannot select a dimension"):
v.squeeze("y")
def test_get_axis_num(self):
v = Variable(["x", "y", "z"], np.random.randn(2, 3, 4))
assert v.get_axis_num("x") == 0
assert v.get_axis_num(["x"]) == (0,)
assert v.get_axis_num(["x", "y"]) == (0, 1)
assert v.get_axis_num(["z", "y", "x"]) == (2, 1, 0)
with pytest.raises(ValueError, match=r"not found in array dim"):
v.get_axis_num("foobar")
def test_set_dims(self):
v = Variable(["x"], [0, 1])
actual = v.set_dims(["x", "y"])
expected = Variable(["x", "y"], [[0], [1]])
assert_identical(actual, expected)
actual = v.set_dims(["y", "x"])
assert_identical(actual, expected.T)
actual = v.set_dims({"x": 2, "y": 2})
expected = Variable(["x", "y"], [[0, 0], [1, 1]])
assert_identical(actual, expected)
v = Variable(["foo"], [0, 1])
actual = v.set_dims("foo")
expected = v
assert_identical(actual, expected)
with pytest.raises(ValueError, match=r"must be a superset"):
v.set_dims(["z"])
def test_set_dims_object_dtype(self):
v = Variable([], ("a", 1))
actual = v.set_dims(("x",), (3,))
exp_values = np.empty((3,), dtype=object)
for i in range(3):
exp_values[i] = ("a", 1)
expected = Variable(["x"], exp_values)
assert_identical(actual, expected)
def test_stack(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
actual = v.stack(z=("x", "y"))
expected = Variable("z", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=("x",))
expected = Variable(("y", "z"), v.data.T, v.attrs)
assert_identical(actual, expected)
actual = v.stack(z=())
assert_identical(actual, v)
actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
expected = Variable(("X", "Y"), v.data, v.attrs)
assert_identical(actual, expected)
def test_stack_errors(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
with pytest.raises(ValueError, match=r"invalid existing dim"):
v.stack(z=("x1",))
with pytest.raises(ValueError, match=r"cannot create a new dim"):
v.stack(x=("x",))
def test_unstack(self):
v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
actual = v.unstack(z={"x": 2, "y": 2})
expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4, "y": 1})
expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
assert_identical(actual, expected)
actual = v.unstack(z={"x": 4})
expected = Variable("x", [0, 1, 2, 3], v.attrs)
assert_identical(actual, expected)
def test_unstack_errors(self):
v = Variable("z", [0, 1, 2, 3])
with pytest.raises(ValueError, match=r"invalid existing dim"):
v.unstack(foo={"x": 4})
with pytest.raises(ValueError, match=r"cannot create a new dim"):
v.stack(z=("z",))
with pytest.raises(ValueError, match=r"the product of the new dim"):
v.unstack(z={"x": 5})
def test_unstack_2d(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.unstack(y={"z": 2})
expected = Variable(["x", "z"], v.data)
assert_identical(actual, expected)
actual = v.unstack(x={"z": 2})
expected = Variable(["y", "z"], v.data.T)
assert_identical(actual, expected)
def test_stack_unstack_consistency(self):
v = Variable(["x", "y"], [[0, 1], [2, 3]])
actual = v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 2})
assert_identical(actual, v)
def test_broadcasting_math(self):
x = np.random.randn(2, 3)
v = Variable(["a", "b"], x)
# 1d to 2d broadcasting
assert_identical(v * v, Variable(["a", "b"], np.einsum("ab,ab->ab", x, x)))
assert_identical(v * v[0], Variable(["a", "b"], np.einsum("ab,b->ab", x, x[0])))
assert_identical(v[0] * v, Variable(["b", "a"], np.einsum("b,ab->ba", x[0], x)))
assert_identical(
v[0] * v[:, 0], Variable(["b", "a"], np.einsum("b,a->ba", x[0], x[:, 0]))
)
# higher dim broadcasting
y = np.random.randn(3, 4, 5)
w = Variable(["b", "c", "d"], y)
assert_identical(
v * w, Variable(["a", "b", "c", "d"], np.einsum("ab,bcd->abcd", x, y))
)
assert_identical(
w * v, Variable(["b", "c", "d", "a"], np.einsum("bcd,ab->bcda", y, x))
)
assert_identical(
v * w[0], Variable(["a", "b", "c", "d"], np.einsum("ab,cd->abcd", x, y[0]))
)
def test_broadcasting_failures(self):
a = Variable(["x"], np.arange(10))
b = Variable(["x"], np.arange(5))
c = Variable(["x", "x"], np.arange(100).reshape(10, 10))
with pytest.raises(ValueError, match=r"mismatched lengths"):
a + b
with pytest.raises(ValueError, match=r"duplicate dimensions"):
a + c
def test_inplace_math(self):
x = np.arange(5)
v = Variable(["x"], x)
v2 = v
v2 += 1
assert v is v2
# since we provided an ndarray for data, it is also modified in-place
assert source_ndarray(v.values) is x
assert_array_equal(v.values, np.arange(5) + 1)
with pytest.raises(ValueError, match=r"dimensions cannot change"):
v += Variable("y", np.arange(5))
def test_reduce(self):
v = Variable(["x", "y"], self.d, {"ignored": "attributes"})
assert_identical(v.reduce(np.std, "x"), Variable(["y"], self.d.std(axis=0)))
assert_identical(v.reduce(np.std, axis=0), v.reduce(np.std, dim="x"))
assert_identical(
v.reduce(np.std, ["y", "x"]), Variable([], self.d.std(axis=(0, 1)))
)
assert_identical(v.reduce(np.std), Variable([], self.d.std()))
assert_identical(
v.reduce(np.mean, "x").reduce(np.std, "y"),
Variable([], self.d.mean(axis=0).std()),
)
assert_allclose(v.mean("x"), v.reduce(np.mean, "x"))
with pytest.raises(ValueError, match=r"cannot supply both"):
v.mean(dim="x", axis=0)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize(
"axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]])
)
def test_quantile(self, q, axis, dim, skipna):
v = Variable(["x", "y"], self.d)
actual = v.quantile(q, dim=dim, skipna=skipna)
_percentile_func = np.nanpercentile if skipna else np.percentile
expected = _percentile_func(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
@pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
@pytest.mark.parametrize("axis, dim", [[1, "y"], [[1], ["y"]]])
def test_quantile_dask(self, q, axis, dim):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
actual = v.quantile(q, dim=dim)
assert isinstance(actual.data, dask_array_type)
expected = np.nanpercentile(self.d, np.array(q) * 100, axis=axis)
np.testing.assert_allclose(actual.values, expected)
@requires_dask
def test_quantile_chunked_dim_error(self):
v = Variable(["x", "y"], self.d).chunk({"x": 2})
# this checks for ValueError in dask.array.apply_gufunc
with pytest.raises(ValueError, match=r"consists of multiple chunks"):
v.quantile(0.5, dim="x")
@pytest.mark.parametrize("q", [-0.1, 1.1, [2], [0.25, 2]])
def test_quantile_out_of_bounds(self, q):
v = Variable(["x", "y"], self.d)
# escape special characters
with pytest.raises(
ValueError, match=r"Quantiles must be in the range \[0, 1\]"
):
v.quantile(q, dim="x")
@requires_dask
@requires_bottleneck
def test_rank_dask_raises(self):
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0]).chunk(2)
with pytest.raises(TypeError, match=r"arrays stored as dask"):
v.rank("x")
@requires_bottleneck
def test_rank(self):
import bottleneck as bn
# floats
v = Variable(["x", "y"], [[3, 4, np.nan, 1]])
expect_0 = bn.nanrankdata(v.data, axis=0)
expect_1 = bn.nanrankdata(v.data, axis=1)
np.testing.assert_allclose(v.rank("x").values, expect_0)
np.testing.assert_allclose(v.rank("y").values, expect_1)
# int
v = Variable(["x"], [3, 2, 1])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# str
v = Variable(["x"], ["c", "b", "a"])
expect = bn.rankdata(v.data, axis=0)
np.testing.assert_allclose(v.rank("x").values, expect)
# pct
v = Variable(["x"], [3.0, 1.0, np.nan, 2.0, 4.0])
v_expect = Variable(["x"], [0.75, 0.25, np.nan, 0.5, 1.0])
assert_equal(v.rank("x", pct=True), v_expect)
# invalid dim
with pytest.raises(ValueError, match=r"not found"):
v.rank("y")
def test_big_endian_reduce(self):
# regression test for GH489
data = np.ones(5, dtype=">f4")
v = Variable(["x"], data)
expected = Variable([], 5)
assert_identical(expected, v.sum())
def test_reduce_funcs(self):
v = Variable("x", np.array([1, np.nan, 2, 3]))
assert_identical(v.mean(), Variable([], 2))
assert_identical(v.mean(skipna=True), Variable([], 2))
assert_identical(v.mean(skipna=False), Variable([], np.nan))
assert_identical(np.mean(v), Variable([], 2))
assert_identical(v.prod(), Variable([], 6))
assert_identical(v.cumsum(axis=0), Variable("x", np.array([1, 1, 3, 6])))
assert_identical(v.cumprod(axis=0), Variable("x", np.array([1, 1, 2, 6])))
assert_identical(v.var(), Variable([], 2.0 / 3))
assert_identical(v.median(), Variable([], 2))
v = Variable("x", [True, False, False])
assert_identical(v.any(), Variable([], True))
assert_identical(v.all(dim="x"), Variable([], False))
v = Variable("t", pd.date_range("2000-01-01", periods=3))
assert v.argmax(skipna=True, dim="t") == 2
assert_identical(v.max(), Variable([], pd.Timestamp("2000-01-03")))
def test_reduce_keepdims(self):
v = Variable(["x", "y"], self.d)
assert_identical(
v.mean(keepdims=True), Variable(v.dims, np.mean(self.d, keepdims=True))
)
assert_identical(
v.mean(dim="x", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=0, keepdims=True)),
)
assert_identical(
v.mean(dim="y", keepdims=True),
Variable(v.dims, np.mean(self.d, axis=1, keepdims=True)),
)
assert_identical(
v.mean(dim=["y", "x"], keepdims=True),
Variable(v.dims, np.mean(self.d, axis=(1, 0), keepdims=True)),
)
v = Variable([], 1.0)
assert_identical(
v.mean(keepdims=True), Variable([], np.mean(v.data, keepdims=True))
)
@requires_dask
def test_reduce_keepdims_dask(self):
import dask.array
v = Variable(["x", "y"], self.d).chunk()
actual = v.mean(keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, keepdims=True))
assert_identical(actual, expected)
actual = v.mean(dim="y", keepdims=True)
assert isinstance(actual.data, dask.array.Array)
expected = Variable(v.dims, np.mean(self.d, axis=1, keepdims=True))
assert_identical(actual, expected)
def test_reduce_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
v = Variable(["x", "y"], self.d, _attrs)
# Test dropped attrs
vm = v.mean()
assert len(vm.attrs) == 0
assert vm.attrs == {}
# Test kept attrs
vm = v.mean(keep_attrs=True)
assert len(vm.attrs) == len(_attrs)
assert vm.attrs == _attrs
def test_binary_ops_keep_attrs(self):
_attrs = {"units": "test", "long_name": "testing"}
a = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
b = Variable(["x", "y"], np.random.randn(3, 3), _attrs)
# Test dropped attrs
d = a - b # just one operation
assert d.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
d = a - b
assert d.attrs == _attrs
def test_count(self):
expected = Variable([], 3)
actual = Variable(["x"], [1, 2, 3, np.nan]).count()
assert_identical(expected, actual)
v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
actual = v.count()
assert_identical(expected, actual)
actual = Variable(["x"], [True, False, True]).count()
assert_identical(expected, actual)
assert actual.dtype == int
expected = Variable(["x"], [2, 3])
actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
assert_identical(expected, actual)
def test_setitem(self):
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[0, 1] = 1
assert v[0, 1] == 1
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[0, 1])] = 1
assert_array_equal(v[[0, 1]], np.ones_like(v[[0, 1]]))
# boolean indexing
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False])] = 1
assert_array_equal(v[0], np.ones_like(v[0]))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
v[dict(x=[True, False], y=[False, True, False])] = 1
assert v[0, 1] == 1
def test_setitem_fancy(self):
# assignment which should work as np.ndarray does
def assert_assigned_2d(array, key_x, key_y, values):
expected = array.copy()
expected[key_x, key_y] = values
v = Variable(["x", "y"], array)
v[dict(x=key_x, y=key_y)] = values
assert_array_equal(expected, v)
# 1d vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable((), 0),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a"], [0, 1]),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=slice(None),
key_y=Variable(["a"], [0, 1]),
values=Variable(("a"), [3, 2]),
)
# 2d-vectorized indexing
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=0,
)
assert_assigned_2d(
np.random.randn(4, 3),
key_x=Variable(["a", "b"], [[0, 1]]),
key_y=Variable(["a", "b"], [[1, 0]]),
values=[0],
)
assert_assigned_2d(
np.random.randn(5, 4),
key_x=Variable(["a", "b"], [[0, 1], [2, 3]]),
key_y=Variable(["a", "b"], [[1, 0], [3, 3]]),
values=[2, 3],
)
# vindex with slice
v = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
ind = Variable(["a"], [0, 1])
v[dict(x=ind, z=ind)] = 0
expected = Variable(["x", "y", "z"], np.ones((4, 3, 2)))
expected[0, :, 0] = 0
expected[1, :, 1] = 0
assert_identical(expected, v)
# dimension broadcast
v = Variable(["x", "y"], np.ones((3, 2)))
ind = Variable(["a", "b"], [[0, 1]])
v[ind, :] = 0
expected = Variable(["x", "y"], [[0, 0], [0, 0], [1, 1]])
assert_identical(expected, v)
with pytest.raises(ValueError, match=r"shape mismatch"):
v[ind, ind] = np.zeros((1, 2, 1))
v = Variable(["x", "y"], [[0, 3, 2], [3, 4, 5]])
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] = Variable(["a", "y"], np.ones((2, 3), dtype=int) * 10)
assert_array_equal(v[0], np.ones_like(v[0]) * 10)
assert_array_equal(v[1], np.ones_like(v[1]) * 10)
assert v.dims == ("x", "y") # dimension should not change
# increment
v = Variable(["x", "y"], np.arange(6).reshape(3, 2))
ind = Variable(["a"], [0, 1])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[1, 2], [3, 4], [4, 5]])
assert_identical(v, expected)
ind = Variable(["a"], [0, 0])
v[dict(x=ind)] += 1
expected = Variable(["x", "y"], [[2, 3], [3, 4], [4, 5]])
assert_identical(v, expected)
def test_coarsen(self):
v = self.cls(["x"], [0, 1, 2, 3, 4])
actual = v.coarsen({"x": 2}, boundary="pad", func="mean")
expected = self.cls(["x"], [0.5, 2.5, 4])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func="mean", boundary="pad", side="right")
expected = self.cls(["x"], [0, 1.5, 3.5])
assert_identical(actual, expected)
actual = v.coarsen({"x": 2}, func=np.mean, side="right", boundary="trim")
expected = self.cls(["x"], [1.5, 3.5])
assert_identical(actual, expected)
        # smoke test: just check that these combinations run without error
v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2))
for windows, func, side, boundary in [
({"x": 2}, np.mean, "left", "trim"),
({"x": 2}, np.median, {"x": "left"}, "pad"),
({"x": 2, "y": 3}, np.max, "left", {"x": "pad", "y": "trim"}),
]:
v.coarsen(windows, func, boundary, side)
def test_coarsen_2d(self):
# 2d-mean should be the same with the successive 1d-mean
v = self.cls(["x", "y"], np.arange(6 * 12).reshape(6, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean")
expected = v.coarsen({"x": 3}, func="mean").coarsen({"y": 4}, func="mean")
assert_equal(actual, expected)
v = self.cls(["x", "y"], np.arange(7 * 12).reshape(7, 12))
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = v.coarsen({"x": 3}, func="mean", boundary="trim").coarsen(
{"y": 4}, func="mean", boundary="trim"
)
assert_equal(actual, expected)
        # if there is a NaN, the two should be different
v = self.cls(["x", "y"], 1.0 * np.arange(6 * 12).reshape(6, 12))
v[2, 4] = np.nan
v[3, 5] = np.nan
actual = v.coarsen({"x": 3, "y": 4}, func="mean", boundary="trim")
expected = (
v.coarsen({"x": 3}, func="sum", boundary="trim").coarsen(
{"y": 4}, func="sum", boundary="trim"
)
/ 12
)
assert not actual.equals(expected)
# adjusting the nan count
expected[0, 1] *= 12 / 11
expected[1, 1] *= 12 / 11
assert_allclose(actual, expected)
v = self.cls(("x", "y"), np.arange(4 * 4, dtype=np.float32).reshape(4, 4))
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
expected = self.cls(("x", "y"), 4 * np.ones((2, 2)))
assert_equal(actual, expected)
v[0, 0] = np.nan
v[-1, -1] = np.nan
expected[0, 0] = 3
expected[-1, -1] = 3
actual = v.coarsen(dict(x=2, y=2), func="count", boundary="exact")
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=False)
expected = self.cls(("x", "y"), [[np.nan, 18], [42, np.nan]])
assert_equal(actual, expected)
actual = v.coarsen(dict(x=2, y=2), func="sum", boundary="exact", skipna=True)
expected = self.cls(("x", "y"), [[10, 18], [42, 35]])
assert_equal(actual, expected)
# perhaps @pytest.mark.parametrize("operation", [f for f in duck_array_ops])
def test_coarsen_keep_attrs(self, operation="mean"):
_attrs = {"units": "test", "long_name": "testing"}
test_func = getattr(duck_array_ops, operation, None)
# Test dropped attrs
with set_options(keep_attrs=False):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1}, func=test_func, boundary="exact", side="left"
)
assert new.attrs == {}
# Test kept attrs
with set_options(keep_attrs=True):
new = Variable(["coord"], np.linspace(1, 10, 100), attrs=_attrs).coarsen(
windows={"coord": 1},
func=test_func,
boundary="exact",
side="left",
)
assert new.attrs == _attrs
@requires_dask
class TestVariableWithDask(VariableSubclassobjects):
cls = staticmethod(lambda *args: Variable(*args).chunk())
@pytest.mark.xfail
def test_0d_object_array_with_list(self):
super().test_0d_object_array_with_list()
@pytest.mark.xfail
def test_array_interface(self):
# dask array does not have `argsort`
super().test_array_interface()
@pytest.mark.xfail
def test_copy_index(self):
super().test_copy_index()
@pytest.mark.xfail
def test_eq_all_dtypes(self):
super().test_eq_all_dtypes()
def test_getitem_fancy(self):
super().test_getitem_fancy()
def test_getitem_1d_fancy(self):
super().test_getitem_1d_fancy()
def test_getitem_with_mask_nd_indexer(self):
import dask.array as da
v = Variable(["x"], da.arange(3, chunks=3))
indexer = Variable(("x", "y"), [[0, -1], [-1, 2]])
assert_identical(
v._getitem_with_mask(indexer, fill_value=-1),
self.cls(("x", "y"), [[0, -1], [-1, 2]]),
)
@pytest.mark.parametrize("dim", ["x", "y"])
@pytest.mark.parametrize("window", [3, 8, 11])
@pytest.mark.parametrize("center", [True, False])
def test_dask_rolling(self, dim, window, center):
import dask
import dask.array as da
dask.config.set(scheduler="single-threaded")
x = Variable(("x", "y"), np.array(np.random.randn(100, 40), dtype=float))
dx = Variable(("x", "y"), da.from_array(x, chunks=[(6, 30, 30, 20, 14), 8]))
expected = x.rolling_window(
dim, window, "window", center=center, fill_value=np.nan
)
with raise_if_dask_computes():
actual = dx.rolling_window(
dim, window, "window", center=center, fill_value=np.nan
)
assert isinstance(actual.data, da.Array)
assert actual.shape == expected.shape
assert_equal(actual, expected)
@requires_sparse
class TestVariableWithSparse:
# TODO inherit VariableSubclassobjects to cover more tests
def test_as_sparse(self):
data = np.arange(12).reshape(3, 4)
var = Variable(("x", "y"), data)._as_sparse(fill_value=-1)
actual = var._to_dense()
assert_identical(var, actual)
class TestIndexVariable(VariableSubclassobjects):
cls = staticmethod(IndexVariable)
def test_init(self):
with pytest.raises(ValueError, match=r"must be 1-dimensional"):
IndexVariable((), 0)
def test_to_index(self):
data = 0.5 * np.arange(10)
v = IndexVariable(["time"], data, {"foo": "bar"})
assert pd.Index(data, name="time").identical(v.to_index())
def test_multiindex_default_level_names(self):
midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]])
v = IndexVariable(["x"], midx, {"foo": "bar"})
assert v.to_index().names == ("x_level_0", "x_level_1")
def test_data(self):
x = IndexVariable("x", np.arange(3.0))
assert isinstance(x._data, PandasIndexAdapter)
assert isinstance(x.data, np.ndarray)
assert float == x.dtype
assert_array_equal(np.arange(3), x)
assert float == x.values.dtype
with pytest.raises(TypeError, match=r"cannot be modified"):
x[:] = 0
def test_name(self):
coord = IndexVariable("x", [10.0])
assert coord.name == "x"
with pytest.raises(AttributeError):
coord.name = "y"
def test_level_names(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
assert x.level_names == midx.names
assert IndexVariable("y", [10.0]).level_names is None
def test_get_level_variable(self):
midx = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["level_1", "level_2"]
)
x = IndexVariable("x", midx)
level_1 = IndexVariable("x", midx.get_level_values("level_1"))
assert_identical(x.get_level_variable("level_1"), level_1)
with pytest.raises(ValueError, match=r"has no MultiIndex"):
IndexVariable("y", [10.0]).get_level_variable("level")
def test_concat_periods(self):
periods = pd.period_range("2000-01-01", periods=10)
coords = [IndexVariable("t", periods[:5]), IndexVariable("t", periods[5:])]
expected = IndexVariable("t", periods)
actual = IndexVariable.concat(coords, dim="t")
assert_identical(actual, expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
positions = [list(range(5)), list(range(5, 10))]
actual = IndexVariable.concat(coords, dim="t", positions=positions)
assert_identical(actual, expected)
assert isinstance(actual.to_index(), pd.PeriodIndex)
def test_concat_multiindex(self):
idx = pd.MultiIndex.from_product([[0, 1, 2], ["a", "b"]])
coords = [IndexVariable("x", idx[:2]), IndexVariable("x", idx[2:])]
expected = IndexVariable("x", idx)
actual = IndexVariable.concat(coords, dim="x")
assert_identical(actual, expected)
assert isinstance(actual.to_index(), pd.MultiIndex)
@pytest.mark.parametrize("dtype", [str, bytes])
def test_concat_str_dtype(self, dtype):
a = IndexVariable("x", np.array(["a"], dtype=dtype))
b = IndexVariable("x", np.array(["b"], dtype=dtype))
expected = IndexVariable("x", np.array(["a", "b"], dtype=dtype))
actual = IndexVariable.concat([a, b])
assert actual.identical(expected)
assert np.issubdtype(actual.dtype, dtype)
def test_coordinate_alias(self):
with pytest.warns(Warning, match="deprecated"):
x = Coordinate("x", [1, 2, 3])
assert isinstance(x, IndexVariable)
def test_datetime64(self):
# GH:1932 Make sure indexing keeps precision
t = np.array([1518418799999986560, 1518418799999996560], dtype="datetime64[ns]")
v = IndexVariable("t", t)
assert v[0].data == t[0]
# These tests make use of multi-dimensional variables, which are not valid
# IndexVariable objects:
@pytest.mark.skip
def test_getitem_error(self):
super().test_getitem_error()
@pytest.mark.skip
def test_getitem_advanced(self):
super().test_getitem_advanced()
@pytest.mark.skip
def test_getitem_fancy(self):
super().test_getitem_fancy()
@pytest.mark.skip
def test_getitem_uint(self):
        super().test_getitem_uint()
@pytest.mark.skip
@pytest.mark.parametrize(
"mode",
[
"mean",
"median",
"reflect",
"edge",
"linear_ramp",
"maximum",
"minimum",
"symmetric",
"wrap",
],
)
@pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS)
def test_pad(self, mode, xr_arg, np_arg):
super().test_pad(mode, xr_arg, np_arg)
@pytest.mark.skip
def test_pad_constant_values(self, xr_arg, np_arg):
super().test_pad_constant_values(xr_arg, np_arg)
@pytest.mark.skip
def test_rolling_window(self):
super().test_rolling_window()
@pytest.mark.skip
def test_rolling_1d(self):
super().test_rolling_1d()
@pytest.mark.skip
def test_nd_rolling(self):
super().test_nd_rolling()
@pytest.mark.skip
def test_rolling_window_errors(self):
super().test_rolling_window_errors()
@pytest.mark.skip
def test_coarsen_2d(self):
super().test_coarsen_2d()
class TestAsCompatibleData:
def test_unchanged_types(self):
types = (np.asarray, PandasIndexAdapter, LazilyIndexedArray)
for t in types:
for data in [
np.arange(3),
pd.date_range("2000-01-01", periods=3),
pd.date_range("2000-01-01", periods=3).values,
]:
x = t(data)
assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
def test_converted_types(self):
for input_array in [[[0, 1, 2]], pd.DataFrame([[0, 1, 2]])]:
actual = as_compatible_data(input_array)
assert_array_equal(np.asarray(input_array), actual)
assert np.ndarray == type(actual)
assert np.asarray(input_array).dtype == actual.dtype
def test_masked_array(self):
original = np.ma.MaskedArray(np.arange(5))
expected = np.arange(5)
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(int) == actual.dtype
original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
expected = np.arange(5.0)
expected[-1] = np.nan
actual = as_compatible_data(original)
assert_array_equal(expected, actual)
assert np.dtype(float) == actual.dtype
def test_datetime(self):
expected = np.datetime64("2000-01-01")
actual = as_compatible_data(expected)
assert expected == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
expected = np.array([np.datetime64("2000-01-01", "ns")])
actual = as_compatible_data(expected)
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
assert expected is source_ndarray(np.asarray(actual))
expected = np.datetime64("2000-01-01", "ns")
actual = as_compatible_data(datetime(2000, 1, 1))
assert np.asarray(expected) == actual
assert np.ndarray == type(actual)
assert np.dtype("datetime64[ns]") == actual.dtype
def test_full_like(self):
# For more thorough tests, see test_variable.py
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
expect = orig.copy(deep=True)
expect.values = [[2.0, 2.0], [2.0, 2.0]]
assert_identical(expect, full_like(orig, 2))
# override dtype
expect.values = [[True, True], [True, True]]
assert expect.dtype == bool
assert_identical(expect, full_like(orig, True, dtype=bool))
# raise error on non-scalar fill_value
with pytest.raises(ValueError, match=r"must be scalar"):
full_like(orig, [1.0, 2.0])
with pytest.raises(ValueError, match="'dtype' cannot be dict-like"):
full_like(orig, True, dtype={"x": bool})
@requires_dask
def test_full_like_dask(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
).chunk(((1, 1), (2,)))
def check(actual, expect_dtype, expect_values):
assert actual.dtype == expect_dtype
assert actual.shape == orig.shape
assert actual.dims == orig.dims
assert actual.attrs == orig.attrs
assert actual.chunks == orig.chunks
assert_array_equal(actual.values, expect_values)
check(full_like(orig, 2), orig.dtype, np.full_like(orig.values, 2))
# override dtype
check(
full_like(orig, True, dtype=bool),
bool,
np.full_like(orig.values, True, dtype=bool),
)
# Check that there's no array stored inside dask
# (e.g. we didn't create a numpy array and then we chunked it!)
dsk = full_like(orig, 1).data.dask
for v in dsk.values():
if isinstance(v, tuple):
for vi in v:
assert not isinstance(vi, np.ndarray)
else:
assert not isinstance(v, np.ndarray)
def test_zeros_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(zeros_like(orig), full_like(orig, 0))
assert_identical(zeros_like(orig, dtype=int), full_like(orig, 0, dtype=int))
def test_ones_like(self):
orig = Variable(
dims=("x", "y"), data=[[1.5, 2.0], [3.1, 4.3]], attrs={"foo": "bar"}
)
assert_identical(ones_like(orig), full_like(orig, 1))
assert_identical(ones_like(orig, dtype=int), full_like(orig, 1, dtype=int))
def test_unsupported_type(self):
# Non indexable type
class CustomArray(NDArrayMixin):
def __init__(self, array):
self.array = array
class CustomIndexable(CustomArray, indexing.ExplicitlyIndexed):
pass
# Type with data stored in values attribute
class CustomWithValuesAttr:
def __init__(self, array):
self.values = array
array = CustomArray(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, np.ndarray) # should not be CustomArray
array = CustomIndexable(np.arange(3))
orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
assert isinstance(orig._data, CustomIndexable)
array = CustomWithValuesAttr(np.arange(3))
orig = Variable(dims=(), data=array)
assert isinstance(orig._data.item(), CustomWithValuesAttr)
def test_raise_no_warning_for_nan_in_binary_ops():
with pytest.warns(None) as record:
Variable("x", [1, 2, np.NaN]) > 0
assert len(record) == 0
class TestBackendIndexing:
"""Make sure all the array wrappers can be indexed."""
@pytest.fixture(autouse=True)
def setUp(self):
self.d = np.random.random((10, 3)).astype(np.float64)
def check_orthogonal_indexing(self, v):
assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
def check_vectorized_indexing(self, v):
ind_x = Variable("z", [0, 2])
ind_y = Variable("z", [2, 1])
assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
def test_NumpyIndexingAdapter(self):
v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
        # cannot be doubly wrapped
with pytest.raises(TypeError, match=r"NumpyIndexingAdapter only wraps "):
v = Variable(
dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
)
def test_LazilyIndexedArray(self):
v = Variable(dims=("x", "y"), data=LazilyIndexedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(
dims=("x", "y"),
data=LazilyIndexedArray(LazilyIndexedArray(self.d)),
)
self.check_orthogonal_indexing(v)
# hierarchical wrapping
v = Variable(
dims=("x", "y"), data=LazilyIndexedArray(NumpyIndexingAdapter(self.d))
)
self.check_orthogonal_indexing(v)
def test_CopyOnWriteArray(self):
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(LazilyIndexedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_MemoryCachedArray(self):
v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
@requires_dask
def test_DaskIndexingAdapter(self):
import dask.array as da
da = da.asarray(self.d)
v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
# doubly wrapping
v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
self.check_orthogonal_indexing(v)
self.check_vectorized_indexing(v)
def test_clip(var):
# Copied from test_dataarray (would there be a way to combine the tests?)
result = var.clip(min=0.5)
assert result.min(...) >= 0.5
result = var.clip(max=0.5)
assert result.max(...) <= 0.5
result = var.clip(min=0.25, max=0.75)
assert result.min(...) >= 0.25
assert result.max(...) <= 0.75
result = var.clip(min=var.mean("x"), max=var.mean("z"))
assert result.dims == var.dims
assert_array_equal(
result.data,
np.clip(
var.data,
var.mean("x").data[np.newaxis, :, :],
var.mean("z").data[:, :, np.newaxis],
),
)
| 36.918993
| 88
| 0.550301
|
eb6f0b9fe83d901a74fad547e2e3fc8e4bf7e7c7
| 14,552
|
py
|
Python
|
inspectors/utils/admin.py
|
divergentdave/inspectors-general
|
611d5ab2009390076ff59d3a638fc0cf8c23dbd4
|
[
"CC0-1.0"
] | null | null | null |
inspectors/utils/admin.py
|
divergentdave/inspectors-general
|
611d5ab2009390076ff59d3a638fc0cf8c23dbd4
|
[
"CC0-1.0"
] | null | null | null |
inspectors/utils/admin.py
|
divergentdave/inspectors-general
|
611d5ab2009390076ff59d3a638fc0cf8c23dbd4
|
[
"CC0-1.0"
] | null | null | null |
# functions for communicating with an admin
import os
import sys
import traceback
import yaml
import logging
import re
import atexit
import requests
import scrapelib
import smtplib
import email.utils
from email.mime.text import MIMEText
import json
import urllib.request
import urllib.parse
# read in an opt-in config file for changing directories and supplying settings
# returns None if it's not there, and this should always be handled gracefully
path = "admin.yml"
if os.path.exists(path):
  config = yaml.safe_load(open(path))
else:
config = None
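# A hedged sketch of the config shape this module expects once admin.yml has
# been loaded (keys inferred from the handlers defined below; all values are
# placeholders, not part of the original file):
#
#   config = {
#       "email": {"to": "...", "from": "...", "from_name": "...", "subject": "...",
#                 "hostname": "...", "port": 587, "starttls": True,
#                 "user_name": "...", "password": "..."},
#       "slack": {"webhook": "https://hooks.slack.com/services/...", "channel": "#alerts"},
#       "dashboard": {"url": "https://example.com/api", "secret": "..."},
#   }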
def log_exception(e):
for error_handler in error_handlers:
try:
error_handler.log_exception(e)
except Exception as exception:
print("Exception logging message to admin, halting as to avoid loop")
print(format_exception(exception))
def log_duplicate_id(scraper, report_id, msg):
for error_handler in error_handlers:
try:
error_handler.log_duplicate_id(scraper, report_id, msg)
except Exception as exception:
print("Exception logging message to admin, halting as to avoid loop")
print(format_exception(exception))
def log_no_date(scraper, report_id, title, url=None):
for error_handler in error_handlers:
try:
error_handler.log_no_date(scraper, report_id, title, url)
except Exception as exception:
print("Exception logging message to admin, halting as to avoid loop")
print(format_exception(exception))
def log_report(scraper):
for error_handler in error_handlers:
try:
error_handler.log_report(scraper)
except Exception as exception:
print(format_exception(exception))
def log_qa(report_text):
for error_handler in error_handlers:
try:
error_handler.log_qa(report_text)
except Exception as exception:
print(format_exception(exception))
def log_http_error(e, url, scraper=None):
if isinstance(e, scrapelib.HTTPError):
for error_handler in error_handlers:
try:
error_handler.log_http_error(e, url, scraper)
except Exception as exception:
print(format_exception(exception))
elif isinstance(e, requests.exceptions.ConnectionError):
for error_handler in error_handlers:
try:
error_handler.log_connection_error(e, url, scraper)
except Exception as exception:
print(format_exception(exception))
else:
# fallback for connection errors, retry errors
log_exception(e)
def format_exception(exception):
exc_type, exc_value, exc_traceback = sys.exc_info()
return "\n".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
def parse_scraper_traceback():
exc_traceback = sys.exc_info()[2]
for filename, line_number, function_name, text in traceback.extract_tb(exc_traceback):
inspector_match = INSPECTOR_RE.match(filename)
if inspector_match:
return inspector_match.group(1), line_number, function_name
return None, None, None
def copy_if_present(key, src, dst):
if key in src:
dst[key] = src[key]
INSPECTOR_RE = re.compile("^inspectors(?:/|\\\\)([a-z]+)\\.py$")
HTTP_ERROR_RE = re.compile('''scrapelib\\.HTTPError: ([0-9]+) while retrieving ([^\n]+)\n''')
TRACEBACK_STR = "Traceback (most recent call last):"
class ErrorHandler(object):
def log_report(self, scraper):
pass
def log_no_date(self, scraper, report_id, title, url):
if url is None:
message = ("[%s] No date was found for %s, \"%s\""
% (scraper, report_id, title))
else:
message = ("[%s] No date was found for %s, \"%s\" (%s)"
% (scraper, report_id, title, url.replace(" ", "%20")))
self.log(message)
def log_exception(self, exception):
self.log(format_exception(exception))
class ConsoleErrorHandler(ErrorHandler):
def __init__(self):
self.uniqueness_messages = []
atexit.register(self.print_duplicate_messages)
def log_duplicate_id(self, scraper, report_id, msg):
self.uniqueness_messages.append(msg)
def print_duplicate_messages(self):
if self.uniqueness_messages:
self.log("\n".join(self.uniqueness_messages))
def log(self, body):
logging.error(body)
def log_http_error(self, exception, url, scraper):
# intentionally print instead of using logging,
# so that all 404s get printed at the end of the log
print("Error downloading %s:\n\n%s" % (url, format_exception(exception)))
def log_connection_error(self, exception, url, scraper):
# intentionally print instead of using logging,
# so that all connection errors get printed at the end of the log
print("Error downloading %s:\n\n%s" % (url, format_exception(exception)))
def log_qa(self, text):
self.log(text)
class EmailErrorHandler(ErrorHandler):
def __init__(self):
self.uniqueness_messages = []
atexit.register(self.print_duplicate_messages)
def log_duplicate_id(self, scraper, report_id, msg):
self.uniqueness_messages.append(msg)
def print_duplicate_messages(self):
if self.uniqueness_messages:
self.log("\n".join(self.uniqueness_messages))
def log(self, body):
settings = config['email']
if (not settings.get('to') or not settings.get('from') or
not settings.get('from_name') or not settings.get('hostname')):
return
# adapted from http://www.doughellmann.com/PyMOTW/smtplib/
msg = MIMEText(body)
msg.set_unixfrom('author')
msg['To'] = email.utils.formataddr(('Recipient', settings['to']))
msg['From'] = email.utils.formataddr((settings['from_name'], settings['from']))
msg['Subject'] = settings['subject']
server = smtplib.SMTP(settings['hostname'], settings.get('port', 0))
try:
server.ehlo()
if settings.get('starttls') and server.has_extn('STARTTLS'):
server.starttls()
server.ehlo()
if 'user_name' in settings and 'password' in settings:
server.login(settings['user_name'], settings['password'])
server.sendmail(settings['from'], [settings['to']], msg.as_string())
finally:
server.quit()
logging.info("Sent email to %s" % settings['to'])
def log_http_error(self, exception, url, scraper):
pass
def log_connection_error(self, exception, url, scraper):
pass
def log_qa(self, text):
pass
def exception_name(exception):
try:
return "%s.%s" % (exception.__module__, exception.__class__.__name__)
except AttributeError:
return exception.__class__.__name__
def unwrap_exception(e):
if isinstance(e, requests.exceptions.ConnectionError):
if len(e.args) > 0 and isinstance(e.args[0], BaseException):
return unwrap_exception(e.args[0])
if isinstance(e, requests.packages.urllib3.exceptions.MaxRetryError):
if isinstance(e.reason, BaseException):
return unwrap_exception(e.reason)
if isinstance(e, requests.packages.urllib3.exceptions.SSLError):
if len(e.args) > 0 and isinstance(e.args[0], BaseException):
return unwrap_exception(e.args[0])
return e
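# Illustration of the unwrapping above (a hedged sketch, not part of the
# original module; uncomment to run): a ConnectionError wrapping a
# MaxRetryError whose .reason is an SSLError unwraps to the innermost
# SSLError, so the handlers below report the root cause instead of the
# outermost wrapper.
#
#   inner = requests.packages.urllib3.exceptions.SSLError("certificate verify failed")
#   outer = requests.exceptions.ConnectionError(
#       requests.packages.urllib3.exceptions.MaxRetryError(None, "https://example.com", inner))
#   assert unwrap_exception(outer) is inner
#   assert exception_name(inner).endswith("SSLError")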
class SlackErrorHandler(ErrorHandler):
def __init__(self):
self.options = config.get("slack")
self.uniqueness_messages = []
atexit.register(self.print_duplicate_messages)
def log_duplicate_id(self, scraper, report_id, msg):
self.uniqueness_messages.append(msg)
def print_duplicate_messages(self):
if self.uniqueness_messages:
self.send_message({
"text": "\n".join(self.uniqueness_messages)
})
def send_message(self, message):
copy_if_present("username", self.options, message)
copy_if_present("icon_url", self.options, message)
copy_if_present("icon_emoji", self.options, message)
copy_if_present("channel", self.options, message)
message_json = json.dumps(message)
message_bytes = message_json.encode("utf-8")
request = urllib.request.Request(self.options["webhook"], message_bytes)
request.add_header("Content-Type", "application/json; charset=utf-8")
urllib.request.urlopen(request)
def log_http_error(self, exception, url, scraper):
http_status_code = exception.response.status_code
body = format_exception(exception)
pretext = ("[%s] %s error while downloading %s" %
(scraper, http_status_code, url))
self.send_message({
"attachments": [
{
"fallback": pretext,
"text": body,
"color": "warning",
"pretext": pretext
}
]
})
def log_connection_error(self, exception, url, scraper):
body = format_exception(exception)
class_name = exception_name(unwrap_exception(exception))
pretext = ("[%s] %s while downloading %s" %
(scraper, class_name, url))
self.send_message({
"attachments": [
{
"fallback": pretext,
"text": body,
"color": "warning",
"pretext": pretext
}
]
})
def log_exception(self, exception):
class_name = exception_name(exception)
scraper, line_num, function = parse_scraper_traceback()
fallback = "%s: %s" % (class_name, exception)
pretext = ("%s was thrown while running %s.py (line %s, in function %s)" %
(class_name, scraper, line_num, function))
self.send_message({
"attachments": [
{
"fallback": fallback,
"text": format_exception(exception),
"color": "danger",
"pretext": pretext
}
]
})
def log_qa(self, text):
fallback = text.split("\n", 1)[0]
self.send_message({
"attachments": [
{
"fallback": fallback,
"text": text,
"color": "danger",
"pretext": fallback
}
]
})
def log_no_date(self, scraper, report_id, title, url):
if url is None:
message = ("[%s] No date was found for %s, \"%s\""
% (scraper, report_id, title))
else:
message = ("[%s] No date was found for %s, \"%s\" (%s)"
% (scraper, report_id, title, url.replace(" ", "%20")))
self.send_message({
"attachments": [
{
"fallback": message,
"text": message,
"color": "warning"
}
]
})
class DashboardErrorHandler(ErrorHandler):
def __init__(self):
self.options = config.get("dashboard")
self.dashboard_data = {}
atexit.register(self.dashboard_send)
def log_http_error(self, exception, url, scraper):
if scraper is None:
return
http_status_code = exception.response.status_code
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "http_errors" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["http_errors"] = []
entry = {
"status_code": http_status_code,
"url": url
}
self.dashboard_data[scraper]["http_errors"].append(entry)
def log_connection_error(self, exception, url, scraper):
if scraper is None:
return
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "http_errors" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["http_errors"] = []
class_name = exception_name(unwrap_exception(exception))
entry = {
"status_code": None,
"url": url,
"exception_name": class_name
}
self.dashboard_data[scraper]["http_errors"].append(entry)
def log_duplicate_id(self, scraper, report_id, msg):
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "duplicate_ids" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["duplicate_ids"] = []
self.dashboard_data[scraper]["duplicate_ids"].append(str(report_id))
def log_exception(self, exception):
class_name = exception_name(exception)
scraper, line_num, function = parse_scraper_traceback()
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "exceptions" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["exceptions"] = []
entry = {
"class_name": class_name,
"filename": "inspectors/%s.py" % scraper,
"line_num": line_num,
"function": function,
"traceback": format_exception(exception)
}
self.dashboard_data[scraper]["exceptions"].append(entry)
def log_no_date(self, scraper, report_id, title, url):
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "missing_dates" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["missing_dates"] = []
entry = {
"report_id": report_id,
"title": title,
"url": url
}
self.dashboard_data[scraper]["missing_dates"].append(entry)
def log_qa(self, text):
pass
def dashboard_send(self):
if not self.dashboard_data:
return
for scraper in self.dashboard_data:
if "exceptions" in self.dashboard_data[scraper]:
severity = 2
elif "duplicate_ids" in self.dashboard_data[scraper]:
severity = 1
elif "missing_dates" in self.dashboard_data[scraper]:
severity = 1
elif "http_errors" in self.dashboard_data[scraper]:
severity = 1
else:
severity = 0
self.dashboard_data[scraper]["severity"] = severity
if "duplicate_ids" in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["duplicate_ids"].sort()
if "report_count" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["report_count"] = 0
options = config["dashboard"]
message_json = json.dumps(self.dashboard_data)
message_bytes = message_json.encode("utf-8")
url = options["url"] + "?secret=" + urllib.parse.quote(options["secret"])
request = urllib.request.Request(url, message_bytes)
request.add_header("Content-Type", "application/json; charset=utf-8")
request.get_method = lambda: "PUT"
urllib.request.urlopen(request)
def log_report(self, scraper):
if scraper not in self.dashboard_data:
self.dashboard_data[scraper] = {}
if "report_count" not in self.dashboard_data[scraper]:
self.dashboard_data[scraper]["report_count"] = 1
else:
self.dashboard_data[scraper]["report_count"] = (1 +
self.dashboard_data[scraper]["report_count"])
error_handlers = [ConsoleErrorHandler()]
if config:
if config.get("email"):
error_handlers.append(EmailErrorHandler())
if config.get("slack"):
error_handlers.append(SlackErrorHandler())
if config.get("dashboard"):
if config["dashboard"].get("secret"):
error_handlers.append(DashboardErrorHandler())
| 31.227468
| 93
| 0.674478
|
622c67170425fd24dd17deb7fea4364d566caedc
| 1,626
|
py
|
Python
|
sparse_operation_kit/sparse_operation_kit/__init__.py
|
marsmiao/HugeCTR
|
c9ff359a69565200fcc0c7aae291d9c297bea70e
|
[
"Apache-2.0"
] | null | null | null |
sparse_operation_kit/sparse_operation_kit/__init__.py
|
marsmiao/HugeCTR
|
c9ff359a69565200fcc0c7aae291d9c297bea70e
|
[
"Apache-2.0"
] | null | null | null |
sparse_operation_kit/sparse_operation_kit/__init__.py
|
marsmiao/HugeCTR
|
c9ff359a69565200fcc0c7aae291d9c297bea70e
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2021, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
print("[INFO]: %s is imported" %__name__)
from sparse_operation_kit.core._version import __version__
import os
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["NCCL_LAUNCH_MODE"] = "PARALLEL"
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
os.environ["TF_GPU_THREAD_COUNT"] = "16"
# ---------- import submodule ----------- #
import sparse_operation_kit.embeddings
import sparse_operation_kit.optimizers
import sparse_operation_kit.core
# ------------ import items into root package -------- #
from sparse_operation_kit.embeddings import test
from sparse_operation_kit.core.initialize import Init
from sparse_operation_kit.core.context_scope import OptimizerScope
from sparse_operation_kit.embeddings.distributed_embedding import DistributedEmbedding
from sparse_operation_kit.embeddings.all2all_dense_embedding import All2AllDenseEmbedding
from sparse_operation_kit.saver.Saver import Saver
from sparse_operation_kit.optimizers.utils import split_embedding_variable_from_others
| 40.65
| 89
| 0.798278
|
0d9db5cd92c293752edb49e912a26ee153f7484f
| 4,293
|
py
|
Python
|
examples/soce_python/clients/set-preferences.py
|
gsi-upm/sawtooth-soce
|
ff9a8e396d91d6ce5a35803dcf8a10826dc5f0ed
|
[
"Apache-2.0"
] | null | null | null |
examples/soce_python/clients/set-preferences.py
|
gsi-upm/sawtooth-soce
|
ff9a8e396d91d6ce5a35803dcf8a10826dc5f0ed
|
[
"Apache-2.0"
] | null | null | null |
examples/soce_python/clients/set-preferences.py
|
gsi-upm/sawtooth-soce
|
ff9a8e396d91d6ce5a35803dcf8a10826dc5f0ed
|
[
"Apache-2.0"
] | null | null | null |
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from hashlib import sha512
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
import cbor
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_sdk.protobuf.batch_pb2 import BatchList
import urllib.request
from urllib.error import HTTPError
import hashlib
def _sha512(data):
return hashlib.sha512(data).hexdigest()
def _get_prefix():
return _sha512("soce".encode('utf-8'))[0:6]
def _get_address(name):
soce_prefix = _get_prefix()
name_address = _sha512(name.encode('utf-8'))[0:64]
return soce_prefix + name_address
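# Worked note on the address layout (illustrative, derived from the helpers
# above): _get_prefix() is the first 6 hex characters of sha512("soce") and
# _get_address(name) appends the first 64 hex characters of sha512(name),
# so every state address is a fixed 70-character hex string, e.g.
#
#   assert len(_get_address("voting1")) == 70
#   assert _get_address("voting1").startswith(_get_prefix())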
context = create_context('secp256k1')
private_key = context.new_random_private_key()
signer = CryptoFactory(context).new_signer(private_key)
action = 'set-preferences'
name_id = 'voting1'
configurations_preferences_id = 'voter1'
sc_method = {'a': 10, 'b': 10}
payload = {
'action': action,
'name_id': name_id,
'configurations_preferences_id': configurations_preferences_id,
'sc_method': sc_method
}
address = _get_address(str(name_id))
address2 = _get_address(str(configurations_preferences_id))
#payload_bytes = cbor.dumps(payload)
payload_bytes = ";".join([str(action), str(name_id),
str(configurations_preferences_id),
str(sc_method)]).encode()
print(1, payload_bytes)
txn_header_bytes = TransactionHeader(
family_name='soce',
family_version='1.0',
inputs=[address, address2],
outputs=[address, address2],
signer_public_key = signer.get_public_key().as_hex(),
# In this example, we're signing the batch with the same private key,
# but the batch can be signed by another party, in which case, the
# public key will need to be associated with that key.
batcher_public_key = signer.get_public_key().as_hex(),
# In this example, there are no dependencies. This list should include
    # any previous transaction header signatures that must be applied for
# this transaction to successfully commit.
# For example,
# dependencies=['540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359c4d175516934484a'],
dependencies=[],
payload_sha512=sha512(payload_bytes).hexdigest()
).SerializeToString()
print(2, TransactionHeader(
family_name='soce',
family_version='1.0',
inputs=[address, address2],
outputs=[address, address2],
signer_public_key = signer.get_public_key().as_hex(),
# In this example, we're signing the batch with the same private key,
# but the batch can be signed by another party, in which case, the
# public key will need to be associated with that key.
batcher_public_key = signer.get_public_key().as_hex(),
# In this example, there are no dependencies. This list should include
    # any previous transaction header signatures that must be applied for
# this transaction to successfully commit.
# For example,
# dependencies=['540a6803971d1880ec73a96cb97815a95d374cbad5d865925e5aa0432fcf1931539afe10310c122c5eaae15df61236079abbf4f258889359c4d175516934484a'],
dependencies=[],
payload_sha512=sha512(payload_bytes).hexdigest()
))
print(3, txn_header_bytes)
signature = signer.sign(txn_header_bytes)
print(4, signature)
txn = Transaction(
header=txn_header_bytes,
header_signature=signature,
payload=payload_bytes
)
print(5, txn)
txns = [txn]
batch_header_bytes = BatchHeader(
signer_public_key=signer.get_public_key().as_hex(),
transaction_ids=[txn.header_signature for txn in txns],
).SerializeToString()
signature = signer.sign(batch_header_bytes)
batch = Batch(
header=batch_header_bytes,
header_signature=signature,
transactions=txns
)
batch_list_bytes = BatchList(batches=[batch]).SerializeToString()
print(1, signature)
print(2, batch_list_bytes)
try:
request = urllib.request.Request(
'http://localhost:8008/batches',
batch_list_bytes,
method='POST',
headers={'Content-Type': 'application/octet-stream'})
response = urllib.request.urlopen(request)
except HTTPError as e:
response = e.file
| 32.037313
| 152
| 0.753785
|
8c9de99b1f0fbaab20449e85898caf016044c070
| 5,229
|
py
|
Python
|
zerver/views/user_groups.py
|
ricardoteixeiraduarte/zulip
|
149132348feda1c6929e94e72abb167cc882fc74
|
[
"Apache-2.0"
] | 3
|
2018-12-04T01:44:43.000Z
|
2019-05-13T06:16:21.000Z
|
zerver/views/user_groups.py
|
ricardoteixeiraduarte/zulip
|
149132348feda1c6929e94e72abb167cc882fc74
|
[
"Apache-2.0"
] | 58
|
2018-11-27T15:18:54.000Z
|
2018-12-09T13:43:07.000Z
|
zerver/views/user_groups.py
|
ricardoteixeiraduarte/zulip
|
149132348feda1c6929e94e72abb167cc882fc74
|
[
"Apache-2.0"
] | 9
|
2019-11-04T18:59:29.000Z
|
2022-03-22T17:46:37.000Z
|
from django.http import HttpResponse, HttpRequest
from django.utils.translation import ugettext as _
from typing import List
from zerver.decorator import require_non_guest_human_user
from zerver.context_processors import get_realm_from_request
from zerver.lib.actions import check_add_user_group, do_update_user_group_name, \
do_update_user_group_description, bulk_add_members_to_user_group, \
remove_members_from_user_group, check_delete_user_group
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.users import user_ids_to_users
from zerver.lib.validator import check_list, check_string, check_int, \
check_short_string
from zerver.lib.user_groups import access_user_group_by_id, get_memberships_of_users, \
get_user_group_members, user_groups_in_realm_serialized
from zerver.models import UserProfile, UserGroup, UserGroupMembership
from zerver.views.streams import compose_views, FuncKwargPair
@require_non_guest_human_user
@has_request_variables
def add_user_group(request: HttpRequest, user_profile: UserProfile,
name: str=REQ(),
members: List[int]=REQ(validator=check_list(check_int), default=[]),
description: str=REQ()) -> HttpResponse:
user_profiles = user_ids_to_users(members, user_profile.realm)
check_add_user_group(user_profile.realm, name, user_profiles, description)
return json_success()
@require_non_guest_human_user
@has_request_variables
def get_user_group(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
user_groups = user_groups_in_realm_serialized(user_profile.realm)
return json_success({"user_groups": user_groups})
@require_non_guest_human_user
@has_request_variables
def edit_user_group(request: HttpRequest, user_profile: UserProfile,
user_group_id: int=REQ(validator=check_int),
name: str=REQ(default=""), description: str=REQ(default="")
) -> HttpResponse:
if not (name or description):
return json_error(_("No new data supplied"))
user_group = access_user_group_by_id(user_group_id, user_profile)
result = {}
if name != user_group.name:
do_update_user_group_name(user_group, name)
result['name'] = _("Name successfully updated.")
if description != user_group.description:
do_update_user_group_description(user_group, description)
result['description'] = _("Description successfully updated.")
return json_success(result)
@require_non_guest_human_user
@has_request_variables
def delete_user_group(request: HttpRequest, user_profile: UserProfile,
user_group_id: int=REQ(validator=check_int)) -> HttpResponse:
check_delete_user_group(user_group_id, user_profile)
return json_success()
@require_non_guest_human_user
@has_request_variables
def update_user_group_backend(request: HttpRequest, user_profile: UserProfile,
user_group_id: int=REQ(validator=check_int),
delete: List[int]=REQ(validator=check_list(check_int), default=[]),
add: List[int]=REQ(validator=check_list(check_int), default=[])
) -> HttpResponse:
if not add and not delete:
return json_error(_('Nothing to do. Specify at least one of "add" or "delete".'))
method_kwarg_pairs = [
(add_members_to_group_backend,
dict(user_group_id=user_group_id, members=add)),
(remove_members_from_group_backend,
dict(user_group_id=user_group_id, members=delete))
] # type: List[FuncKwargPair]
return compose_views(request, user_profile, method_kwarg_pairs)
def add_members_to_group_backend(request: HttpRequest, user_profile: UserProfile,
user_group_id: int, members: List[int]) -> HttpResponse:
if not members:
return json_success()
user_group = access_user_group_by_id(user_group_id, user_profile)
user_profiles = user_ids_to_users(members, user_profile.realm)
existing_member_ids = set(get_memberships_of_users(user_group, user_profiles))
for user_profile in user_profiles:
if user_profile.id in existing_member_ids:
raise JsonableError(_("User %s is already a member of this group" % (user_profile.id,)))
bulk_add_members_to_user_group(user_group, user_profiles)
return json_success()
def remove_members_from_group_backend(request: HttpRequest, user_profile: UserProfile,
user_group_id: int, members: List[int]) -> HttpResponse:
if not members:
return json_success()
user_profiles = user_ids_to_users(members, user_profile.realm)
user_group = access_user_group_by_id(user_group_id, user_profile)
group_member_ids = get_user_group_members(user_group)
for member in members:
        if member not in group_member_ids:
            raise JsonableError(_("There is no member '%s' in this user group") % (member,))
remove_members_from_user_group(user_group, user_profiles)
return json_success()
| 45.077586
| 100
| 0.735705
|
c4276690750fcd98ab95c58d0fc26e6a46544ad6
| 872
|
py
|
Python
|
tomltoml/tomltoken.py
|
wting/tomltoml
|
9ff477e629a50cc6deb3d729bcc0a835fd0e3acf
|
[
"MIT"
] | null | null | null |
tomltoml/tomltoken.py
|
wting/tomltoml
|
9ff477e629a50cc6deb3d729bcc0a835fd0e3acf
|
[
"MIT"
] | null | null | null |
tomltoml/tomltoken.py
|
wting/tomltoml
|
9ff477e629a50cc6deb3d729bcc0a835fd0e3acf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is purposely named tomltoken instead of token to prevent shadowing
# the native Python token module
from __future__ import absolute_import
from __future__ import unicode_literals
class Word(object):
def __init__(self, value):
self.value = value
class Comment(Word):
pass
class Operator(object):
priority = None
symbol = '\n'
class Newline(Operator):
priority = 10
symbol = '\n'
class SingleQuote(Operator):
priority = 9
symbol = '\''
class DoubleQuote(Operator):
priority = 9
symbol = '"'
class Assignment(Operator):
priority = 1
symbol = '='
# TODO(wting|2016-09-16): support inline tables
# TODO(wting|2016-09-16): support table arrays
# TODO(wting|2016-09-16): support dot tables
class TableStart(Operator):
pass
class TableEnd(Operator):
pass
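# Illustration (a sketch, not part of this file): the symbol attributes above
# let a tokenizer map raw characters to their operator classes, e.g.
#
#   OPERATORS = {cls.symbol: cls for cls in (Newline, SingleQuote, DoubleQuote, Assignment)}
#   assert OPERATORS['='] is Assignment
#   assert OPERATORS['\n'] is Newline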
| 15.034483
| 78
| 0.674312
|
4629d04766d470cfcfd5f6bd9fa83fc70fdca9d0
| 623
|
py
|
Python
|
google-cloud-sdk/lib/googlecloudsdk/command_lib/util/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/lib/googlecloudsdk/command_lib/util/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/command_lib/util/__init__.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package marker file."""
| 38.9375
| 74
| 0.757624
|
440afecaa966d4e408be1e329a69c226d7596162
| 1,084
|
py
|
Python
|
lightkurve/io/k2sff.py
|
barentsen/lightkurve
|
5b1693832bc509e42742d1b6f20224d131e62d8c
|
[
"MIT"
] | 1
|
2021-05-07T10:42:01.000Z
|
2021-05-07T10:42:01.000Z
|
lightkurve/io/k2sff.py
|
dhomeier/lightkurve
|
ea53c81f3d7617441a02288ed84c016e8ef80ceb
|
[
"MIT"
] | 7
|
2018-07-14T17:49:36.000Z
|
2020-09-24T19:58:13.000Z
|
lightkurve/io/k2sff.py
|
barentsen/lightkurve
|
5b1693832bc509e42742d1b6f20224d131e62d8c
|
[
"MIT"
] | null | null | null |
"""Reader function for K2SFF community light curve products."""
from ..lightcurve import KeplerLightCurve
from ..utils import validate_method
from .generic import read_generic_lightcurve
def read_k2sff_lightcurve(filename, ext="BESTAPER", **kwargs):
"""Read a K2SFF light curve file.
More information: https://archive.stsci.edu/hlsp/k2sff
Parameters
----------
filename : str
Path or URL of a K2SFF light curve FITS file.
ext : str
Version of the light curve to use. Valid options include "BESTAPER",
"CIRC_APER0" through "CIRC_APER9", and "PRF_APER0" through "PRF_APER9".
Returns
-------
lc : `KeplerLightCurve`
A populated light curve object.
"""
lc = read_generic_lightcurve(filename,
flux_column="fcor",
time_format='bkjd',
ext=ext)
lc.meta['label'] = '{} (K2SFF)'.format(lc.meta.get("object"))
lc.meta['targetid'] = lc.meta.get('keplerid')
return KeplerLightCurve(data=lc, **kwargs)
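# A minimal usage sketch (the file name is a placeholder, not a real K2SFF
# product path; extra keyword arguments are passed through to KeplerLightCurve):
#
#   lc = read_k2sff_lightcurve("hlsp_k2sff_example-lc.fits", ext="CIRC_APER2")
#   lc.plot()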
| 30.971429
| 79
| 0.613469
|
17289f9e07f12298a36e81bbb9431320ddb3ad65
| 314
|
py
|
Python
|
openspoor/spoortakmodel/singleton.py
|
ProRail-DataLab/openspoor
|
548c7ffc12a5a97459bafe5327aa9b0d0546537c
|
[
"MIT"
] | 7
|
2022-01-28T09:54:10.000Z
|
2022-03-25T10:02:08.000Z
|
openspoor/spoortakmodel/singleton.py
|
ProRail-DataLab/openspoor
|
548c7ffc12a5a97459bafe5327aa9b0d0546537c
|
[
"MIT"
] | 11
|
2022-03-17T12:48:30.000Z
|
2022-03-25T11:22:39.000Z
|
openspoor/spoortakmodel/singleton.py
|
ProRail-DataLab/openspoor
|
548c7ffc12a5a97459bafe5327aa9b0d0546537c
|
[
"MIT"
] | 2
|
2022-03-16T14:11:48.000Z
|
2022-03-25T09:10:30.000Z
|
class Singleton(object):
""" helper class to implement the singleton pattern """
_instances = {}
def __new__(class_, *args, **kwargs):
if class_ not in class_._instances:
class_._instances[class_] = super(Singleton, class_).__new__(class_)
return class_._instances[class_]
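# A minimal usage sketch (the subclass name is hypothetical, not part of the
# package): every instantiation of a Singleton subclass returns the same object.
#
#   class SpoortakModelCache(Singleton):
#       pass
#
#   assert SpoortakModelCache() is SpoortakModelCache()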
| 34.888889
| 80
| 0.665605
|
91ae75722c313f023c15ed59289411991a438ffa
| 8,207
|
py
|
Python
|
tests/share/metadata_formats/test_sharev2_elastic_formatter.py
|
CenterForOpenScience/SHARE
|
c7715af2881f6fa23197d4e7c381d90169a90ed1
|
[
"Apache-2.0"
] | 87
|
2015-01-06T18:24:45.000Z
|
2021-08-08T07:59:40.000Z
|
tests/share/metadata_formats/test_sharev2_elastic_formatter.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 442
|
2015-01-01T19:16:01.000Z
|
2022-03-30T21:10:26.000Z
|
tests/share/metadata_formats/test_sharev2_elastic_formatter.py
|
fortress-biotech/SHARE
|
9c5a05dd831447949fa6253afec5225ff8ab5d4f
|
[
"Apache-2.0"
] | 67
|
2015-03-10T16:32:58.000Z
|
2021-11-12T16:33:41.000Z
|
import json
import pytest
from unittest.mock import patch
from share.metadata_formats.sharev2_elastic import format_type
from tests.share.metadata_formats.base import BaseMetadataFormatterTest
@pytest.mark.parametrize('type_name,expected', [
('Foo', 'foo'),
('FooBar', 'foo bar'),
])
def test_format_type(type_name, expected):
actual = format_type(type_name)
assert actual == expected
def fake_id_encode(obj):
return f'encoded-{obj.id}'
class TestSharev2ElasticFormatter(BaseMetadataFormatterTest):
@pytest.fixture(scope='class', autouse=True)
def patch_encode(self):
with patch('share.util.IDObfuscator.encode', wraps=fake_id_encode):
yield
def assert_formatter_outputs_equal(self, actual_output, expected_output):
assert json.loads(actual_output) == expected_output
formatter_key = 'sharev2_elastic'
expected_outputs = {
'mycorrhizas': {
'contributors': ['Suzanne Simard', 'Mary Austi'],
'date': '2017-03-31T05:39:48+00:00',
'date_created': '2017-04-07T21:09:05.023090+00:00',
'date_modified': '2017-04-07T21:09:05.023090+00:00',
'date_updated': '2017-03-31T05:39:48+00:00',
'id': 'encoded-7',
'identifiers': ['http://dx.doi.org/10.5772/9813'],
'publishers': ['InTech'],
'retracted': False,
'sources': ['SomeSource'],
'title': 'The Role of Mycorrhizas in Forest Soil Stability with Climate Change',
'type': 'creative work',
'types': ['creative work'],
'affiliations': [],
'funders': [],
'hosts': [],
'subject_synonyms': [],
'subjects': [],
'tags': [],
'lists': {
'affiliations': [],
'contributors': [
{
'cited_as': 'Suzanne Simard',
'family_name': 'Simard',
'given_name': 'Suzanne',
'identifiers': [],
'name': 'Suzanne Simard',
'order_cited': 0,
'relation': 'creator',
'type': 'person',
'types': ['person', 'agent'],
},
{
'cited_as': 'Mary Austi',
'family_name': 'Austi',
'given_name': 'Mary',
'identifiers': [],
'name': 'Mary Austi',
'order_cited': 1,
'relation': 'creator',
'type': 'person',
'types': ['person', 'agent'],
},
],
'funders': [],
'hosts': [],
'lineage': [],
'publishers': [
{
'name': 'InTech',
'identifiers': [],
'relation': 'publisher',
'type': 'organization',
'types': ['organization', 'agent'],
},
],
},
},
'no-names-only-name-parts': {
'contributors': ['Suzanne Simard', 'Mary Austi'],
'date': '2017-03-31T05:39:48+00:00',
'date_created': '2017-04-07T21:09:05.023090+00:00',
'date_modified': '2017-04-07T21:09:05.023090+00:00',
'date_updated': '2017-03-31T05:39:48+00:00',
'id': 'encoded-7',
'identifiers': ['http://dx.doi.org/10.5772/9813'],
'publishers': [],
'retracted': False,
'sources': ['SomeSource'],
'title': 'The Role of Mycorrhizas in Forest Soil Stability with Climate Change',
'type': 'creative work',
'types': ['creative work'],
'affiliations': [],
'funders': [],
'hosts': [],
'subject_synonyms': [],
'subjects': [],
'tags': [],
'lists': {
'affiliations': [],
'contributors': [
{
'family_name': 'Simard',
'given_name': 'Suzanne',
'identifiers': [],
'name': 'Suzanne Simard',
'order_cited': 0,
'relation': 'creator',
'type': 'person',
'types': ['person', 'agent'],
},
{
'family_name': 'Austi',
'given_name': 'Mary',
'identifiers': [],
'name': 'Mary Austi',
'order_cited': 1,
'relation': 'creator',
'type': 'person',
'types': ['person', 'agent'],
},
],
'funders': [],
'hosts': [],
'lineage': [],
'publishers': [],
},
},
'with-is_deleted': {
'id': 'encoded-57',
'is_deleted': True,
},
'with-subjects': {
'affiliations': ['Wassamatter University'],
'contributors': ['Some Rando'],
'date': '2019-01-23T20:34:21.633684+00:00',
'date_created': '2020-02-02T20:20:02.020000+00:00',
'date_modified': '2020-02-02T20:20:02.020000+00:00',
'date_published': '2019-01-23T20:34:21.633684+00:00',
'id': 'encoded-123',
'identifiers': ['http://staging.osf.io/chair/'],
'registration_type': 'Open-Ended Registration',
'retracted': False,
'sources': ['osf reg'],
'subject_synonyms': [
'bepress|Life Sciences|Biology',
],
'subjects': [
'bepress|Architecture',
'osf reg|Custom life sciencesssss|Custom biologyyyy',
],
'title': 'Assorted chair',
'type': 'registration',
'types': ['registration', 'publication', 'creative work'],
'withdrawn': False,
'funders': [],
'hosts': [],
'publishers': [],
'tags': [],
'lists': {
'affiliations': [
{
'cited_as': 'Wassamatter University',
'identifiers': [],
'name': 'Wassamatter University',
'relation': 'agent work relation',
'type': 'institution',
'types': ['institution', 'organization', 'agent'],
},
],
'contributors': [
{
'cited_as': 'Some Rando',
'identifiers': ['http://staging.osf.io/rando/', 'mailto:rando@example.com'],
'name': 'Some Rando',
'order_cited': 0,
'relation': 'creator',
'type': 'person',
'types': ['person', 'agent'],
},
],
'lineage': [
{
'identifiers': ['http://staging.osf.io/mdept/'],
'title': 'Miscellaneous department',
'type': 'registration',
'types': ['registration', 'publication', 'creative work'],
},
{
'identifiers': ['http://staging.osf.io/vroom/'],
'title': 'Various room',
'type': 'registration',
'types': ['registration', 'publication', 'creative work'],
},
],
'funders': [],
'hosts': [],
'publishers': [],
},
},
}
| 37.646789
| 100
| 0.402949
|
155214d93088f78e838595e3e3787c951d281102
| 985
|
py
|
Python
|
swg_generated/python/test/test_new_order.py
|
942star/upbit-client
|
43201797a605d1e1b3e0db1183ced8ec9997699f
|
[
"MIT"
] | 1
|
2021-11-03T15:13:03.000Z
|
2021-11-03T15:13:03.000Z
|
swg_generated/python/test/test_new_order.py
|
942star/upbit-client
|
43201797a605d1e1b3e0db1183ced8ec9997699f
|
[
"MIT"
] | null | null | null |
swg_generated/python/test/test_new_order.py
|
942star/upbit-client
|
43201797a605d1e1b3e0db1183ced8ec9997699f
|
[
"MIT"
] | 1
|
2021-11-03T15:12:55.000Z
|
2021-11-03T15:12:55.000Z
|
# coding: utf-8
"""
Upbit Open API
## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [open-api@upbit.com] # noqa: E501
OpenAPI spec version: 1.1.6
Contact: ujhin942@gmail.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.new_order import NewOrder # noqa: E501
from swagger_client.rest import ApiException
class TestNewOrder(unittest.TestCase):
"""NewOrder unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNewOrder(self):
"""Test NewOrder"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.new_order.NewOrder() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.02439
| 189
| 0.68731
|
5c07d9165f9f89107d55ba1572d4d02600f601be
| 31,268
|
py
|
Python
|
tools/ll_prof.py
|
chromium-googlesource-mirror/v8
|
bf0c820d028452571c8c744ddd212c32c6d6a996
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 12
|
2015-03-03T02:38:54.000Z
|
2020-07-07T17:59:31.000Z
|
deps/v8/tools/ll_prof.py
|
emerleite/node
|
aa67b1f3750646a0f30e12a3b4e4fcb84dc8cafa
|
[
"MIT"
] | null | null | null |
deps/v8/tools/ll_prof.py
|
emerleite/node
|
aa67b1f3750646a0f30e12a3b4e4fcb84dc8cafa
|
[
"MIT"
] | 3
|
2017-02-25T05:46:09.000Z
|
2019-07-01T01:22:33.000Z
|
#!/usr/bin/env python
#
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import bisect
import collections
import ctypes
import disasm
import mmap
import optparse
import os
import re
import subprocess
import sys
import time
USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
$ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
# -f: force output file overwrite
# -i: limit profiling to our process and the kernel
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
$ %prog --snapshot --disasm-top=10
# Print flat profile with annotated disassembly for all used symbols.
# Use default log names and include kernel symbols into analysis.
$ %prog --disasm-all --kernel
# Print flat profile. Use custom log names.
$ %prog --log=foo.log --snapshot-log=snap-foo.log --trace=foo.data --snapshot
"""
# Must match kGcFakeMmap.
V8_GC_FAKE_MMAP = "/tmp/__v8_gc__"
JS_ORIGIN = "js"
JS_SNAPSHOT_ORIGIN = "js-snapshot"
OBJDUMP_BIN = disasm.OBJDUMP_BIN
class Code(object):
"""Code object."""
_id = 0
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
Code._id += 1
self.name = name
self.other_names = None
self.start_address = start_address
self.end_address = end_address
self.origin = origin
self.origin_offset = origin_offset
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
def AddName(self, name):
assert self.name != name
if self.other_names is None:
self.other_names = [name]
return
if not name in self.other_names:
self.other_names.append(name)
def FullName(self):
if self.other_names is None:
return self.name
self.other_names.sort()
return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
def IsUsed(self):
return self.self_ticks > 0 or self.callee_ticks is not None
def Tick(self, pc):
self.self_ticks += 1
if self.self_ticks_map is None:
self.self_ticks_map = collections.defaultdict(lambda: 0)
offset = pc - self.start_address
self.self_ticks_map[offset] += 1
def CalleeTick(self, callee):
if self.callee_ticks is None:
self.callee_ticks = collections.defaultdict(lambda: 0)
self.callee_ticks[callee] += 1
def PrintAnnotated(self, arch, options):
if self.self_ticks_map is None:
ticks_map = []
else:
ticks_map = self.self_ticks_map.items()
# Convert the ticks map to offsets and counts arrays so that later
# we can do binary search in the offsets array.
ticks_map.sort(key=lambda t: t[0])
ticks_offsets = [t[0] for t in ticks_map]
ticks_counts = [t[1] for t in ticks_map]
# Get a list of disassembled lines and their addresses.
lines = self._GetDisasmLines(arch, options)
if len(lines) == 0:
return
# Print annotated lines.
address = lines[0][0]
total_count = 0
for i in xrange(len(lines)):
start_offset = lines[i][0] - address
if i == len(lines) - 1:
end_offset = self.end_address - self.start_address
else:
end_offset = lines[i + 1][0] - address
# Ticks (reported pc values) are not always precise, i.e. not
# necessarily point at instruction starts. So we have to search
# for ticks that touch the current instruction line.
j = bisect.bisect_left(ticks_offsets, end_offset)
count = 0
for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
if offset < start_offset:
break
count += cnt
total_count += count
count = 100.0 * count / self.self_ticks
if count >= 0.01:
print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1])
else:
print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1])
print
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
def __str__(self):
return "%s [0x%x, 0x%x) size: %d origin: %s" % (
self.name,
self.start_address,
self.end_address,
self.end_address - self.start_address,
self.origin)
def _GetDisasmLines(self, arch, options):
if self.origin == JS_ORIGIN or self.origin == JS_SNAPSHOT_ORIGIN:
inplace = False
filename = options.log + ".ll"
else:
inplace = True
filename = self.origin
return disasm.GetDisasmLines(filename,
self.origin_offset,
self.end_address - self.start_address,
arch,
inplace)
class CodePage(object):
"""Group of adjacent code objects."""
SHIFT = 12 # 4K pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
@staticmethod
def PageAddress(address):
return address & CodePage.MASK
@staticmethod
def PageId(address):
return address >> CodePage.SHIFT
@staticmethod
def PageAddressFromId(id):
return id << CodePage.SHIFT
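  # Worked example (illustrative values, not from the original source): for
  # pc = 0x7f3001, PageId(pc) = 0x7f3001 >> 12 = 0x7f3 and PageAddress(pc) =
  # 0x7f3000, so all code objects overlapping that 4K page share one CodePage.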
def __init__(self, address):
self.address = address
self.code_objects = []
def Add(self, code):
self.code_objects.append(code)
def Remove(self, code):
self.code_objects.remove(code)
def Find(self, pc):
code_objects = self.code_objects
for i, code in enumerate(code_objects):
if code.start_address <= pc < code.end_address:
code_objects[0], code_objects[i] = code, code_objects[0]
return code
return None
def __iter__(self):
return self.code_objects.__iter__()
class CodeMap(object):
"""Code object map."""
def __init__(self):
self.pages = {}
self.min_address = 1 << 64
self.max_address = -1
def Add(self, code, max_pages=-1):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
pages = 0
while page_id < limit_id:
if max_pages >= 0 and pages > max_pages:
print >>sys.stderr, \
"Warning: page limit (%d) reached for %s [%s]" % (
max_pages, code.name, code.origin)
break
if page_id in self.pages:
page = self.pages[page_id]
else:
page = CodePage(CodePage.PageAddressFromId(page_id))
self.pages[page_id] = page
page.Add(code)
page_id += 1
pages += 1
self.min_address = min(self.min_address, code.start_address)
self.max_address = max(self.max_address, code.end_address)
def Remove(self, code):
page_id = CodePage.PageId(code.start_address)
limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
removed = False
while page_id < limit_id:
if page_id not in self.pages:
page_id += 1
continue
page = self.pages[page_id]
page.Remove(code)
removed = True
page_id += 1
return removed
def AllCode(self):
for page in self.pages.itervalues():
for code in page:
if CodePage.PageAddress(code.start_address) == page.address:
yield code
def UsedCode(self):
for code in self.AllCode():
if code.IsUsed():
yield code
def Print(self):
for code in self.AllCode():
print code
def Find(self, pc):
if pc < self.min_address or pc >= self.max_address:
return None
page_id = CodePage.PageId(pc)
if page_id not in self.pages:
return None
return self.pages[page_id].Find(pc)
class CodeInfo(object):
"""Generic info about generated code objects."""
def __init__(self, arch, header_size):
self.arch = arch
self.header_size = header_size
class SnapshotLogReader(object):
"""V8 snapshot log reader."""
_SNAPSHOT_CODE_NAME_RE = re.compile(
r"snapshot-code-name,(\d+),\"(.*)\"")
def __init__(self, log_name):
self.log_name = log_name
def ReadNameMap(self):
log = open(self.log_name, "r")
try:
snapshot_pos_to_name = {}
for line in log:
match = SnapshotLogReader._SNAPSHOT_CODE_NAME_RE.match(line)
if match:
pos = int(match.group(1))
name = match.group(2)
snapshot_pos_to_name[pos] = name
finally:
log.close()
return snapshot_pos_to_name
class LogReader(object):
"""V8 low-level (binary) log reader."""
_ARCH_TO_POINTER_TYPE_MAP = {
"ia32": ctypes.c_uint32,
"arm": ctypes.c_uint32,
"mips": ctypes.c_uint32,
"x64": ctypes.c_uint64
}
_CODE_CREATE_TAG = "C"
_CODE_MOVE_TAG = "M"
_CODE_DELETE_TAG = "D"
_SNAPSHOT_POSITION_TAG = "P"
_CODE_MOVING_GC_TAG = "G"
def __init__(self, log_name, code_map, snapshot_pos_to_name):
self.log_file = open(log_name, "r")
self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
self.log_pos = 0
self.code_map = code_map
self.snapshot_pos_to_name = snapshot_pos_to_name
self.address_to_snapshot_name = {}
self.arch = self.log[:self.log.find("\0")]
self.log_pos += len(self.arch) + 1
assert self.arch in LogReader._ARCH_TO_POINTER_TYPE_MAP, \
"Unsupported architecture %s" % self.arch
pointer_type = LogReader._ARCH_TO_POINTER_TYPE_MAP[self.arch]
self.code_create_struct = LogReader._DefineStruct([
("name_size", ctypes.c_int32),
("code_address", pointer_type),
("code_size", ctypes.c_int32)])
self.code_move_struct = LogReader._DefineStruct([
("from_address", pointer_type),
("to_address", pointer_type)])
self.code_delete_struct = LogReader._DefineStruct([
("address", pointer_type)])
self.snapshot_position_struct = LogReader._DefineStruct([
("address", pointer_type),
("position", ctypes.c_int32)])
def ReadUpToGC(self):
while self.log_pos < self.log.size():
tag = self.log[self.log_pos]
self.log_pos += 1
if tag == LogReader._CODE_MOVING_GC_TAG:
self.address_to_snapshot_name.clear()
return
if tag == LogReader._CODE_CREATE_TAG:
event = self.code_create_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.code_address
end_address = start_address + event.code_size
if start_address in self.address_to_snapshot_name:
name = self.address_to_snapshot_name[start_address]
origin = JS_SNAPSHOT_ORIGIN
else:
name = self.log[self.log_pos:self.log_pos + event.name_size]
origin = JS_ORIGIN
self.log_pos += event.name_size
origin_offset = self.log_pos
self.log_pos += event.code_size
code = Code(name, start_address, end_address, origin, origin_offset)
        conflicting_code = self.code_map.Find(start_address)
        if conflicting_code:
          if not (conflicting_code.start_address == code.start_address and
                  conflicting_code.end_address == code.end_address):
            self.code_map.Remove(conflicting_code)
          else:
            LogReader._HandleCodeConflict(conflicting_code, code)
# TODO(vitalyr): this warning is too noisy because of our
# attempts to reconstruct code log from the snapshot.
# print >>sys.stderr, \
# "Warning: Skipping duplicate code log entry %s" % code
continue
self.code_map.Add(code)
continue
if tag == LogReader._CODE_MOVE_TAG:
event = self.code_move_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.from_address
new_start_address = event.to_address
if old_start_address == new_start_address:
# Skip useless code move entries.
continue
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact move address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
size = code.end_address - code.start_address
code.start_address = new_start_address
code.end_address = new_start_address + size
self.code_map.Add(code)
continue
if tag == LogReader._CODE_DELETE_TAG:
event = self.code_delete_struct.from_buffer(self.log, self.log_pos)
self.log_pos += ctypes.sizeof(event)
old_start_address = event.address
code = self.code_map.Find(old_start_address)
if not code:
print >>sys.stderr, "Warning: Not found %x" % old_start_address
continue
assert code.start_address == old_start_address, \
"Inexact delete address %x for %s" % (old_start_address, code)
self.code_map.Remove(code)
continue
if tag == LogReader._SNAPSHOT_POSITION_TAG:
event = self.snapshot_position_struct.from_buffer(self.log,
self.log_pos)
self.log_pos += ctypes.sizeof(event)
start_address = event.address
snapshot_pos = event.position
if snapshot_pos in self.snapshot_pos_to_name:
self.address_to_snapshot_name[start_address] = \
self.snapshot_pos_to_name[snapshot_pos]
continue
assert False, "Unknown tag %s" % tag
def Dispose(self):
self.log.close()
self.log_file.close()
@staticmethod
def _DefineStruct(fields):
class Struct(ctypes.Structure):
_fields_ = fields
return Struct
@staticmethod
def _HandleCodeConflict(old_code, new_code):
assert (old_code.start_address == new_code.start_address and
old_code.end_address == new_code.end_address), \
"Conficting code log entries %s and %s" % (old_code, new_code)
if old_code.name == new_code.name:
return
# Code object may be shared by a few functions. Collect the full
# set of names.
old_code.AddName(new_code.name)
class Descriptor(object):
"""Descriptor of a structure in the binary trace log."""
CTYPE_MAP = {
"u16": ctypes.c_uint16,
"u32": ctypes.c_uint32,
"u64": ctypes.c_uint64
}
def __init__(self, fields):
class TraceItem(ctypes.Structure):
_fields_ = Descriptor.CtypesFields(fields)
def __str__(self):
return ", ".join("%s: %s" % (field, self.__getattribute__(field))
for field, _ in TraceItem._fields_)
self.ctype = TraceItem
def Read(self, trace, offset):
return self.ctype.from_buffer(trace, offset)
@staticmethod
def CtypesFields(fields):
return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
# for the gory details.
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
("attr_size", "u64"),
("attrs_offset", "u64"),
("attrs_size", "u64"),
("data_offset", "u64"),
("data_size", "u64"),
("event_types_offset", "u64"),
("event_types_size", "u64")
])
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
("config", "u64"),
("sample_period_or_freq", "u64"),
("sample_type", "u64"),
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
("bt_type", "u32"),
("bp_addr", "u64"),
("bp_len", "u64"),
])
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
("size", "u16")
])
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
("addr", "u64"),
("len", "u64"),
("pgoff", "u64")
])
# perf_event_attr.sample_type bits control the set of
# perf_sample_event fields.
PERF_SAMPLE_IP = 1 << 0
PERF_SAMPLE_TID = 1 << 1
PERF_SAMPLE_TIME = 1 << 2
PERF_SAMPLE_ADDR = 1 << 3
PERF_SAMPLE_READ = 1 << 4
PERF_SAMPLE_CALLCHAIN = 1 << 5
PERF_SAMPLE_ID = 1 << 6
PERF_SAMPLE_CPU = 1 << 7
PERF_SAMPLE_PERIOD = 1 << 8
PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
("tid", "u32", PERF_SAMPLE_TID),
("time", "u64", PERF_SAMPLE_TIME),
("addr", "u64", PERF_SAMPLE_ADDR),
("id", "u64", PERF_SAMPLE_ID),
("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
("cpu", "u32", PERF_SAMPLE_CPU),
("res", "u32", PERF_SAMPLE_CPU),
("period", "u64", PERF_SAMPLE_PERIOD),
# Don't want to handle read format that comes after the period and
# before the callchain and has variable size.
("nr", "u64", PERF_SAMPLE_CALLCHAIN)
# Raw data follows the callchain and is ignored.
]
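# Illustrative note (added comment, not in the original script): for a trace
# recorded with sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
# the sample body descriptor built in TraceReader._SampleEventBodyDesc keeps
# exactly the fields ("ip", "pid", "tid", "time"), in the order listed above.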
PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
PERF_RECORD_MMAP = 1
PERF_RECORD_SAMPLE = 9
class TraceReader(object):
"""Perf (linux-2.6/tools/perf) trace file reader."""
_TRACE_HEADER_MAGIC = 4993446653023372624
def __init__(self, trace_name):
self.trace_file = open(trace_name, "r")
self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
print >>sys.stderr, "Warning: unsupported trace header magic"
self.offset = self.trace_header.data_offset
self.limit = self.trace_header.data_offset + self.trace_header.data_size
assert self.limit <= self.trace.size(), \
"Trace data limit exceeds trace file size"
self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
assert self.trace_header.attrs_size != 0, \
"No perf event attributes found in the trace"
perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
self.trace_header.attrs_offset)
self.sample_event_body_desc = self._SampleEventBodyDesc(
perf_event_attr.sample_type)
self.callchain_supported = \
(perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
if self.callchain_supported:
self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
self.ip_size = ctypes.sizeof(self.ip_struct)
def ReadEventHeader(self):
if self.offset >= self.limit:
return None, 0
offset = self.offset
header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
self.offset += header.size
return header, offset
def ReadMmap(self, header, offset):
mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
offset + self.header_size)
# Read null-terminated filename.
filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
offset + header.size]
mmap_info.filename = filename[:filename.find(chr(0))]
return mmap_info
def ReadSample(self, header, offset):
sample = self.sample_event_body_desc.Read(self.trace,
offset + self.header_size)
if not self.callchain_supported:
return sample
sample.ips = []
offset += self.header_size + ctypes.sizeof(sample)
for _ in xrange(sample.nr):
sample.ips.append(
self.ip_struct.from_buffer(self.trace, offset).value)
offset += self.ip_size
return sample
def Dispose(self):
self.trace.close()
self.trace_file.close()
def _SampleEventBodyDesc(self, sample_type):
assert (sample_type & PERF_SAMPLE_READ) == 0, \
"Can't hande read format in samples"
fields = [(field, format)
for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
if (bit & sample_type) != 0]
return Descriptor(fields)
OBJDUMP_SECTION_HEADER_RE = re.compile(
r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
OBJDUMP_SYMBOL_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
r"^DYNAMIC SYMBOL TABLE")
KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
PERF_KERNEL_ALLSYMS_RE = re.compile(
r".*kallsyms.*")
KERNEL_ALLSYMS_LINE_RE = re.compile(
r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
class LibraryRepo(object):
def __init__(self):
self.infos = []
self.names = set()
self.ticks = {}
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps when requested using the fact that their tid
# is 0.
if mmap_info.tid == 0 and not options.kernel:
return True
if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
return self._LoadKernelSymbols(code_map)
self.infos.append(mmap_info)
mmap_info.ticks = 0
mmap_info.unique_name = self._UniqueMmapName(mmap_info)
if not os.path.exists(mmap_info.filename):
return True
# Request section headers (-h), symbols (-t), and dynamic symbols
# (-T) from objdump.
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
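    # Illustrative objdump -h excerpt (format assumed from typical GNU
    # binutils output; not taken from the original file):
    #   1 .text         0000f2a0  0000000000401000  0000000000401000  ...  2**4
    #                   CONTENTS, ALLOC, LOAD, READONLY, CODE
    # The section name is on the first line while the CODE/RELOC flags sit on
    # the continuation line, which is why after_section is tracked below.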
process = subprocess.Popen(
"%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
code_sections = set()
reloc_sections = set()
dynamic = False
try:
for line in pipe:
if after_section:
if line.find("CODE") != -1:
code_sections.add(after_section)
if line.find("RELOC") != -1:
reloc_sections.add(after_section)
after_section = None
continue
match = OBJDUMP_SECTION_HEADER_RE.match(line)
if match:
after_section = match.group(1)
continue
if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
dynamic = True
continue
match = OBJDUMP_SYMBOL_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
origin_offset = start_address
flags = match.group(2)
section = match.group(3)
if section in code_sections:
if dynamic or section in reloc_sections:
start_address += mmap_info.addr
size = int(match.group(4), 16)
name = match.group(5)
origin = mmap_info.filename
code_map.Add(Code(name, start_address, start_address + size,
origin, origin_offset))
finally:
pipe.close()
assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
def Tick(self, pc):
for i, mmap_info in enumerate(self.infos):
if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
mmap_info.ticks += 1
self.infos[0], self.infos[i] = mmap_info, self.infos[0]
return True
return False
def _UniqueMmapName(self, mmap_info):
name = mmap_info.filename
index = 1
while name in self.names:
name = "%s-%d" % (mmap_info.filename, index)
index += 1
self.names.add(name)
return name
def _LoadKernelSymbols(self, code_map):
if not os.path.exists(KERNEL_ALLSYMS_FILE):
print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
return False
kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
code = None
for line in kallsyms:
match = KERNEL_ALLSYMS_LINE_RE.match(line)
if match:
start_address = int(match.group(1), 16)
end_address = start_address
name = match.group(2)
if code:
code.end_address = start_address
code_map.Add(code, 16)
code = Code(name, start_address, end_address, "kernel", 0)
return True
def PrintReport(code_map, library_repo, arch, ticks, options):
print "Ticks per symbol:"
used_code = [code for code in code_map.UsedCode()]
used_code.sort(key=lambda x: x.self_ticks, reverse=True)
for i, code in enumerate(used_code):
code_ticks = code.self_ticks
print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
code.FullName(), code.origin)
if options.disasm_all or i < options.disasm_top:
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
mmap_infos = [m for m in library_repo.infos]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
mmap_info.unique_name)
def PrintDot(code_map, options):
print "digraph G {"
for code in code_map.UsedCode():
if code.self_ticks < 10:
continue
print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
if code.callee_ticks:
for callee, ticks in code.callee_ticks.iteritems():
print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
print "}"
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("--snapshot-log",
default="obj/release/snapshot.log",
help="V8 snapshot log file name [default: %default]")
parser.add_option("--log",
default="v8.log",
help="V8 log file name [default: %default]")
parser.add_option("--snapshot",
default=False,
action="store_true",
help="process V8 snapshot log [default: %default]")
parser.add_option("--trace",
default="perf.data",
help="perf trace file name [default: %default]")
parser.add_option("--kernel",
default=False,
action="store_true",
help="process kernel entries [default: %default]")
parser.add_option("--disasm-top",
default=0,
type="int",
help=("number of top symbols to disassemble and annotate "
"[default: %default]"))
parser.add_option("--disasm-all",
default=False,
action="store_true",
help=("disassemble and annotate all used symbols "
"[default: %default]"))
parser.add_option("--dot",
default=False,
action="store_true",
help="produce dot output (WIP) [default: %default]")
parser.add_option("--quiet", "-q",
default=False,
action="store_true",
help="no auxiliary messages [default: %default]")
options, args = parser.parse_args()
if not options.quiet:
if options.snapshot:
print "V8 logs: %s, %s, %s.ll" % (options.snapshot_log,
options.log,
options.log)
else:
print "V8 log: %s, %s.ll (no snapshot)" % (options.log, options.log)
print "Perf trace file: %s" % options.trace
# Stats.
events = 0
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
mmap_time = 0
sample_time = 0
# Process the snapshot log to fill the snapshot name map.
snapshot_name_map = {}
if options.snapshot:
snapshot_log_reader = SnapshotLogReader(log_name=options.snapshot_log)
snapshot_name_map = snapshot_log_reader.ReadNameMap()
# Initialize the log reader.
code_map = CodeMap()
log_reader = LogReader(log_name=options.log + ".ll",
code_map=code_map,
snapshot_pos_to_name=snapshot_name_map)
if not options.quiet:
print "Generated code architecture: %s" % log_reader.arch
print
sys.stdout.flush()
# Process the code and trace logs.
library_repo = LibraryRepo()
log_reader.ReadUpToGC()
trace_reader = TraceReader(options.trace)
while True:
header, offset = trace_reader.ReadEventHeader()
if not header:
break
events += 1
if header.type == PERF_RECORD_MMAP:
start = time.time()
mmap_info = trace_reader.ReadMmap(header, offset)
if mmap_info.filename == V8_GC_FAKE_MMAP:
log_reader.ReadUpToGC()
else:
library_repo.Load(mmap_info, code_map, options)
mmap_time += time.time() - start
elif header.type == PERF_RECORD_SAMPLE:
ticks += 1
start = time.time()
sample = trace_reader.ReadSample(header, offset)
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
really_missed_ticks += 1
if trace_reader.callchain_supported:
for ip in sample.ips:
caller_code = code_map.Find(ip)
if caller_code:
if code:
caller_code.CalleeTick(code)
code = caller_code
sample_time += time.time() - start
if options.dot:
PrintDot(code_map, options)
else:
PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
if not options.quiet:
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
print "%10d unaccounted ticks" % really_missed_ticks
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
print "%9.2fs tick processing time" % sample_time
log_reader.Dispose()
trace_reader.Dispose()
| 32.983122
| 97
| 0.641774
|
0e711d489b4a4566faa9b1b167ac00c3fd9903dc
| 663
|
py
|
Python
|
advent/day_02/task_1.py
|
kurazu/advent_of_code_2021
|
a4b18e0e7f286d3485d85f2a1a58c7bdea0115d7
|
[
"MIT"
] | null | null | null |
advent/day_02/task_1.py
|
kurazu/advent_of_code_2021
|
a4b18e0e7f286d3485d85f2a1a58c7bdea0115d7
|
[
"MIT"
] | null | null | null |
advent/day_02/task_1.py
|
kurazu/advent_of_code_2021
|
a4b18e0e7f286d3485d85f2a1a58c7bdea0115d7
|
[
"MIT"
] | null | null | null |
import logging
from typing import TextIO
import pandas as pd
from ..cli import run_with_file_argument
logger = logging.getLogger(__name__)
def main(input: TextIO) -> str:
df = pd.read_csv(input, names=["direction", "distance"], delimiter=" ")
df["x_factor"] = df["direction"].map({"forward": 1, "down": 0, "up": 0})
df["y_factor"] = df["direction"].map({"forward": 0, "down": 1, "up": -1})
df["x"] = df["x_factor"] * df["distance"]
df["y"] = df["y_factor"] * df["distance"]
x = df["x"].sum()
y = df["y"].sum()
logger.info("X=%d, Y=%d", x, y)
return f"{x * y}"
if __name__ == "__main__":
run_with_file_argument(main)
| 26.52
| 77
| 0.594268
|
24e34dda9c7e01baac11bf502de3a2dac04a050b
| 1,238
|
py
|
Python
|
IM_test/app_lib/serialOperate.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
IM_test/app_lib/serialOperate.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
IM_test/app_lib/serialOperate.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
import serial
import sys
import globalVariable
class SerialOperation(object):
def __init__(self):
self.com = globalVariable.serial_config['serial_port']
self.xonxoff = globalVariable.serial_config['xonxoff']
self.baudrate = globalVariable.serial_config['baudrate']
self.timeout = globalVariable.serial_config['timeout']
def connectSerial(self):
try:
self.inst = serial.Serial(self.com,
baudrate = self.baudrate,
xonxoff = self.xonxoff,
timeout = self.timeout)
        except Exception, e:
            print 'Exception is', e
return False
if self.inst.isOpen():
return True
else:
return False
def is_open(self):
return self.inst.isOpen()
def readable(self):
try:
return self.inst.readable()
except Exception:
return False
def write_msg(self,msg):
self.inst.flushInput()
self.inst.write(msg)
def read_msg(self):
return self.inst.readline()
def closeSerial(self):
self.inst.close()
| 25.791667
| 64
| 0.546042
|
37a9657c57b6a3217a97115cbac1833169d5bebd
| 471
|
py
|
Python
|
src/scimschema/__init__.py
|
stefanfoulis/scimschema
|
a314453d7b0fbabc69a0fad864d727a8d158abf9
|
[
"MIT"
] | null | null | null |
src/scimschema/__init__.py
|
stefanfoulis/scimschema
|
a314453d7b0fbabc69a0fad864d727a8d158abf9
|
[
"MIT"
] | null | null | null |
src/scimschema/__init__.py
|
stefanfoulis/scimschema
|
a314453d7b0fbabc69a0fad864d727a8d158abf9
|
[
"MIT"
] | null | null | null |
from .core_schemas import load_dict as _load_dict
from . import core_schemas
from ._model.schema_response import ScimResponse
from ._model import scim_exceptions, model, attribute
def validate(data, extension_schema_definitions):
ScimResponse(
data=data,
core_schema_definitions=core_schemas.schema,
extension_schema_definitions=extension_schema_definitions
).validate()
def load_dict_to_schema(path):
return _load_dict(path=path)
| 27.705882
| 65
| 0.787686
|
d44f5540dee95de5e5eb62d7bd35cef58814928d
| 14,524
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ethernet_lldp_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ethernet_lldp_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ethernet_lldp_cfg.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Cisco_IOS_XR_ethernet_lldp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ethernet\-lldp package configuration.
This module contains definitions
for the following management objects\:
lldp\: Enable LLDP, or configure global LLDP subcommands
This YANG module augments the
Cisco\-IOS\-XR\-ifmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Lldp(Entity):
"""
Enable LLDP, or configure global LLDP subcommands
.. attribute:: tlv_select
Selection of LLDP TLVs to disable
**type**\: :py:class:`TlvSelect <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect>`
**presence node**\: True
.. attribute:: holdtime
Length of time (in sec) that receiver must keep this packet
**type**\: int
**range:** 0..65535
.. attribute:: extended_show_width
Enable or disable LLDP Show LLDP Neighbor Extended Width
**type**\: bool
**default value**\: false
.. attribute:: enable_subintf
Enable or disable LLDP on Sub\-interfaces as well globally
**type**\: bool
**default value**\: false
.. attribute:: enable_mgmtintf
Enable or disable LLDP on Mgmt interfaces as well globally
**type**\: bool
**default value**\: false
.. attribute:: timer
Specify the rate at which LLDP packets are sent (in sec)
**type**\: int
**range:** 5..65534
**default value**\: 30
.. attribute:: reinit
Delay (in sec) for LLDP initialization on any interface
**type**\: int
**range:** 2..5
**default value**\: 2
.. attribute:: enable
Enable or disable LLDP globally
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp, self).__init__()
self._top_entity = None
self.yang_name = "lldp"
self.yang_parent_name = "Cisco-IOS-XR-ethernet-lldp-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("tlv-select", ("tlv_select", Lldp.TlvSelect))])
self._leafs = OrderedDict([
('holdtime', (YLeaf(YType.uint32, 'holdtime'), ['int'])),
('extended_show_width', (YLeaf(YType.boolean, 'extended-show-width'), ['bool'])),
('enable_subintf', (YLeaf(YType.boolean, 'enable-subintf'), ['bool'])),
('enable_mgmtintf', (YLeaf(YType.boolean, 'enable-mgmtintf'), ['bool'])),
('timer', (YLeaf(YType.uint32, 'timer'), ['int'])),
('reinit', (YLeaf(YType.uint32, 'reinit'), ['int'])),
('enable', (YLeaf(YType.boolean, 'enable'), ['bool'])),
])
self.holdtime = None
self.extended_show_width = None
self.enable_subintf = None
self.enable_mgmtintf = None
self.timer = None
self.reinit = None
self.enable = None
self.tlv_select = None
self._children_name_map["tlv_select"] = "tlv-select"
self._segment_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp, ['holdtime', 'extended_show_width', 'enable_subintf', 'enable_mgmtintf', 'timer', 'reinit', 'enable'], name, value)
class TlvSelect(Entity):
"""
Selection of LLDP TLVs to disable
.. attribute:: system_name
System Name TLV
**type**\: :py:class:`SystemName <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect.SystemName>`
.. attribute:: port_description
Port Description TLV
**type**\: :py:class:`PortDescription <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect.PortDescription>`
.. attribute:: system_description
System Description TLV
**type**\: :py:class:`SystemDescription <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect.SystemDescription>`
.. attribute:: system_capabilities
System Capabilities TLV
**type**\: :py:class:`SystemCapabilities <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect.SystemCapabilities>`
.. attribute:: management_address
Management Address TLV
**type**\: :py:class:`ManagementAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_lldp_cfg.Lldp.TlvSelect.ManagementAddress>`
.. attribute:: tlv_select_enter
enter lldp tlv\-select submode
**type**\: bool
**mandatory**\: True
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect, self).__init__()
self.yang_name = "tlv-select"
self.yang_parent_name = "lldp"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("system-name", ("system_name", Lldp.TlvSelect.SystemName)), ("port-description", ("port_description", Lldp.TlvSelect.PortDescription)), ("system-description", ("system_description", Lldp.TlvSelect.SystemDescription)), ("system-capabilities", ("system_capabilities", Lldp.TlvSelect.SystemCapabilities)), ("management-address", ("management_address", Lldp.TlvSelect.ManagementAddress))])
self.is_presence_container = True
self._leafs = OrderedDict([
('tlv_select_enter', (YLeaf(YType.boolean, 'tlv-select-enter'), ['bool'])),
])
self.tlv_select_enter = None
self.system_name = Lldp.TlvSelect.SystemName()
self.system_name.parent = self
self._children_name_map["system_name"] = "system-name"
self.port_description = Lldp.TlvSelect.PortDescription()
self.port_description.parent = self
self._children_name_map["port_description"] = "port-description"
self.system_description = Lldp.TlvSelect.SystemDescription()
self.system_description.parent = self
self._children_name_map["system_description"] = "system-description"
self.system_capabilities = Lldp.TlvSelect.SystemCapabilities()
self.system_capabilities.parent = self
self._children_name_map["system_capabilities"] = "system-capabilities"
self.management_address = Lldp.TlvSelect.ManagementAddress()
self.management_address.parent = self
self._children_name_map["management_address"] = "management-address"
self._segment_path = lambda: "tlv-select"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect, ['tlv_select_enter'], name, value)
class SystemName(Entity):
"""
System Name TLV
.. attribute:: disable
disable System Name TLV
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect.SystemName, self).__init__()
self.yang_name = "system-name"
self.yang_parent_name = "tlv-select"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('disable', (YLeaf(YType.boolean, 'disable'), ['bool'])),
])
self.disable = None
self._segment_path = lambda: "system-name"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/tlv-select/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect.SystemName, ['disable'], name, value)
class PortDescription(Entity):
"""
Port Description TLV
.. attribute:: disable
disable Port Description TLV
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect.PortDescription, self).__init__()
self.yang_name = "port-description"
self.yang_parent_name = "tlv-select"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('disable', (YLeaf(YType.boolean, 'disable'), ['bool'])),
])
self.disable = None
self._segment_path = lambda: "port-description"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/tlv-select/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect.PortDescription, ['disable'], name, value)
class SystemDescription(Entity):
"""
System Description TLV
.. attribute:: disable
disable System Description TLV
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect.SystemDescription, self).__init__()
self.yang_name = "system-description"
self.yang_parent_name = "tlv-select"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('disable', (YLeaf(YType.boolean, 'disable'), ['bool'])),
])
self.disable = None
self._segment_path = lambda: "system-description"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/tlv-select/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect.SystemDescription, ['disable'], name, value)
class SystemCapabilities(Entity):
"""
System Capabilities TLV
.. attribute:: disable
disable System Capabilities TLV
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect.SystemCapabilities, self).__init__()
self.yang_name = "system-capabilities"
self.yang_parent_name = "tlv-select"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('disable', (YLeaf(YType.boolean, 'disable'), ['bool'])),
])
self.disable = None
self._segment_path = lambda: "system-capabilities"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/tlv-select/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect.SystemCapabilities, ['disable'], name, value)
class ManagementAddress(Entity):
"""
Management Address TLV
.. attribute:: disable
disable Management Address TLV
**type**\: bool
**default value**\: false
"""
_prefix = 'ethernet-lldp-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Lldp.TlvSelect.ManagementAddress, self).__init__()
self.yang_name = "management-address"
self.yang_parent_name = "tlv-select"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('disable', (YLeaf(YType.boolean, 'disable'), ['bool'])),
])
self.disable = None
self._segment_path = lambda: "management-address"
self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-lldp-cfg:lldp/tlv-select/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Lldp.TlvSelect.ManagementAddress, ['disable'], name, value)
def clone_ptr(self):
self._top_entity = Lldp()
return self._top_entity
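# Illustrative usage sketch (added comment, not part of the generated module):
#   lldp = Lldp()
#   lldp.enable = True      # enable LLDP globally
#   lldp.timer = 60         # send LLDP packets every 60 seconds (range 5..65534)
#   lldp.holdtime = 120     # receivers keep the packet for 120 seconds
# Pushing the object to a device (e.g. via a YDK CRUD service over NETCONF)
# is assumed to happen elsewhere and is outside the scope of this file.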
| 34.580952
| 433
| 0.568232
|
19e74a657445f9da21052744db0d22026f95773b
| 1,399
|
py
|
Python
|
python/matplotlib/imshow_colour_map_ax.py
|
jeremiedecock/snippets
|
4bd4e7f459eee610d5cf19f845299ca942ff4b64
|
[
"MIT"
] | 23
|
2015-06-08T13:01:00.000Z
|
2021-12-30T08:20:04.000Z
|
python/matplotlib/imshow_colour_map_ax.py
|
jeremiedecock/snippets
|
4bd4e7f459eee610d5cf19f845299ca942ff4b64
|
[
"MIT"
] | 1
|
2020-10-22T02:36:10.000Z
|
2020-10-22T02:36:10.000Z
|
python/matplotlib/imshow_colour_map_ax.py
|
jeremiedecock/snippets
|
4bd4e7f459eee610d5cf19f845299ca942ff4b64
|
[
"MIT"
] | 7
|
2017-10-31T09:48:14.000Z
|
2022-01-04T15:59:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Display data as an image using a color bar (via AxesSubplot)
See: http://matplotlib.org/examples/pylab_examples/image_demo.html
See also:
- http://matplotlib.org/examples/color/colormaps_reference.html (the list of all colormaps)
- http://matplotlib.org/users/colormaps.html?highlight=colormap#mycarta-banding (what is the right colormap to choose for a given plot)
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# MAKE DATAS ##################################################################
z_matrix = np.array([[xi * yi for xi in range(50)] for yi in range(50)])
# PLOT ########################################################################
# The list of all colormaps: http://matplotlib.org/examples/color/colormaps_reference.html
#interp='nearest' # "raw" (non smooth) map
interp = 'bilinear' # "smooth" map
fig = plt.figure()
ax = fig.add_subplot(111)
im = ax.imshow(z_matrix, interpolation=interp, origin='lower', cmap="inferno") # cmap=cm.inferno and cmap="inferno" are both valid
#im = ax.imshow(z_matrix, interpolation=interp, origin='lower', cmap=cm.inferno) # cmap=cm.inferno and cmap="inferno" are both valid
plt.colorbar(im) # draw the colorbar
# SAVE AND SHOW ###############################################################
plt.savefig("imshow_colour_map_ax.png")
plt.show()
| 34.121951
| 135
| 0.626876
|
73499d1b3e46d625246e734381206ba0364bccb0
| 1,397
|
py
|
Python
|
setup.py
|
adrianmo/pymeteoclimatic
|
e699766282d1a6f09a452bac68b7f5e9ae510073
|
[
"MIT"
] | 3
|
2021-01-18T12:20:31.000Z
|
2022-01-28T19:31:23.000Z
|
setup.py
|
adrianmo/pymeteoclimatic
|
e699766282d1a6f09a452bac68b7f5e9ae510073
|
[
"MIT"
] | 1
|
2020-09-11T16:31:34.000Z
|
2020-09-14T07:36:45.000Z
|
setup.py
|
adrianmo/pymeteoclimatic
|
e699766282d1a6f09a452bac68b7f5e9ae510073
|
[
"MIT"
] | 1
|
2022-01-31T20:07:41.000Z
|
2022-01-31T20:07:41.000Z
|
from setuptools import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pymeteoclimatic',
version='0.0.6',
description='A Python wrapper around the Meteoclimatic service',
long_description=long_description,
long_description_content_type='text/markdown',
author='Adrián Moreno',
author_email='adrian@morenomartinez.com',
url='https://github.com/adrianmo/pymeteoclimatic',
packages=['meteoclimatic', ],
install_requires=['lxml~=4.5',
'beautifulsoup4~=4.9'
],
python_requires='>=3.6',
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries"],
keywords='meteoclimatic client library api weather',
license='MIT',
)
| 36.763158
| 73
| 0.634932
|
9cb99d3767b0e8aafd120a5e4bf25e141b29434d
| 1,171
|
py
|
Python
|
dictionaries/memory_game.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | null | null | null |
dictionaries/memory_game.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | null | null | null |
dictionaries/memory_game.py
|
MaggieIllustrations/softuni-github-programming
|
f5695cb14602f3d2974359f6d8734332acc650d3
|
[
"MIT"
] | 1
|
2022-01-14T17:12:44.000Z
|
2022-01-14T17:12:44.000Z
|
sequence_of_elements = input().split()
count_moves = 0
command = input()
while not command == "end":
count_moves += 1
index1 = int(command.split()[0])
index2 = int(command.split()[1])
if index1 == index2 or index1 < 0 or index2 < 0 or index1 >= len(sequence_of_elements) or index2 >= len(sequence_of_elements):
sequence_of_elements.insert(int(len(sequence_of_elements) / 2), f"-{str(count_moves)}a")
sequence_of_elements.insert(int(len(sequence_of_elements) / 2), f"-{str(count_moves)}a")
print("Invalid input! Adding additional elements to the board")
elif sequence_of_elements[index1] == sequence_of_elements[index2]:
print(f"Congrats! You have found matching elements - {sequence_of_elements[index1]}!")
x = sequence_of_elements.pop(index1)
sequence_of_elements.remove(x)
elif sequence_of_elements[index1] != sequence_of_elements[index2]:
print("Try again!")
if len(sequence_of_elements) == 0:
print(f"You have won in {count_moves} turns!")
break
command = input()
if command == "end":
print("Sorry you lose :(\n"
f"{' '.join(sequence_of_elements)}")
| 48.791667
| 130
| 0.677199
|
19b77e626ce44cb2b631aa070c2650c2c7d0b614
| 2,297
|
py
|
Python
|
test/pseudo_terminal.py
|
cbuescher/documentation
|
97714317adef3eb676a7941df94d2d1c3835e909
|
[
"Apache-2.0"
] | null | null | null |
test/pseudo_terminal.py
|
cbuescher/documentation
|
97714317adef3eb676a7941df94d2d1c3835e909
|
[
"Apache-2.0"
] | null | null | null |
test/pseudo_terminal.py
|
cbuescher/documentation
|
97714317adef3eb676a7941df94d2d1c3835e909
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import os
import re
import sys
import random
import pexpect
class Log:
def __init__(self):
self._log = None
self._filter_pattern = None
self.reset_log()
def reset_log(self):
self._log = []
def stdout_filter(self, pattern):
self._filter_pattern = pattern
def get_log(self):
return "".join(self._log)
def write(self, s):
self._log.append(s)
if re.match(self._filter_pattern, s) is None:
sys.stdout.write(s)
def flush(self):
sys.stdout.flush()
class PseudoTerminal:
def __init__(self, timeout=30*60):
self._log = Log()
self._pty = None
self._cmd_timeout = timeout
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self):
env = os.environ.copy()
env["PS1"] = "" # remove default terminal prompt
env["PS2"] = ""
self._pty = pexpect.spawn('sh', env=env, echo=False)
self._pty.logfile_read = self._log
def stop(self):
self._pty.close()
def run(self, command):
command_id = random.randint(10e4, 10e5)
sentinel = "sentinel-{0}> exit code: ".format(command_id)
        sentinel_pattern = re.compile(sentinel + r"(\d+)")
self._log.reset_log()
self._log.stdout_filter(sentinel_pattern)
self._pty.sendline(command)
self._pty.sendline("echo \"" + sentinel + "$?\"")
index = self._pty.expect([sentinel_pattern, pexpect.EOF, pexpect.TIMEOUT], timeout=self._cmd_timeout)
if index == 0:
if self._pty.match is not None:
exit_code = int(self._pty.match.group(1))
output = self._log.get_log()
return exit_code, output
raise RuntimeError("Unexpected state: Found pattern, but not regexp match.")
if index == 1:
raise RuntimeError("Unexpected EOF in pseudo-terminal")
if index == 2:
raise RuntimeError("Timeout in execution of {}".format(command))
raise RuntimeError("Unexpected state")
| 28.012195
| 117
| 0.60296
|
a2f4951723a1a141f09e7f02d27bf6ae92c4d1f2
| 6,616
|
py
|
Python
|
numba/typing/listdecl.py
|
jdtatz/numba
|
eaba872337e0116f742c157301a3a42bcb1ca63f
|
[
"BSD-2-Clause"
] | 3
|
2019-09-30T20:00:36.000Z
|
2020-07-13T04:17:15.000Z
|
numba/typing/listdecl.py
|
jdtatz/numba
|
eaba872337e0116f742c157301a3a42bcb1ca63f
|
[
"BSD-2-Clause"
] | null | null | null |
numba/typing/listdecl.py
|
jdtatz/numba
|
eaba872337e0116f742c157301a3a42bcb1ca63f
|
[
"BSD-2-Clause"
] | 1
|
2021-12-14T16:21:11.000Z
|
2021-12-14T16:21:11.000Z
|
from __future__ import absolute_import, print_function
from .. import types
from .templates import (ConcreteTemplate, AbstractTemplate, AttributeTemplate,
CallableTemplate, Registry, signature, bound_function,
make_callable_template)
# Ensure list is typed as a collection as well
from . import collections
registry = Registry()
infer = registry.register
infer_global = registry.register_global
infer_getattr = registry.register_attr
@infer_global(list)
class ListBuiltin(AbstractTemplate):
def generic(self, args, kws):
assert not kws
if args:
iterable, = args
if isinstance(iterable, types.IterableType):
dtype = iterable.iterator_type.yield_type
return signature(types.List(dtype), iterable)
else:
return signature(types.List(types.undefined))
@infer_global(sorted)
class SortedBuiltin(CallableTemplate):
def generic(self):
def typer(iterable, reverse=None):
if not isinstance(iterable, types.IterableType):
return
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.List(iterable.iterator_type.yield_type)
return typer
@infer_getattr
class ListAttribute(AttributeTemplate):
key = types.List
# NOTE: some of these should be Sequence / MutableSequence methods
@bound_function("list.append")
def resolve_append(self, list, args, kws):
item, = args
assert not kws
unified = self.context.unify_pairs(list.dtype, item)
if unified is not None:
sig = signature(types.none, unified)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.clear")
def resolve_clear(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
@bound_function("list.copy")
def resolve_copy(self, list, args, kws):
assert not args
assert not kws
return signature(list)
@bound_function("list.count")
def resolve_count(self, list, args, kws):
item, = args
assert not kws
return signature(types.intp, list.dtype)
@bound_function("list.extend")
def resolve_extend(self, list, args, kws):
iterable, = args
assert not kws
if not isinstance(iterable, types.IterableType):
return
dtype = iterable.iterator_type.yield_type
unified = self.context.unify_pairs(list.dtype, dtype)
if unified is not None:
sig = signature(types.none, iterable)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.index")
def resolve_index(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.intp, list.dtype)
elif len(args) == 2:
if isinstance(args[1], types.Integer):
return signature(types.intp, list.dtype, types.intp)
elif len(args) == 3:
if (isinstance(args[1], types.Integer)
and isinstance(args[2], types.Integer)):
return signature(types.intp, list.dtype, types.intp, types.intp)
@bound_function("list.insert")
def resolve_insert(self, list, args, kws):
idx, item = args
assert not kws
if isinstance(idx, types.Integer):
unified = self.context.unify_pairs(list.dtype, item)
if unified is not None:
sig = signature(types.none, types.intp, unified)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.pop")
def resolve_pop(self, list, args, kws):
assert not kws
if not args:
return signature(list.dtype)
else:
idx, = args
if isinstance(idx, types.Integer):
return signature(list.dtype, types.intp)
@bound_function("list.remove")
def resolve_remove(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.none, list.dtype)
@bound_function("list.reverse")
def resolve_reverse(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
def resolve_sort(self, list):
def typer(reverse=None):
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.none
return types.BoundFunction(make_callable_template(key="list.sort",
typer=typer,
recvr=list),
list)
@infer
class AddList(AbstractTemplate):
key = "+"
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
unified = self.context.unify_pairs(a, b)
if unified is not None:
return signature(unified, a, b)
@infer
class InplaceAddList(AbstractTemplate):
key = "+="
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
if self.context.can_convert(b.dtype, a.dtype):
return signature(a, a, b)
@infer
class MulList(AbstractTemplate):
key = "*"
def generic(self, args, kws):
a, b = args
if isinstance(a, types.List) and isinstance(b, types.Integer):
return signature(a, a, types.intp)
@infer
class InplaceMulList(MulList):
key = "*="
class ListCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.List) and isinstance(rhs, types.List):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key,
(lhs.dtype, rhs.dtype), {})
if res is not None:
return signature(types.boolean, lhs, rhs)
@infer
class ListEq(ListCompare):
key = '=='
@infer
class ListNe(ListCompare):
key = '!='
@infer
class ListLt(ListCompare):
key = '<'
@infer
class ListLe(ListCompare):
key = '<='
@infer
class ListGt(ListCompare):
key = '>'
@infer
class ListGe(ListCompare):
key = '>='
| 29.145374
| 80
| 0.586457
|
fa8f2792a0241f9aba5ce8a6e17325d5aa4e334f
| 2,470
|
py
|
Python
|
scripts/calculate_distance.py
|
colbyprior/python-maec
|
109d4517b0123a5f01e31c15818f35772d451705
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/calculate_distance.py
|
colbyprior/python-maec
|
109d4517b0123a5f01e31c15818f35772d451705
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/calculate_distance.py
|
colbyprior/python-maec
|
109d4517b0123a5f01e31c15818f35772d451705
|
[
"BSD-3-Clause"
] | null | null | null |
# calculate_distance script
# Calculates and prints the distance between two or more MAEC Malware Subjects
# NOTE: This code imports and uses the maec.analytics.distance module, which uses the external numpy library.
# Numpy can be found here: https://pypi.python.org/pypi/numpy
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
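# Example invocation (illustrative only; the directory and output names below
# are placeholders, not files shipped with the script):
#   python calculate_distance.py -d ./maec_packages --only_dynamic distances.csv
# This parses every MAEC Package XML in ./maec_packages and writes the pairwise
# distance matrix to distances.csv.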
import os
import maec
import argparse
from maec.analytics.distance import Distance
from maec.package.package import Package
def main():
# Setup the argument parser
parser = argparse.ArgumentParser(description="MAEC Distance Calculation script")
group = parser.add_mutually_exclusive_group()
group.add_argument("-l", "-list", nargs="+", help="a space separated list of MAEC Package files to calculate the distances for")
group.add_argument("-d", "-directory", help="the path to a directory of MAEC Package files to calculate the distances for")
parser.add_argument("--only_static", "--only_static", help="use only static features in the distance calculation", action="store_true")
parser.add_argument("--only_dynamic", "--only_dynamic", help="use only dynamic features (Actions) in the distance calculation", action="store_true")
parser.add_argument("output", help="the name of the CSV file to which the calculated distances will be written")
args = parser.parse_args()
package_list = []
# Parse the input files
if args.l:
for file in args.l:
api_obj = maec.parse_xml_instance(file)['api']
if isinstance(api_obj, Package):
package_list.append(api_obj)
elif args.d:
for filename in os.listdir(args.d):
if '.xml' not in filename:
pass
else:
api_obj = maec.parse_xml_instance(os.path.join(args.d, filename))['api']
if isinstance(api_obj, Package):
package_list.append(api_obj)
# Perform the distance calculation
dist = Distance(package_list)
# Set the particular features that will be used
if args.only_static:
dist.options_dict['use_dynamic_features'] = False
if args.only_dynamic:
dist.options_dict['use_static_features'] = False
dist.calculate()
# Write the results to the specified CSV file
out_file = open(args.output, mode='w')
dist.print_distances(out_file)
out_file.close()
if __name__ == "__main__":
main()
| 43.333333
| 152
| 0.693117
|
e1b60c32f7094e4e5cfc5a3ef8be26699d826492
| 731
|
py
|
Python
|
backend/tests/libs/cinq_test_cls.py
|
gibbsie/cloud-inquisitor
|
03bc5bb70ff5ab27393fae833af7b6b750d39ce5
|
[
"Apache-2.0"
] | 462
|
2017-11-27T20:53:25.000Z
|
2022-03-26T18:32:50.000Z
|
backend/tests/libs/cinq_test_cls.py
|
gibbsie/cloud-inquisitor
|
03bc5bb70ff5ab27393fae833af7b6b750d39ce5
|
[
"Apache-2.0"
] | 103
|
2017-11-28T14:33:26.000Z
|
2020-11-06T20:01:11.000Z
|
backend/tests/libs/cinq_test_cls.py
|
gibbsie/cloud-inquisitor
|
03bc5bb70ff5ab27393fae833af7b6b750d39ce5
|
[
"Apache-2.0"
] | 58
|
2017-11-28T00:49:12.000Z
|
2022-03-26T18:32:44.000Z
|
from cinq_auditor_domain_hijacking import DomainHijackAuditor
from cinq_auditor_ebs import EBSAuditor
from cinq_auditor_required_tags import RequiredTagsAuditor
from cloud_inquisitor.constants import ActionStatus
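# Test double that captures notifications on the instance instead of sending
# them, so tests can inspect what an auditor would have notified.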
class MockNotify(object):
def notify(self, notices):
self._cinq_test_notices = notices
class MockDomainHijackAuditor(MockNotify, DomainHijackAuditor):
pass
class MockEBSAuditor(MockNotify, EBSAuditor):
pass
class MockRequiredTagsAuditor(MockNotify, RequiredTagsAuditor):
def run(self, enable_process_action=True, *args, **kwargs):
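        # When enable_process_action is False, stub process_action to always
        # report success so tests can run the auditor without remediating.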
if not enable_process_action:
self.process_action = lambda resource, action: ActionStatus.SUCCEED
        super().run(*args, **kwargs)
| 26.107143
| 79
| 0.783858
|
9ec540f1aa519a3c163fcf3b9226a6b296e41131
| 4,169
|
py
|
Python
|
AlgorithmDevCode/bgrem2.py
|
norm42/summerize_video
|
18e5665e66f037dfa27382162ab9c41e5c2707dd
|
[
"MIT"
] | 1
|
2020-05-18T23:39:29.000Z
|
2020-05-18T23:39:29.000Z
|
AlgorithmDevCode/bgrem2.py
|
norm42/summerize_video
|
18e5665e66f037dfa27382162ab9c41e5c2707dd
|
[
"MIT"
] | 1
|
2021-06-02T01:48:53.000Z
|
2021-06-02T01:48:53.000Z
|
AlgorithmDevCode/bgrem2.py
|
norm42/summerize_video
|
18e5665e66f037dfa27382162ab9c41e5c2707dd
|
[
"MIT"
] | null | null | null |
#
# Copyright https://github.com/norm42/summerize_video/blob/master/LICENSE.md
# (Mit license)
#
import sys
import numpy as np
import cv2
import create_collage as ccoll
threshold = 20
# Open video file for processing
camera = cv2.VideoCapture('Front.20200506_154854.mp4')
fps = camera.get(cv2.CAP_PROP_FPS) # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
frame_count = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))
duration = frame_count / fps
num_frames = 10
if((duration > 20) and (duration <=40)):
num_frames = 15
elif ( duration > 40):
num_frames = 20
#nsamples = int(duration - 2) # each second
#second_sample = int(duration/num_frames) # number of seconds to sample, only num_frames
#frame_sample = fps * second_sample
iframs_per_sample = int(frame_count/num_frames) # Only process num_frames in any video
nframes_to_proc = frame_count - iframs_per_sample # do not run off end of video (error)
# Get initial reference frame. Generate a grayscale and filtered version
_, backgroundFrame = camera.read()
backgroundFrame = cv2.cvtColor(backgroundFrame, cv2.COLOR_BGR2GRAY)
refdenoise = cv2.fastNlMeansDenoising(backgroundFrame, None, 10.0, 7, 21)
refimage = backgroundFrame
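# refimage accumulates the summary: regions that change in the sampled frames
# are merged into the reference image built up so far.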
for j in range(1, nframes_to_proc):
_, currentFrame = camera.read()
if (j % iframs_per_sample) == 0:
collage_list = []
# Note if you use this program, the directory structures need to be made shown here
# so intermediate files can be saved. Also purge the directories between runs.
ifname = "img/foregnd/foregnd" + str(j) +".png"
filter_fname = "img/filter/filter_foregnd" + str(j) +".png"
raw_fname = "img/raw/raw_foregnd" + str(j) + ".png"
cur_fname = "img/current/cur_frame" + str(j) + ".png"
ref_fname = "img/refimage/refimage" + str(j) + ".png"
coll_fname = "img/collage/coll" + str(j) + ".png"
#
# convert color image to grayscale
srcimage = cv2.cvtColor(currentFrame, cv2.COLOR_BGR2GRAY)
# Filter out some of the camera noise. Not sure how much this helps
        srcdenoise = cv2.fastNlMeansDenoising(srcimage, None, 10.0, 7, 21)
# Write the noise image for analysis
cv2.imwrite(cur_fname, srcdenoise)
collage_list.append(srcdenoise) # filtered new frame
# Difference the reference image with the new frame
rawimage = cv2.absdiff(refdenoise, srcdenoise)
# write for analysis
cv2.imwrite(raw_fname, rawimage)
# Filter out some of the noise in the differenced image
        fordenoise = cv2.fastNlMeansDenoising(rawimage, None, 10.0, 7, 21)
# Write out filtered difference for analysis
cv2.imwrite(filter_fname, fordenoise)
collage_list.append(fordenoise)
# Start to build the mask images. These will be binary images that control
# What parts from the build up reference image are punched out and replaced
# with the complement area in the new frame
th1 = cv2.threshold(fordenoise, threshold, 255, cv2.THRESH_BINARY)[1]
th1_inv = cv2.bitwise_not(th1)
# Write out mask image
cv2.imwrite(ifname, th1)
collage_list.append(th1)
collage_list.append(th1_inv)
# Punch out of the current reference image the motion portion of the new frame
backgnd = cv2.bitwise_and(src1=refimage, src2=refimage, mask=th1_inv)
# Punch out the reference image - background from the new frame
fgnd = cv2.bitwise_and(src1=srcdenoise, src2=srcdenoise, mask=th1)
collage_list.append(backgnd)
# Add the two together to merge in the past reference image with the new frame
# A new reference frame is generated
refimage = cv2.add(backgnd, fgnd)
collage_list.append(refimage)
coll_img = ccoll.create_colg(collage_list, 0.25)
cv2.imwrite(coll_fname, coll_img)
# Write for analysis
cv2.imwrite(ref_fname, refimage)
# Write out the final image
cv2.imwrite("img/final_image.png", refimage)
cv2.imwrite("img/back_img.png", backgroundFrame)
| 45.315217
| 92
| 0.679539
|
4e6d736eb37d8762a439fd1420929c032780033f
| 3,136
|
py
|
Python
|
tests/pipeline/nodes/draw/test_legend.py
|
chngzyk/PeekingDuck
|
299dbbda9104c40822ff1076e98b2d45f9974302
|
[
"Apache-2.0"
] | 79
|
2021-06-04T04:11:33.000Z
|
2022-03-31T17:46:38.000Z
|
tests/pipeline/nodes/draw/test_legend.py
|
chngzyk/PeekingDuck
|
299dbbda9104c40822ff1076e98b2d45f9974302
|
[
"Apache-2.0"
] | 248
|
2021-06-04T07:19:52.000Z
|
2022-03-30T08:03:35.000Z
|
tests/pipeline/nodes/draw/test_legend.py
|
chngzyk/PeekingDuck
|
299dbbda9104c40822ff1076e98b2d45f9974302
|
[
"Apache-2.0"
] | 25
|
2021-06-22T03:48:57.000Z
|
2022-03-16T04:33:52.000Z
|
# Copyright 2021 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for draw legend node
"""
import numpy as np
import pytest
from peekingduck.pipeline.nodes.draw.legend import Node
@pytest.fixture
def draw_legend_bottom():
node = Node(
{
"input": ["all"],
"output": ["img"],
"all_legend_items": ["fps", "count", "zone_count"],
"position": "bottom",
"include": ["all_legend_items"],
}
)
return node
@pytest.fixture
def draw_legend_top():
node = Node(
{
"input": ["all"],
"output": ["img"],
"all_legend_items": ["fps", "count", "zone_count"],
"position": "top",
"include": ["all_legend_items"],
}
)
return node
@pytest.fixture
def draw_legend_fps_only():
node = Node(
{
"input": ["all"],
"output": ["img"],
"all_legend_items": ["fps", "count", "zone_count"],
"position": "top",
"include": ["fps"],
}
)
return node
class TestLegend:
def test_no_relevant_inputs(self, draw_legend_bottom, create_image):
original_img = create_image((28, 28, 3))
input1 = {"img": original_img}
expected_output = {}
results = draw_legend_bottom.run(input1)
assert results == expected_output
# formula: processed image = contrast * image + brightness
def test_draw_legend_bottom_and_top(
self, draw_legend_bottom, draw_legend_top, create_image
):
original_img = create_image((640, 480, 3))
output_img = original_img.copy()
input1 = {"img": output_img, "fps": 50.5, "count": 2, "zone_count": [1, 1]}
results_btm = draw_legend_bottom.run(input1)
assert results_btm != {}
assert original_img.shape == results_btm["img"].shape
np.testing.assert_raises(
AssertionError, np.testing.assert_equal, original_img, results_btm["img"]
)
results_top = draw_legend_top.run(input1)
np.testing.assert_raises(
            AssertionError, np.testing.assert_equal, original_img, results_top["img"]
)
def test_draw_fps_only(self, draw_legend_fps_only, create_image):
original_img = create_image((640, 480, 3))
output_img = original_img.copy()
input1 = {
"img": output_img,
"fps": 50.5,
}
results = draw_legend_fps_only.run(input1)
np.testing.assert_raises(
AssertionError, np.testing.assert_equal, original_img, results["img"]
)
| 29.308411
| 85
| 0.610651
|
8ddae8442b0851bdc04859a6e20172cff5d9f9f2
| 847
|
py
|
Python
|
mvpa2/clfs/sg/__init__.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/clfs/sg/__init__.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
mvpa2/clfs/sg/__init__.py
|
mortonne/PyMVPA
|
98644c5cd9733edd39fac746ea7cf67398674645
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Classifiers provided by shogun (sg) library"""
__docformat__ = "restructuredtext"
from mvpa2.base import externals
if __debug__:
from mvpa2.base import debug
debug("INIT", "mvpa2.clfs.sg")
def setup_module(module):
if not externals.exists("shogun"):
from nose.plugins.skip import SkipTest
raise SkipTest
if externals.exists("shogun"):
from mvpa2.clfs.sg.svm import SVM
if __debug__:
debug("INIT", "mvpa2.clfs.sg end")
| 25.666667
| 78
| 0.563164
|
ab0fb28cc896e624366ee64b7fa086ff45bd8bcc
| 7,631
|
py
|
Python
|
mmf/models/base_model.py
|
facebookresearch/worldsheet
|
13175f8f5e10b13436546ebf1600dc21005124d0
|
[
"BSD-3-Clause"
] | 21
|
2021-09-29T17:19:29.000Z
|
2022-03-14T23:24:51.000Z
|
mmf/models/base_model.py
|
facebookresearch/worldsheet
|
13175f8f5e10b13436546ebf1600dc21005124d0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T00:53:26.000Z
|
2022-03-15T16:08:16.000Z
|
mmf/models/base_model.py
|
facebookresearch/worldsheet
|
13175f8f5e10b13436546ebf1600dc21005124d0
|
[
"BSD-3-Clause"
] | 2
|
2021-11-06T01:35:39.000Z
|
2021-11-27T02:53:49.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Models built on top of Pythia need to inherit ``BaseModel`` class and adhere to
some format. To create a model for MMF, follow this quick cheatsheet.
1. Inherit ``BaseModel`` class, make sure to call ``super().__init__()`` in your
class's ``__init__`` function.
2. Implement `build` function for your model. If you build everything in ``__init__``,
you can just return in this function.
3. Write a `forward` function which takes in a ``SampleList`` as an argument and
returns a dict.
4. Register using ``@registry.register_model("key")`` decorator on top of the
class.
If you are doing logits based predictions, the dict you return from your model
should contain a `scores` field. Losses are automatically calculated by the
``BaseModel`` class and added to this dict if not present.
Example::
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
    @registry.register_model("pythia")
class Pythia(BaseModel):
# config is model_config from global config
def __init__(self, config):
super().__init__(config)
def build(self):
....
def forward(self, sample_list):
scores = torch.rand(sample_list.get_batch_size(), 3127)
return {"scores": scores}
"""
import collections
import warnings
from copy import deepcopy
from mmf.common.registry import registry
from mmf.common.sample import to_device
from mmf.modules.losses import Losses
from mmf.utils.checkpoint import load_pretrained_model
from mmf.utils.download import download_pretrained_model
from torch import nn
class BaseModel(nn.Module):
"""For integration with Pythia's trainer, datasets and other features,
models needs to inherit this class, call `super`, write a build function,
write a forward function taking a ``SampleList`` as input and returning a
dict as output and finally, register it using ``@registry.register_model``
Args:
config (DictConfig): ``model_config`` configuration from global config.
"""
def __init__(self, config):
super().__init__()
self.config = config
self._logged_warning = {"losses_present": False}
self._is_pretrained = False
@property
def is_pretrained(self):
return self._is_pretrained
@is_pretrained.setter
def is_pretrained(self, x):
self._is_pretrained = x
def build(self):
"""Function to be implemented by the child class, in case they need to
build their model separately than ``__init__``. All model related
downloads should also happen here.
"""
raise NotImplementedError(
"Build method not implemented in the child model class."
)
def init_losses(self):
"""Initializes loss for the model based ``losses`` key. Automatically called by
MMF internally after building the model.
"""
losses = self.config.get("losses", [])
if len(losses) == 0 and not self.is_pretrained:
warnings.warn(
"No losses are defined in model configuration. You are expected "
"to return loss in your return dict from forward."
)
self.losses = Losses(losses)
@classmethod
def config_path(cls):
return None
@classmethod
def format_state_key(cls, key):
"""Can be implemented if something special needs to be done
key when pretrained model is being load. This will adapt and return
keys according to that. Useful for backwards compatibility. See
updated load_state_dict below. For an example, see VisualBERT model's
code.
Args:
key (string): key to be formatted
Returns:
string: formatted key
"""
return key
def load_state_dict(self, state_dict, *args, **kwargs):
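        # Rewrite checkpoint keys through format_state_key so older
        # checkpoints remain loadable after attribute renames.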
copied_state_dict = deepcopy(state_dict)
for key in list(copied_state_dict.keys()):
formatted_key = self.format_state_key(key)
copied_state_dict[formatted_key] = copied_state_dict.pop(key)
return super().load_state_dict(copied_state_dict, *args, **kwargs)
def forward(self, sample_list, *args, **kwargs):
"""To be implemented by child class. Takes in a ``SampleList`` and
returns back a dict.
Args:
sample_list (SampleList): SampleList returned by the DataLoader for
current iteration
Returns:
Dict: Dict containing scores object.
"""
raise NotImplementedError(
"Forward of the child model class needs to be implemented."
)
def __call__(self, sample_list, *args, **kwargs):
# Move to proper device i.e. same as the model before passing
model_device = next(self.parameters()).device
sample_list = to_device(sample_list, model_device)
model_output = super().__call__(sample_list, *args, **kwargs)
# Don't do anything fancy to output if it is pretrained
if self.is_pretrained:
return model_output
        # Make sure that the output from the model is a Mapping
assert isinstance(
model_output, collections.abc.Mapping
), "A dict must be returned from the forward of the model."
if "losses" in model_output:
if not self._logged_warning["losses_present"]:
warnings.warn(
"'losses' already present in model output. "
"No calculation will be done in base model."
)
self._logged_warning["losses_present"] = True
assert isinstance(
model_output["losses"], collections.abc.Mapping
), "'losses' must be a dict."
else:
model_output["losses"] = self.losses(sample_list, model_output)
return model_output
def load_requirements(self, *args, **kwargs):
requirements = self.config.get("zoo_requirements", [])
if isinstance(requirements, str):
requirements = [requirements]
for item in requirements:
download_pretrained_model(item, *args, **kwargs)
def format_for_prediction(self, results, report):
"""Implement this method in models if it requires to modify prediction
results using report fields. Note that the required fields in report
should already be gathered in report.
"""
return results
@classmethod
def from_pretrained(cls, model_name, *args, **kwargs):
model_key = model_name.split(".")[0]
model_cls = registry.get_model_class(model_key)
assert (
model_cls == cls
), f"Incorrect pretrained model key {model_name} for class {cls.__name__}"
output = load_pretrained_model(model_name, *args, **kwargs)
config, checkpoint = output["config"], output["checkpoint"]
        # Some models need registry updates in order to load a pretrained model
# If they have this method, call it so they can update accordingly
if hasattr(cls, "update_registry_for_pretrained"):
cls.update_registry_for_pretrained(config, checkpoint, output)
instance = cls(config)
instance.is_pretrained = True
instance.build()
instance.load_state_dict(checkpoint)
instance.eval()
return instance
| 35.004587
| 87
| 0.654829
|
1c0eb30ba262827170e41dce538fbd6a4a8c3e56
| 3,508
|
py
|
Python
|
poship/engine/components/user/handlers.py
|
candango/podship-engine
|
d8cef10da00182c034320113fe1d990f40a585bd
|
[
"Apache-2.0"
] | null | null | null |
poship/engine/components/user/handlers.py
|
candango/podship-engine
|
d8cef10da00182c034320113fe1d990f40a585bd
|
[
"Apache-2.0"
] | null | null | null |
poship/engine/components/user/handlers.py
|
candango/podship-engine
|
d8cef10da00182c034320113fe1d990f40a585bd
|
[
"Apache-2.0"
] | 2
|
2016-08-04T14:55:17.000Z
|
2016-08-05T13:50:50.000Z
|
#!/usr/bin/env python
#
# Copyright 2015-2016 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import firenado.tornadoweb
from firenado.service import served_by
import logging
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from tornado.escape import json_decode
from wtforms.fields import StringField, PasswordField
from wtforms.validators import DataRequired
from wtforms_tornado import Form
import wtforms_json
wtforms_json.init()
# TODO: Use this instead of wtforms
# https://pypi.python.org/pypi/jsonschema
import six
# Schema definition from:
# http://spacetelescope.github.io/understanding-json-schema/structuring.html
schema = {
"type": "object",
"properties": {
"payload": {
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ["username", "password"],
},
},
"required": ["payload"],
}
# Schema error should be 400
class LoginForm(Form):
username = StringField(validators=[DataRequired(
'The user name is required.')])
password = PasswordField(validators=[DataRequired(
'Password is required.')])
logger = logging.getLogger(__name__)
class LoginHandler(firenado.tornadoweb.TornadoHandler):
@served_by('diasporapy.services.account.AccountService')
def post(self):
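        # Expected body: {"payload": {"username": ..., "password": ...}}.
        # Schema violations return 400; form or credential failures return 401.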
data = None
try:
data = json_decode(self.request.body)
try:
validate(data, schema)
form = LoginForm.from_json(data['payload'])
error_data = {}
error_data['errors'] = {}
if form.validate():
is_valid_login = self.account_service.is_login_valid(form.data)
if is_valid_login:
response = {'status': 200}
response['userid'] = is_valid_login.id
print(response)
self.write(response)
else:
self.set_status(401)
error_data['errors']['form'] = ['Invalid Login']
self.write(error_data)
else:
self.set_status(401)
error_data['errors'].update(form.errors)
self.write(error_data)
except ValidationError as e:
self.set_status(400)
response = {'status': 400}
response['errors'] = {
'schema': e.message
}
self.write(response)
except ValueError as e:
self.set_status(500)
response = {'status': 500}
response['errors'] = {
'schema': ["Invalid json body content."]
}
self.write(response)
def get_data_sources(self):
return self.get_data_connected().data_sources
| 32.183486
| 83
| 0.590935
|
b141f6bebc0e3315e743b8d9a2f1496dbe7dc56b
| 1,970
|
py
|
Python
|
Experiments/VLDB20/Synthetic_data/report_median.py
|
northeastern-datalab/anyk-code
|
45354ecb8716b227e8cf0fe8e0c8afc5327f3828
|
[
"Apache-2.0"
] | 2
|
2021-09-03T22:07:01.000Z
|
2021-11-09T19:01:58.000Z
|
Experiments/VLDB20/Synthetic_data/report_median.py
|
northeastern-datalab/anyk-code
|
45354ecb8716b227e8cf0fe8e0c8afc5327f3828
|
[
"Apache-2.0"
] | null | null | null |
Experiments/VLDB20/Synthetic_data/report_median.py
|
northeastern-datalab/anyk-code
|
45354ecb8716b227e8cf0fe8e0c8afc5327f3828
|
[
"Apache-2.0"
] | 2
|
2021-07-05T17:51:36.000Z
|
2021-09-03T22:07:02.000Z
|
#!/usr/bin/env python
import sys
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# For debugging
def print_list(times, k_list):
for i in range(len(k_list)):
print "k = " + str(k_list[i]) + " : " + str(times[i])
## -- Read input
import argparse
parser = argparse.ArgumentParser(description='Plotting script')
parser.add_argument('-a', action="store", dest="algorithm", default="BatchSorting", help="Name of the algorithm")
parser.add_argument('-i', action="store", dest="inFileName", default="out", help="Name of input file")
parser.add_argument('-k', action="store", dest="k_to_report", default="-1", help="TTK to report")
arg_results = parser.parse_args()
algorithm = arg_results.algorithm
inFileName = arg_results.inFileName
k_to_report = int(arg_results.k_to_report)
# Read file
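# Each measurement line starts with "k=", with the k value as its second
# token and the runtime as its fourth token.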
times = []
times_aux = [] # times_aux contains a list of lists of runtimes (one list for each k contains all the runtimes for that k)
k_list = []
max_k = 0
fp1 = open(inFileName + "_" + algorithm + ".out")
line = fp1.readline()
while line:
if line.startswith("k="):
tokens = line.split()
k = int(tokens[1])
if (k == 1): index = 0 # The index tells us which position in the list corresponds to the k we read
else: index += 1
if (k > max_k):
max_k = k
k_list.append(k)
times_aux.append([])
times_aux[index].append(float(tokens[3]))
line = fp1.readline()
fp1.close()
# If some instances contained more data points than others, cut them off
instances_no = len(times_aux[0])
while (len(times_aux[-1]) < instances_no):
times_aux = times_aux[:-1]
k_list = k_list[:-1]
# Now build one list by taking the median
times = []
index = 0
for k in k_list:
runtimes = times_aux[index]
median_runtime = np.median(runtimes)
times.append(median_runtime)
index += 1
print algorithm + " : " + ('%.2f' % times[k_to_report])
| 29.402985
| 123
| 0.669036
|
37c1a6ad9e5aa7960d2f6a494f47f51e1ac0007a
| 52,379
|
py
|
Python
|
pyzfscmds/cmd.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 9
|
2018-07-08T20:01:33.000Z
|
2022-03-29T11:31:51.000Z
|
pyzfscmds/cmd.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 1
|
2019-07-10T12:16:53.000Z
|
2019-07-10T12:16:53.000Z
|
pyzfscmds/cmd.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 5
|
2018-06-04T02:33:43.000Z
|
2020-05-25T22:48:58.000Z
|
"""ZFS library"""
import itertools
import os
import subprocess
from typing import List
import pyzfscmds.check
import pyzfscmds.utility
import pyzfscmds.system.agnostic
"""
ZFS commands
"""
class _Command:
def __init__(self,
sub_command: str,
options: list = None,
properties: List[str] = None,
targets: List[str] = None,
main_command: str = "zfs",
env_variables_override: dict = None):
self.main_command = main_command
self.sub_command = sub_command
self.targets = targets
self.env_variables_override = env_variables_override
self.call_args = [o for o in options] if options is not None else []
if properties:
self.properties = self._prepare_properties(properties)
@staticmethod
def _prepare_properties(properties: List[str]) -> list:
if properties is not None:
prop_list = [["-o", prop] for prop in properties]
return list(itertools.chain.from_iterable(prop_list))
return []
def argcheck_depth(self, depth):
if depth is not None:
if depth < 0:
raise RuntimeError("Depth cannot be negative")
self.call_args.extend(["-d", str(depth)])
def argcheck_columns(self, columns: list):
if columns:
if "all" in columns:
self.call_args.extend(["-o", "all"])
else:
self.call_args.extend(["-o", ",".join(columns)])
def run(self) -> str:
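        # Build the full command line from the main command, sub-command,
        # options, properties and targets, then run it and return its stdout.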
new_env = dict(os.environ)
if self.env_variables_override:
for key, value in self.env_variables_override.items():
new_env[key] = value
arguments = self.call_args
if hasattr(self, 'properties') and self.properties:
arguments.extend(self.properties)
if hasattr(self, 'targets') and self.targets:
arguments.extend(self.targets)
zfs_call = [self.main_command, self.sub_command] + arguments
try:
output = subprocess.check_output(zfs_call,
universal_newlines=True,
stderr=subprocess.PIPE,
env=new_env)
except subprocess.CalledProcessError as e:
raise e
return output
"""
zpool Commands
"""
def zpool_set(pool: str, prop: str) -> str:
"""
zpool set property=value pool
Sets the given property on the specified pool. See the
Properties section for more information on what properties
can be set and acceptable values.
"""
if pool is None:
raise TypeError("Target name cannot be of type 'None'")
command = _Command("set", [],
main_command="zpool",
targets=[prop, pool])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to set pool property {prop}\n{e.output}\n")
def zpool_get(pool: str = None,
scripting: bool = True,
properties: list = None,
columns: list = None,
parsable: bool = False) -> str:
"""
zpool get [-Hp] [-o field[,field]...] all|property[,property]...
pool...
Retrieves the given list of properties (or all properties if
all is used) for the specified storage pool(s). These prop‐
erties are displayed with the following fields:
name Name of storage pool
property Property name
value Property value
source Property source, either 'default' or 'local'.
See the Properties section for more information on the avail‐
able pool properties.
-H Scripted mode. Do not display headers, and separate
fields by a single tab instead of arbitrary space.
-o field
A comma-separated list of columns to display.
name,property,value,source is the default value.
-p Display numbers in parsable (exact) values.
NOTE: -o requires zfsonlinux 0.7.0
https://github.com/zfsonlinux/zfs/commit/2a8b84b747cb27a175aa3a45b8cdb293cde31886
"""
call_args = []
if scripting:
call_args.append("-H")
if parsable:
call_args.append("-p")
if properties is None:
property_target = "all"
elif properties:
if "all" in properties:
if len(properties) < 2:
property_target = "all"
else:
raise RuntimeError(f"Cannot use 'all' with other properties")
else:
property_target = ",".join(properties)
else:
raise RuntimeError(f"Cannot request no property type")
target_list = [property_target]
if pool is not None:
target_list.append(pool)
command = _Command("get", call_args,
main_command="zpool",
targets=target_list)
command.argcheck_columns(columns)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to get zfs property '{property_target}' "
f"from {pool}\n{e.output}\n")
"""
zfs Commands
"""
def zfs_create_dataset(filesystem: str,
create_parent: bool = False,
mounted: bool = True,
properties: list = None) -> str:
"""
zfs create [-pu] [-o property=value]... filesystem
"""
if filesystem is None:
raise TypeError("Filesystem name cannot be of type 'None'")
call_args = []
if create_parent:
call_args.append('-p')
if not mounted:
if pyzfscmds.check.check_valid_system() == "freebsd":
call_args.append('-u')
else:
raise SystemError("-u is not valid on this system")
create = _Command("create", call_args, properties=properties, targets=[filesystem])
try:
return create.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to create {filesystem}\n{e.output}\n")
def zfs_create_zvol(volume: str,
size: int,
size_suffix: str = "G",
blocksize: int = None,
create_parent: bool = False,
sparse: bool = False,
properties: list = None) -> str:
"""
zfs create [-ps] [-b blocksize] [-o property=value]... -V size volume
"""
if volume is None:
raise TypeError("Filesystem name cannot be of type 'None'")
call_args = []
if create_parent:
call_args = ["-p"]
if sparse:
call_args.append('-s')
if blocksize:
call_args.extend(['-b', str(blocksize)])
call_args.extend(['-V', f"{str(size)}{size_suffix}"])
command = _Command("create", call_args, properties=properties, targets=[volume])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to create {volume}\n{e.output}\n")
def zfs_clone(snapname: str,
filesystem: str,
properties: list = None,
create_parent: bool = False) -> str:
if snapname is None:
raise TypeError("Snapshot name cannot be of type 'None'")
call_args = []
if create_parent:
call_args = ["-p"]
command = _Command("clone", call_args, properties=properties, targets=[snapname, filesystem])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to clone {filesystem}\n{e.output}\n")
def zfs_snapshot(filesystem: str,
snapname: str,
recursive: bool = False,
properties: list = None) -> str:
"""
zfs snapshot|snap [-r] [-o property=value]...
filesystem@snapname|volume@snapname
filesystem@snapname|volume@snapname...
"""
if snapname is None:
raise TypeError("Snapshot name cannot be of type 'None'")
call_args = []
if recursive:
call_args = ["-r"]
command = _Command("snapshot", call_args,
properties=properties, targets=[f"{filesystem}@{snapname}"])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to snapshot {filesystem}\n{e.output}\n")
def zfs_get(target: str,
recursive: bool = False,
depth: int = None,
scripting: bool = True,
parsable: bool = False,
columns: list = None,
zfs_types: list = None,
source: list = None,
properties: list = None,
env_variables_override: dict = None) -> str:
"""
zfs get [-r|-d depth] [-Hp] [-o all | field[,field]...] [-t
type[,type]...] [-s source[,source]...] all | property[,property]...
filesystem|volume|snapshot...
"""
call_args = []
if recursive:
call_args.append("-r")
if scripting:
call_args.append("-H")
if parsable:
call_args.append("-p")
if zfs_types:
call_args.extend(["-t", ",".join(zfs_types)])
if source:
call_args.extend(["-s", ",".join(source)])
if properties is None:
property_target = "all"
elif properties:
if "all" in properties:
if len(properties) < 2:
property_target = "all"
else:
raise RuntimeError(f"Cannot use 'all' with other properties")
else:
property_target = ",".join(properties)
else:
raise RuntimeError(f"Cannot request no property type")
command = _Command("get", call_args, targets=[property_target, target],
env_variables_override=env_variables_override)
command.argcheck_depth(depth)
command.argcheck_columns(columns)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to get zfs properties of {target}\n{e.output}\n")
def zfs_list(target: str,
recursive: bool = False,
depth: int = None,
scripting: bool = True,
parsable: bool = False,
columns: list = None,
zfs_types: list = None,
sort_properties_ascending: list = None,
sort_properties_descending: list = None,
env_variables_override: dict = None) -> str:
"""
zfs list [-r|-d depth] [-Hp] [-o property[,property]...] [-t
type[,type]...] [-s property]... [-S property]...
filesystem|volume|snapshot...
"""
call_args = []
if recursive:
call_args.append("-r")
if scripting:
call_args.append("-H")
if parsable:
call_args.append("-p")
if zfs_types:
call_args.extend(["-t", ",".join(zfs_types)])
if sort_properties_ascending is not None:
call_args.extend(
[p for prop in sort_properties_ascending for p in ("-s", prop)])
if sort_properties_descending is not None:
call_args.extend(
[p for prop in sort_properties_descending for p in ("-S", prop)])
command = _Command("list", call_args, targets=[target],
env_variables_override=env_variables_override)
command.argcheck_depth(depth)
command.argcheck_columns(columns)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to get zfs list of {target}\n{e.output}\n")
def zfs_destroy(target: str,
recursive_children: bool = False,
recursive_dependents: bool = False,
force_unmount: bool = False,
dry_run: bool = False,
machine_parsable: bool = False,
verbose: bool = False) -> str:
"""
zfs destroy [-fnpRrv] filesystem|volume
"""
if target is None:
raise TypeError("Target name cannot be of type 'None'")
call_args = []
if recursive_children:
call_args.append("-r")
if recursive_dependents:
call_args.append("-R")
if force_unmount:
call_args.append("-f")
if dry_run:
call_args.append("-n")
if machine_parsable:
call_args.append("-p")
if verbose:
call_args.append("-v")
command = _Command("destroy", call_args, targets=[target])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to destroy {target}\n{e.output}\n")
def zfs_destroy_snapshot(snapname: str,
recursive_descendents: bool = False,
recursive_clones: bool = False,
dry_run: bool = False,
machine_parsable: bool = False,
verbose: bool = False,
defer: bool = False) -> str:
"""
zfs destroy [-dnpRrv] snapshot[%snapname][,...]
"""
if snapname is None:
raise TypeError("Snapshot name cannot be of type 'None'")
call_args = []
if recursive_descendents:
call_args.append("-r")
if recursive_clones:
call_args.append("-R")
if dry_run:
call_args.append("-n")
if machine_parsable:
call_args.append("-p")
if verbose:
call_args.append("-v")
if defer:
call_args.append("-d")
command = _Command("destroy", call_args, targets=[snapname])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to destroy {snapname}\n{e.output}\n")
def zfs_rollback(snapname: str,
destroy_between: bool = False,
destroy_more_recent: bool = False,
force_unmount: bool = False):
"""
zfs rollback [-rRf] snapshot
"""
if snapname is None:
raise TypeError("Snapshot name cannot be of type 'None'")
call_args = []
if destroy_between:
call_args.append("-r")
if destroy_more_recent:
call_args.append("-R")
if force_unmount:
call_args.append("-f")
command = _Command("rollback", call_args, targets=[snapname])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to rollback {snapname}\n{e.output}\n")
def zfs_promote(clone: str) -> str:
"""
zfs promote clone-filesystem
"""
command = _Command("promote", [], targets=[clone])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to promote {clone}\n{e.output}\n")
def zfs_rename(target_source: str,
target_dest: str,
create_parents: bool = False,
dont_remount: bool = False,
force_unmount: bool = False,
recursive: bool = False) -> str:
"""
zfs rename [-f] filesystem|volume|snapshot filesystem|volume|snapshot
zfs rename [-f] -p filesystem|volume filesystem|volume
zfs rename -u [-p] filesystem filesystem
zfs rename -r snapshot snapshot
"""
if target_source is None or target_dest is None:
raise TypeError("Target name cannot be of type 'None'")
call_args = []
if create_parents:
call_args.append("-p")
if dont_remount:
call_args.append("-u")
if force_unmount:
call_args.append("-f")
if recursive:
call_args.append("-r")
command = _Command("rename", call_args, targets=[target_source, target_dest])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to rename {target_source} to {target_dest}\n{e.output}\n")
def zfs_set(target: str, prop: str) -> str:
"""
zfs set property=value [property=value]... filesystem|volume|snapshot
"""
if target is None:
raise TypeError("Target name cannot be of type 'None'")
command = _Command("set", [], targets=[prop, target])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to \n{e.output}\n")
def zfs_inherit(prop: str,
target: str,
recursive: bool = False,
revert: bool = False) -> str:
"""
zfs inherit [-rS] property filesystem|volume|snapshot...
"""
if prop is None:
raise TypeError("Property name cannot be of type 'None'")
if target is None:
raise TypeError("Target name cannot be of type 'None'")
call_args = []
if recursive:
call_args.append("-r")
if revert:
call_args.append("-S")
command = _Command("inherit", call_args, targets=[prop, target])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to inherit property\n{e.output}\n")
def zfs_upgrade_list(supported: bool = False) -> str:
"""
zfs upgrade [-v]
Displays a list of file systems that are not the most recent version.
-v Displays ZFS filesystem versions supported by the current
software. The current ZFS filesystem version and all previous
supported versions are displayed, along with an explanation
of the features provided with each version.
"""
call_args = []
if supported:
call_args.append("-v")
command = _Command("upgrade", call_args)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to list upgradeable filesystems\n{e.output}\n")
def zfs_upgrade(target: str = None,
descendent: bool = False,
version: str = None,
upgrade_all: bool = False) -> str:
"""
zfs upgrade [-r] [-V version] -a | filesystem
"""
if target is not None and upgrade_all:
raise RuntimeError("Both target and upgrade all cannot be true")
call_args = []
if descendent:
call_args.append("-r")
if upgrade_all:
call_args.append("-a")
if version is not None:
call_args.extend(["-V", version])
targets = [target] if target is not None else []
command = _Command("upgrade", call_args, targets=targets)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to run upgrade\n{e.output}\n")
def zfs_mount_list() -> str:
"""
zfs mount
Displays all ZFS file systems currently mounted.
"""
command = _Command("mount", [])
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to \n{e.output}\n")
def zfs_mount(target: str = None,
progress: bool = False,
overlay: bool = False,
properties: list = None,
mount_all: bool = False) -> str:
"""
zfs mount [-vO] [-o property[,property]...] -a | filesystem
"""
if target is not None and mount_all:
raise RuntimeError("Both target and unmount all cannot be true")
call_args = []
if progress:
call_args.append("-v")
if overlay:
call_args.append("-O")
if mount_all:
call_args.append("-a")
if properties:
call_args.extend(["-o", ",".join(properties)])
targets = [target] if target is not None else []
command = _Command("mount", call_args, targets=targets)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to mount target\n{e.output}\n")
def zfs_unmount(target: str = None,
force: bool = False,
unmount_all: bool = False) -> str:
"""
zfs unmount|umount [-f] -a | filesystem|mountpoint
"""
if target is not None and unmount_all:
raise RuntimeError("Both target and unmount all cannot be true")
call_args = []
if force:
call_args.append("-f")
if unmount_all:
call_args.append("-a")
targets = [target] if target is not None else []
command = _Command("unmount", call_args, targets=targets)
try:
return command.run()
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Failed to unmount {target}\n{e.output}\n")
# TODO: Unimplemented:
# def zfs_userspace():
# """
# zfs userspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
# [-t type[,type]...] filesystem|snapshot
#
# Displays space consumed by, and quotas on, each user in the specified
# filesystem or snapshot. This corresponds to the userused@user and
# userquota@user properties.
#
# -n Print numeric ID instead of user/group name.
#
# -H Do not print headers, use tab-delimited output.
#
# -p Use exact (parsable) numeric output.
#
# -o field[,field]...
# Display only the specified fields from the following set:
# type,name,used,quota. The default is to display all fields.
#
# -s field
# Sort output by this field. The -s and -S flags may be speci-
# fied multiple times to sort first by one field, then by
# another. The default is -s type -s name.
#
# -S field
# Sort by this field in reverse order. See -s.
#
# -t type[,type]...
# Print only the specified types from the following set:
# all,posixuser,smbuser,posixgroup,smbgroup.
#
# The default is -t posixuser,smbuser.
#
# The default can be changed to include group types.
#
# -i Translate SID to POSIX ID. This flag currently has no effect
# on FreeBSD.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_groupspace():
# """
# zfs groupspace [-Hinp] [-o field[,field]...] [-s field]... [-S field]...
# [-t type[,type]...] filesystem|snapshot
#
# Displays space consumed by, and quotas on, each group in the speci-
# fied filesystem or snapshot. This subcommand is identical to "zfs
# userspace", except that the default types to display are -t
# posixgroup,smbgroup.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
#
#
# def zfs_share():
# """
# zfs share -a | filesystem
#
# Shares ZFS file systems that have the sharenfs property set.
#
# -a Share all ZFS file systems that have the sharenfs property
# set. This command may be executed on FreeBSD system startup
# by /etc/rc.d/zfs. For more information, see variable
# zfs_enable in rc.conf(5).
#
# filesystem
# Share the specified filesystem according to the sharenfs
# property. File systems are shared when the sharenfs property
# is set.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_unshare():
# """
# zfs unshare -a | filesystem|mountpoint
#
# Unshares ZFS file systems that have the sharenfs property set.
#
# -a Unshares ZFS file systems that have the sharenfs property
# set. This command may be executed on FreeBSD system shutdown
# by /etc/rc.d/zfs. For more information, see variable
# zfs_enable in rc.conf(5).
#
# filesystem | mountpoint
# Unshare the specified filesystem. The command can also be
# given a path to a ZFS file system shared on the system.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_bookmark():
# """
# zfs bookmark snapshot bookmark
#
# Creates a bookmark of the given snapshot. Bookmarks mark the point
# in time when the snapshot was created, and can be used as the incre-
# mental source for a "zfs send" command.
#
# This feature must be enabled to be used. See zpool-features(7) for
# details on ZFS feature flags and the bookmark feature.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_send():
# """
# zfs send [-DnPpRveL] [-i snapshot | -I snapshot] snapshot
#
# Creates a stream representation of the last snapshot argument (not
# part of -i or -I) which is written to standard output. The output can
# be redirected to a file or to a different system (for example, using
# ssh(1)). By default, a full stream is generated.
#
# -i snapshot
# Generate an incremental stream from the first snapshot (the
# incremental source) to the second snapshot (the incremental
# target). The incremental source can be specified as the last
# component of the snapshot name (the @ character and
# following) and it is assumed to be from the same file system
# as the incremental target.
#
# If the destination is a clone, the source may be the origin
# snapshot, which must be fully specified (for example,
# pool/fs@origin, not just @origin).
#
# -I snapshot
# Generate a stream package that sends all intermediary snap-
# shots from the first snapshot to the second snapshot. For
# example, -I @a fs@d is similar to -i @a fs@b; -i @b fs@c; -i
# @c fs@d. The incremental source may be specified as with the
# -i option.
#
# -R Generate a replication stream package, which will replicate
# the specified filesystem, and all descendent file systems, up
# to the named snapshot. When received, all properties, snap-
# shots, descendent file systems, and clones are preserved.
#
# If the -i or -I flags are used in conjunction with the -R
# flag, an incremental replication stream is generated. The
# current values of properties, and current snapshot and file
# system names are set when the stream is received. If the -F
# flag is specified when this stream is received, snapshots and
# file systems that do not exist on the sending side are
# destroyed.
#
# -D Generate a deduplicated stream. Blocks which would have been
# sent multiple times in the send stream will only be sent
# once. The receiving system must also support this feature to
# receive a deduplicated stream. This flag can be used regard-
# less of the dataset's dedup property, but performance will be
# much better if the filesystem uses a dedup-capable checksum
# (eg. sha256).
#
# -L Generate a stream which may contain blocks larger than 128KB.
# This flag has no effect if the large_blocks pool feature is
# disabled, or if the recordsize property of this filesystem
# has never been set above 128KB. The receiving system must
# have the large_blocks pool feature enabled as well. See
# zpool-features(7) for details on ZFS feature flags and the
# large_blocks feature.
#
# -e Generate a more compact stream by using WRITE_EMBEDDED
# records for blocks which are stored more compactly on disk by
# the embedded_data pool feature. This flag has no effect if
# the embedded_data feature is disabled. The receiving system
# must have the embedded_data feature enabled. If the
# lz4_compress feature is active on the sending system, then
# the receiving system must have that feature enabled as well.
# See zpool-features(7) for details on ZFS feature flags and
# the embedded_data feature.
#
# -p Include the dataset's properties in the stream. This flag is
# implicit when -R is specified. The receiving system must also
# support this feature.
#
# -n Do a dry-run ("No-op") send. Do not generate any actual send
# data. This is useful in conjunction with the -v or -P flags
# to determine what data will be sent. In this case, the ver-
# bose output will be written to standard output (contrast with
# a non-dry-run, where the stream is written to standard output
# and the verbose output goes to standard error).
#
# -P Print machine-parsable verbose information about the stream
# package generated.
#
# -v Print verbose information about the stream package generated.
# This information includes a per-second report of how much
# data has been sent.
#
# The format of the stream is committed. You will be able to receive
# your streams on future versions of ZFS.
#
# zfs send [-eL] [-i snapshot|bookmark] filesystem|volume|snapshot
#
# Generate a send stream, which may be of a filesystem, and may be
# incremental from a bookmark. If the destination is a filesystem or
# volume, the pool must be read-only, or the filesystem must not be
# mounted. When the stream generated from a filesystem or volume is
# received, the default snapshot name will be (--head--).
#
# -i snapshot|bookmark
# Generate an incremental send stream. The incremental source
# must be an earlier snapshot in the destination's history. It
# will commonly be an earlier snapshot in the destination's
# filesystem, in which case it can be specified as the last
# component of the name (the # or @ character and following).
#
# If the incremental target is a clone, the incremental source
# can be the origin snapshot, or an earlier snapshot in the
# origin's filesystem, or the origin's origin, etc.
#
# -L Generate a stream which may contain blocks larger than 128KB.
# This flag has no effect if the large_blocks pool feature is
# disabled, or if the recordsize property of this filesystem
# has never been set above 128KB. The receiving system must
# have the large_blocks pool feature enabled as well. See
# zpool-features(7) for details on ZFS feature flags and the
# large_blocks feature.
#
# -e Generate a more compact stream by using WRITE_EMBEDDED
# records for blocks which are stored more compactly on disk by
# the embedded_data pool feature. This flag has no effect if
# the embedded_data feature is disabled. The receiving system
# must have the embedded_data feature enabled. If the
# lz4_compress feature is active on the sending system, then
# the receiving system must have that feature enabled as well.
# See zpool-features(7) for details on ZFS feature flags and
# the embedded_data feature.
#
# zfs send [-Penv] -t receive_resume_token
# Creates a send stream which resumes an interrupted receive. The
# receive_resume_token is the value of this property on the filesystem
# or volume that was being received into. See the documentation for
# zfs receive -s for more details.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_receive():
# """
# zfs receive|recv [-vnsFu] [-o origin=snapshot] filesystem|volume|snapshot
#
# zfs receive|recv [-vnsFu] [-d | -e] [-o origin=snapshot] filesystem
#
# Creates a snapshot whose contents are as specified in the stream pro-
# vided on standard input. If a full stream is received, then a new
# file system is created as well. Streams are created using the "zfs
# send" subcommand, which by default creates a full stream. "zfs recv"
# can be used as an alias for "zfs receive".
#
# If an incremental stream is received, then the destination file sys-
# tem must already exist, and its most recent snapshot must match the
# incremental stream's source. For zvols, the destination device link
# is destroyed and recreated, which means the zvol cannot be accessed
# during the receive operation.
#
# When a snapshot replication package stream that is generated by using
# the "zfs send -R" command is received, any snapshots that do not
# exist on the sending location are destroyed by using the "zfs destroy
# -d" command.
#
# The name of the snapshot (and file system, if a full stream is
# received) that this subcommand creates depends on the argument type
# and the -d or -e option.
#
# If the argument is a snapshot name, the specified snapshot is cre-
# ated. If the argument is a file system or volume name, a snapshot
# with the same name as the sent snapshot is created within the speci-
# fied filesystem or volume. If the -d or -e option is specified, the
# snapshot name is determined by appending the sent snapshot's name to
# the specified filesystem. If the -d option is specified, all but the
# pool name of the sent snapshot path is appended (for example, b/c@1
# appended from sent snapshot a/b/c@1), and if the -e option is speci-
# fied, only the tail of the sent snapshot path is appended (for exam-
# ple, c@1 appended from sent snapshot a/b/c@1). In the case of -d,
# any file systems needed to replicate the path of the sent snapshot
# are created within the specified file system.
#
# -d Use the full sent snapshot path without the first element
# (without pool name) to determine the name of the new snapshot
# as described in the paragraph above.
#
# -e Use only the last element of the sent snapshot path to deter-
# mine the name of the new snapshot as described in the para-
# graph above.
#
# -u File system that is associated with the received stream is
# not mounted.
#
# -v Print verbose information about the stream and the time
# required to perform the receive operation.
#
# -n Do not actually receive the stream. This can be useful in
# conjunction with the -v option to verify the name the receive
# operation would use.
#
# -o origin=snapshot
# Forces the stream to be received as a clone of the given
# snapshot. If the stream is a full send stream, this will
# create the filesystem described by the stream as a clone of
# the specified snapshot. Which snapshot was specified will not
# affect the success or failure of the receive, as long as the
# snapshot does exist. If the stream is an incremental send
# stream, all the normal verification will be performed.
#
# -F Force a rollback of the file system to the most recent snap-
# shot before performing the receive operation. If receiving an
# incremental replication stream (for example, one generated by
# "zfs send -R {-i | -I}"), destroy snapshots and file systems
# that do not exist on the sending side.
#
# -s If the receive is interrupted, save the partially received
# state, rather than deleting it. Interruption may be due to
# premature termination of the stream (e.g. due to network
# failure or failure of the remote system if the stream is
# being read over a network connection), a checksum error in
# the stream, termination of the zfs receive process, or
# unclean shutdown of the system.
#
# The receive can be resumed with a stream generated by zfs
# send -t token, where the token is the value of the
# receive_resume_token property of the filesystem or volume
# which is received into.
#
# To use this flag, the storage pool must have the
# extensible_dataset feature enabled. See zpool-features(5)
# for details on ZFS feature flags.
#
# zfs receive|recv -A filesystem|volume
# Abort an interrupted zfs receive -s, deleting its saved partially
# received state.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
# command = _Command("rollback", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_allow():
# """
# zfs allow filesystem|volume
#
# Displays permissions that have been delegated on the specified
# filesystem or volume. See the other forms of "zfs allow" for more
# information.
#
# zfs allow [-ldug] user|group[,user|group]...
# perm|@setname[,perm|@setname]... filesystem|volume
#
# zfs allow [-ld] -e|everyone perm|@setname[,perm|@setname]...
# filesystem|volume
#
# Delegates ZFS administration permission for the file systems to non-
# privileged users.
#
# [-ug] user|group[, user|group]...
# Specifies to whom the permissions are delegated. Multiple
# entities can be specified as a comma-separated list. If nei-
# ther of the -ug options are specified, then the argument is
# interpreted preferentially as the keyword everyone, then as a
# user name, and lastly as a group name. To specify a user or
# group named "everyone", use the -u or -g options. To specify
# a group with the same name as a user, use the -g option.
#
# [-e|everyone]
# Specifies that the permissions be delegated to "everyone".
#
# perm|@setname[,perm|@setname]...
# The permissions to delegate. Multiple permissions may be
# specified as a comma-separated list. Permission names are the
# same as ZFS subcommand and property names. See the property
# list below. Property set names, which begin with an at sign
# (@), may be specified. See the -s form below for details.
#
# [-ld] filesystem|volume
# Specifies where the permissions are delegated. If neither of
# the -ld options are specified, or both are, then the permis-
# sions are allowed for the file system or volume, and all of
# its descendents. If only the -l option is used, then is
# allowed "locally" only for the specified file system. If
# only the -d option is used, then is allowed only for the
# descendent file systems.
#
# Permissions are generally the ability to use a ZFS subcommand or
# change a ZFS property. The following permissions are available:
#
# NAME TYPE NOTES
# allow subcommand Must also have the permission
# that is being allowed
# clone subcommand Must also have the 'create'
# ability and 'mount' ability in
# the origin file system
# create subcommand Must also have the 'mount'
# ability
# destroy subcommand Must also have the 'mount'
# ability
# diff subcommand Allows lookup of paths within a
# dataset given an object number,
# and the ability to create snap-
# shots necessary to 'zfs diff'
# hold subcommand Allows adding a user hold to a
# snapshot
# mount subcommand Allows mount/umount of ZFS
# datasets
# promote subcommand Must also have the 'mount' and
# 'promote' ability in the origin
# file system
# receive subcommand Must also have the 'mount' and
# 'create' ability
# release subcommand Allows releasing a user hold
# which might destroy the snapshot
# rename subcommand Must also have the 'mount' and
# 'create' ability in the new
# parent
# rollback subcommand Must also have the 'mount'
# ability
# send subcommand
# share subcommand Allows sharing file systems over
# the NFS protocol
# snapshot subcommand Must also have the 'mount'
# ability
# groupquota other Allows accessing any
# groupquota@... property
# groupused other Allows reading any groupused@...
# property
# userprop other Allows changing any user property
# userquota other Allows accessing any
# userquota@... property
# userused other Allows reading any userused@...
# property
# aclinherit property
# aclmode property
# atime property
# canmount property
# casesensitivity property
# checksum property
# compression property
# copies property
# dedup property
# devices property
# exec property
# filesystem_limit property
# logbias property
# jailed property
# mlslabel property
# mountpoint property
# nbmand property
# normalization property
# primarycache property
# quota property
# readonly property
# recordsize property
# refquota property
# refreservation property
# reservation property
# secondarycache property
# setuid property
# sharenfs property
# sharesmb property
# snapdir property
# snapshot_limit property
# sync property
# utf8only property
# version property
# volblocksize property
# volsize property
# vscan property
# xattr property
#
# zfs allow -c perm|@setname[,perm|@setname]... filesystem|volume
#
# Sets "create time" permissions. These permissions are granted
# (locally) to the creator of any newly-created descendent file system.
#
# zfs allow -s @setname perm|@setname[,perm|@setname]... filesystem|volume
#
# Defines or adds permissions to a permission set. The set can be used
# by other "zfs allow" commands for the specified file system and its
# descendents. Sets are evaluated dynamically, so changes to a set are
# immediately reflected. Permission sets follow the same naming
# restrictions as ZFS file systems, but the name must begin with an "at
# sign" (@), and can be no more than 64 characters long.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("allow", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
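# Illustrative sketch (not part of the original scaffold): one way the zfs_allow
# wrapper above could assemble its arguments once it is implemented. The
# parameter names and the _Command helper signature are assumptions borrowed
# from the placeholder bodies in this module, not a verified API.
#
#     def zfs_allow_example(entity, perms, target, local=False, descendents=False):
#         call_args = []
#         if local:
#             call_args.append("-l")
#         if descendents:
#             call_args.append("-d")
#         call_args.append(entity)            # user, group or "everyone"
#         call_args.append(",".join(perms))   # e.g. ["create", "mount", "snapshot"]
#         command = _Command("allow", call_args, targets=[target])
#         return command.run()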
#
# def zfs_unallow():
# """
# zfs unallow [-rldug] user|group[,user|group]...
# [perm|@setname[,perm|@setname]...] filesystem|volume
#
# zfs unallow [-rld] -e|everyone [perm|@setname[,perm|@setname]...]
# filesystem|volume
#
# zfs unallow [-r] -c [perm|@setname[,perm|@setname]...] filesystem|volume
#
# Removes permissions that were granted with the "zfs allow" command.
# No permissions are explicitly denied, so other permissions granted
# are still in effect. For example, if the permission is granted by an
# ancestor. If no permissions are specified, then all permissions for
# the specified user, group, or everyone are removed. Specifying
# everyone (or using the -e option) only removes the permissions that
# were granted to everyone, not all permissions for every user and
# group. See the "zfs allow" command for a description of the -ldugec
# options.
#
# -r Recursively remove the permissions from this file system and
# all descendents.
#
# zfs unallow [-r] -s @setname [perm|@setname[,perm|@setname]...]
# filesystem|volume
#
# Removes permissions from a permission set. If no permissions are
# specified, then all permissions are removed, thus removing the set
# entirely.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("unallow", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
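# Illustrative sketch (not part of the original scaffold): a possible argument
# layout for the zfs_unallow wrapper above, mirroring the allow example. The
# parameter names and the _Command helper are assumptions.
#
#     def zfs_unallow_example(entity, perms, target, recursive=False):
#         call_args = []
#         if recursive:
#             call_args.append("-r")
#         call_args.append(entity)
#         call_args.append(",".join(perms))
#         command = _Command("unallow", call_args, targets=[target])
#         return command.run()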
#
# def zfs_hold():
# """
# zfs hold [-r] tag snapshot...
#
# Adds a single reference, named with the tag argument, to the speci-
# fied snapshot or snapshots. Each snapshot has its own tag namespace,
# and tags must be unique within that space.
#
# If a hold exists on a snapshot, attempts to destroy that snapshot by
# using the "zfs destroy" command returns EBUSY.
#
# -r Specifies that a hold with the given tag is applied recur-
# sively to the snapshots of all descendent file systems.
#
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("hold", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
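# Illustrative sketch (not part of the original scaffold): how the zfs_hold
# wrapper above might pass the tag and snapshot list; a zfs_release wrapper
# would mirror this with the "release" subcommand. Parameter names and the
# _Command helper are assumptions.
#
#     def zfs_hold_example(tag, snapshots, recursive=False):
#         call_args = ["-r"] if recursive else []
#         call_args.append(tag)
#         command = _Command("hold", call_args, targets=list(snapshots))
#         return command.run()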
#
# def zfs_holds():
# """
# zfs holds [-Hp] [-r|-d depth] filesystem|volume|snapshot...
#
# Lists all existing user references for the given dataset or datasets.
#
# -H Used for scripting mode. Do not print headers and separate
# fields by a single tab instead of arbitrary white space.
#
# -p Display numbers in parsable (exact) values.
#
# -r Lists the holds that are set on the descendent snapshots of
# the named datasets or snapshots, in addition to listing the
# holds on the named snapshots, if any.
#
# -d depth
# Recursively display any holds on the named snapshots, or
# descendent snapshots of the named datasets or snapshots, lim-
# iting the recursion to depth.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("holds", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
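# Illustrative sketch (not part of the original scaffold): parsing the scripting
# mode output of "zfs holds -H", which is tab separated and has no header line.
# The column order (dataset name, tag, creation timestamp) is an assumption and
# should be verified against a live system before relying on it.
#
#     def parse_holds_output(output):
#         holds = []
#         for line in output.splitlines():
#             if not line.strip():
#                 continue
#             name, tag, timestamp = line.split("\t")
#             holds.append({"name": name, "tag": tag, "timestamp": timestamp})
#         return holds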
#
# def zfs_release():
# """
# zfs release [-r] tag snapshot...
#
# Removes a single reference, named with the tag argument, from the
# specified snapshot or snapshots. The tag must already exist for each
# snapshot.
#
# -r Recursively releases a hold with the given tag on the snap-
# shots of all descendent file systems.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("release", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
#
# def zfs_diff():
# """
# zfs diff [-FHt] snapshot [snapshot|filesystem]
#
# Display the difference between a snapshot of a given filesystem and
# another snapshot of that filesystem from a later time or the current
# contents of the filesystem. The first column is a character indicat-
# ing the type of change, the other columns indicate pathname, new
# pathname (in case of rename), change in link count, and optionally
# file type and/or change time.
#
# The types of change are:
#
# - path was removed
# + path was added
# M path was modified
# R path was renamed
#
# -F Display an indication of the type of file, in a manner simi-
# lar to the -F option of ls(1).
#
# B block device
# C character device
# F regular file
# / directory
# @ symbolic link
# = socket
# > door (not supported on FreeBSD)
# | named pipe (not supported on FreeBSD)
# P event port (not supported on FreeBSD)
#
# -H Give more parsable tab-separated output, without header lines
# and without arrows.
#
# -t Display the path's inode change time as the first column of
# output.
# """
# if x is None:
# raise TypeError(" name cannot be of type 'None'")
#
# call_args = []
#
# if y:
# call_args.append("-r")
#
#     command = _Command("diff", call_args, targets=[])
#
# try:
# return command.run()
# except subprocess.CalledProcessError as e:
# raise RuntimeError(f"Failed to \n{e.output}\n")
#
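# Illustrative sketch (not part of the original scaffold): mapping the change
# indicator in the first column of "zfs diff" output to a readable label, based
# on the table in the docstring above. The parsing assumes the tab-separated
# output produced by the -H option.
#
#     DIFF_CHANGE_TYPES = {"-": "removed", "+": "added", "M": "modified", "R": "renamed"}
#
#     def describe_diff_line(line):
#         change, _, path = line.partition("\t")
#         return "{} was {}".format(path, DIFF_CHANGE_TYPES.get(change, "unknown"))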
#
# """FreeBSD Only"""
#
# """
# zfs jail jailid filesystem
#
# Attaches the specified filesystem to the jail identified by JID
# jailid. From now on this file system tree can be managed from within
# a jail if the jailed property has been set. To use this functional-
# ity, the jail needs the allow.mount and allow.mount.zfs parameters
# set to 1 and the enforce_statfs parameter set to a value lower than
# 2.
#
# See jail(8) for more information on managing jails and configuring
# the parameters above.
#
# zfs unjail jailid filesystem
#
# Detaches the specified filesystem from the jail identified by JID
# jailid.
# """
| 34.145372
| 97
| 0.602589
|
0f70b4a4dc40adf68a4b331fcc2f4ae0aa7a1ba0
| 2,179
|
py
|
Python
|
setup.py
|
ossobv/djangosaml2idp
|
71e65e28cff8e83d1a77325396784c879906d962
|
[
"Apache-2.0"
] | 2
|
2018-11-14T07:44:24.000Z
|
2018-11-19T01:01:58.000Z
|
setup.py
|
ossobv/djangosaml2idp
|
71e65e28cff8e83d1a77325396784c879906d962
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ossobv/djangosaml2idp
|
71e65e28cff8e83d1a77325396784c879906d962
|
[
"Apache-2.0"
] | 1
|
2018-12-19T22:02:34.000Z
|
2018-12-19T22:02:34.000Z
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from djangosaml2idp import __version__
setup(
name='djangosaml2idp',
version=__version__,
description='SAML 2.0 Identity Provider for Django',
keywords="django,pysaml2,sso,saml2,federated authentication,authentication,idp",
author='Mathieu Hinderyckx',
author_email='mathieu.hinderyckx@gmail.com',
maintainer="Mathieu Hinderyckx",
long_description="\n\n".join([
open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
]),
install_requires=[
'django>=2.2',
'pysaml2>=5.0.0',
'pytz',
'arrow',
],
extras_require={
"testing": [
"pytest",
"pytest-runner",
"pytest-django",
"pytest-cov",
"pytest-pythonpath",
"pytest-mock",
"requests-mock"
]
},
python_requires=">=3.6",
license='Apache Software License 2.0',
packages=find_packages(exclude=["tests*", "docs", "example_setup"]),
url='https://github.com/OTA-Insight/djangosaml2idp/',
zip_safe=False,
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
'Environment :: Web Environment',
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Application Frameworks",
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 34.046875
| 84
| 0.591097
|
41179067c41d948a13025281ab69785d87c223d3
| 8,331
|
py
|
Python
|
test/functional/feature_proxy.py
|
Blessing-BLES/NEWSAVINGSOURCE
|
529d24de45cffe89ad6c39e30e13f57dd1eee4f5
|
[
"MIT"
] | 1
|
2021-02-10T13:20:37.000Z
|
2021-02-10T13:20:37.000Z
|
test/functional/feature_proxy.py
|
Blessing-BLES/NEWSAVINGSOURCE
|
529d24de45cffe89ad6c39e30e13f57dd1eee4f5
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
Blessing-BLES/NEWSAVINGSOURCE
|
529d24de45cffe89ad6c39e30e13f57dd1eee4f5
|
[
"MIT"
] | 2
|
2020-10-16T16:38:23.000Z
|
2021-10-05T03:13:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start savingd's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on savingd side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create savingds that connect to them
- Manipulate the savingds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.242574
| 121
| 0.625495
|
ea79ceba3366cb53748bff2f20b28d9ef1473ea0
| 9,907
|
py
|
Python
|
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/test_hooks_aad.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/test_hooks_aad.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/metricsadvisor/azure-ai-metricsadvisor/tests/test_hooks_aad.py
|
ankitarorabit/azure-sdk-for-python
|
dd90281cbad9400f8080754a5ef2f56791a5a88f
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.metricsadvisor.models import (
EmailNotificationHook,
WebNotificationHook,
)
from base_testcase_aad import TestMetricsAdvisorAdministrationClientBase
class TestMetricsAdvisorAdministrationClient(TestMetricsAdvisorAdministrationClientBase):
def test_create_email_hook(self):
email_hook_name = self.create_random_name("testemailhook")
try:
email_hook = self.admin_client.create_hook(
hook=EmailNotificationHook(
name=email_hook_name,
emails_to_alert=["yournamehere@microsoft.com"],
description="my email hook",
external_link="external link"
)
)
self.assertIsNotNone(email_hook.id)
self.assertIsNotNone(email_hook.name)
self.assertIsNotNone(email_hook.admin_emails)
self.assertEqual(email_hook.emails_to_alert, ["yournamehere@microsoft.com"])
self.assertEqual(email_hook.description, "my email hook")
self.assertEqual(email_hook.external_link, "external link")
self.assertEqual(email_hook.hook_type, "Email")
finally:
self.admin_client.delete_hook(email_hook.id)
with self.assertRaises(ResourceNotFoundError):
self.admin_client.get_hook(email_hook.id)
def test_create_web_hook(self):
web_hook_name = self.create_random_name("testwebhooks")
try:
web_hook = self.admin_client.create_hook(
hook=WebNotificationHook(
name=web_hook_name,
endpoint="https://httpbin.org/post",
description="my web hook",
external_link="external link"
)
)
self.assertIsNotNone(web_hook.id)
self.assertIsNotNone(web_hook.name)
self.assertIsNotNone(web_hook.admin_emails)
self.assertEqual(web_hook.endpoint, "https://httpbin.org/post")
self.assertEqual(web_hook.description, "my web hook")
self.assertEqual(web_hook.external_link, "external link")
self.assertEqual(web_hook.hook_type, "Webhook")
finally:
self.admin_client.delete_hook(web_hook.id)
with self.assertRaises(ResourceNotFoundError):
self.admin_client.get_hook(web_hook.id)
def test_list_hooks(self):
hooks = self.admin_client.list_hooks()
assert len(list(hooks)) > 0
def test_update_email_hook_with_model(self):
name = self.create_random_name("testwebhook")
try:
hook = self._create_email_hook_for_update(name)
hook.name = "update"
hook.description = "update"
hook.external_link = "update"
hook.emails_to_alert = ["myemail@m.com"]
self.admin_client.update_hook(hook)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "update")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.emails_to_alert, ["myemail@m.com"])
finally:
self.admin_client.delete_hook(hook.id)
def test_update_email_hook_with_kwargs(self):
name = self.create_random_name("testhook")
try:
hook = self._create_email_hook_for_update(name)
self.admin_client.update_hook(
hook.id,
hook_type="Email",
name="update",
description="update",
external_link="update",
emails_to_alert=["myemail@m.com"]
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "update")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.emails_to_alert, ["myemail@m.com"])
finally:
self.admin_client.delete_hook(hook.id)
def test_update_email_hook_with_model_and_kwargs(self):
name = self.create_random_name("testhook")
try:
hook = self._create_email_hook_for_update(name)
hook.name = "don't update me"
hook.description = "don't update me"
hook.emails_to_alert = []
self.admin_client.update_hook(
hook,
hook_type="Email",
name="update",
description="update",
external_link="update",
emails_to_alert=["myemail@m.com"]
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "update")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.emails_to_alert, ["myemail@m.com"])
finally:
self.admin_client.delete_hook(hook.id)
def test_update_email_hook_by_resetting_properties(self):
name = self.create_random_name("testhook")
try:
hook = self._create_email_hook_for_update(name)
self.admin_client.update_hook(
hook.id,
hook_type="Email",
name="reset",
description=None,
external_link=None,
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "reset")
# sending null, but not clearing properties
# self.assertEqual(updated.description, "")
# self.assertEqual(updated.external_link, "")
finally:
self.admin_client.delete_hook(hook.id)
def test_update_web_hook_with_model(self):
name = self.create_random_name("testwebhook")
try:
hook = self._create_web_hook_for_update(name)
hook.name = "update"
hook.description = "update"
hook.external_link = "update"
hook.username = "myusername"
hook.password = "password"
self.admin_client.update_hook(hook)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "update")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.username, "myusername")
finally:
self.admin_client.delete_hook(hook.id)
def test_update_web_hook_with_kwargs(self):
name = self.create_random_name("testwebhook")
try:
hook = self._create_web_hook_for_update(name)
self.admin_client.update_hook(
hook.id,
hook_type="Web",
endpoint="https://httpbin.org/post",
name="update",
description="update",
external_link="update",
username="myusername",
password="password"
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "update")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.username, "myusername")
finally:
self.admin_client.delete_hook(hook.id)
def test_update_web_hook_with_model_and_kwargs(self):
name = self.create_random_name("testwebhooks")
try:
hook = self._create_web_hook_for_update(name)
hook.name = "don't update me"
hook.description = "updateMe"
hook.username = "don't update me"
hook.password = "don't update me"
hook.endpoint = "don't update me"
self.admin_client.update_hook(
hook,
hook_type="Web",
endpoint="https://httpbin.org/post",
name="update",
external_link="update",
username="myusername",
password="password"
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "update")
self.assertEqual(updated.description, "updateMe")
self.assertEqual(updated.external_link, "update")
self.assertEqual(updated.username, "myusername")
finally:
self.admin_client.delete_hook(hook.id)
def test_update_web_hook_by_resetting_properties(self):
name = self.create_random_name("testhook")
try:
hook = self._create_web_hook_for_update(name)
self.admin_client.update_hook(
hook.id,
hook_type="Web",
name="reset",
description=None,
endpoint="https://httpbin.org/post",
external_link=None,
username="myusername",
password=None
)
updated = self.admin_client.get_hook(hook.id)
self.assertEqual(updated.name, "reset")
self.assertEqual(updated.password, "")
# sending null, but not clearing properties
# self.assertEqual(updated.description, "")
# self.assertEqual(updated.external_link, "")
finally:
self.admin_client.delete_hook(hook.id)
| 39.158103
| 89
| 0.587766
|
04998d0b7366eb608119f1e5c7f3c21ac6c1f34b
| 319
|
py
|
Python
|
Mundo 1 Fundamentos/ex013.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | 1
|
2022-02-17T16:23:52.000Z
|
2022-02-17T16:23:52.000Z
|
Mundo 1 Fundamentos/ex013.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | null | null | null |
Mundo 1 Fundamentos/ex013.py
|
costa53/curso_em_video_python3
|
4f859641324f8b35be56d807f40457d7dddc451f
|
[
"MIT"
] | null | null | null |
# CHALLENGE 013
# Write an algorithm that reads an employee's salary and shows the new salary with a 15% raise.
sal = float(input('Qual é o salário do funcionário? R$'))
aum = 15
novo = sal + (sal * aum / 100)
print(f'Um funcionário que ganhava R${sal:.2f}, com {aum}% de aumento, passa a receber R${novo:.2f}')
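# Example: for a salary of R$1000.00 the raise is 1000 * 15 / 100 = R$150.00,
# so the new salary printed is R$1150.00.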
| 39.875
| 103
| 0.69279
|
a96d365fb486eb7959981a4db6e2db141b31a718
| 1,099
|
py
|
Python
|
OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts/stop_oid_component.py
|
MarkkuPekkarinen/docker-images
|
675a87d61bedd9117ffc88b651cda533e7c4a6b8
|
[
"UPL-1.0"
] | 5,519
|
2015-01-23T15:07:05.000Z
|
2022-03-31T12:12:19.000Z
|
OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts/stop_oid_component.py
|
MarkkuPekkarinen/docker-images
|
675a87d61bedd9117ffc88b651cda533e7c4a6b8
|
[
"UPL-1.0"
] | 1,492
|
2015-01-26T05:31:35.000Z
|
2022-03-31T21:16:34.000Z
|
OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts/stop_oid_component.py
|
MarkkuPekkarinen/docker-images
|
675a87d61bedd9117ffc88b651cda533e7c4a6b8
|
[
"UPL-1.0"
] | 5,850
|
2015-01-22T01:40:51.000Z
|
2022-03-31T12:12:19.000Z
|
#!/usr/bin/python
#
# Copyright (c) 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# Author: Pratyush Dash
#
import os, sys, re
domain_name = os.environ.get("DOMAIN_NAME", "oid_domain")
oracle_home = os.environ.get("ORACLE_HOME", "/u01/oracle/")
weblogic_home = '/u01/oracle/wlserver'
# Node Manager Vars
i = 1
while i < len(sys.argv):
if sys.argv[i] == '-username':
user = sys.argv[i + 1]
i += 2
elif sys.argv[i] == '-adminpassword':
password = sys.argv[i + 1]
i += 2
elif sys.argv[i] == '-instance_Name':
instanceName= sys.argv[i + 1]
i += 2
else:
print 'Unexpected argument switch at position ' + str(i) + ': ' + str(sys.argv[i])
sys.exit(1)
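# Example invocation (illustrative only; the wlst.sh path, credentials and
# instance name depend on the actual installation):
#
#   $ORACLE_HOME/oracle_common/common/bin/wlst.sh stop_oid_component.py \
#       -username weblogic -adminpassword <password> -instance_Name oid1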
try:
nmConnect(domainName=domain_name,username=user,password=password,nmType='ssl')
nmServerStatus(serverName=instanceName,serverType='OID')
nmKill(serverName=instanceName,serverType='OID')
exit()
except:
print 'Unable to kill '+instanceName
exit()
| 28.921053
| 104
| 0.643312
|
ec92e85d6fdf833542f0f87a175c0759609a14ed
| 5,621
|
py
|
Python
|
vaccineSpotter.py
|
iamspidey/Vaccine-Spotter-India
|
212c5d7009f7c281d6b314ce4faa8376398e6bb7
|
[
"MIT"
] | 17
|
2021-05-01T18:27:29.000Z
|
2021-11-09T10:39:37.000Z
|
vaccineSpotter.py
|
iamspidey/Vaccine-Spotter-India
|
212c5d7009f7c281d6b314ce4faa8376398e6bb7
|
[
"MIT"
] | 5
|
2021-05-05T06:30:22.000Z
|
2021-07-15T14:54:40.000Z
|
vaccineSpotter.py
|
iamspidey/Vaccine-Spotter-India
|
212c5d7009f7c281d6b314ce4faa8376398e6bb7
|
[
"MIT"
] | 23
|
2021-05-02T05:08:53.000Z
|
2021-05-26T06:20:11.000Z
|
import requests
from datetime import date,datetime
import os
import smtplib
from time import time,ctime
import yaml
class vaccineSpotter:
def __init__(self, config_file_path, time_delay=1):
self.config_file_path = config_file_path
self.time_delay = time_delay
self.cfg = self.read_config()
self.set_params()
def read_config(self):
with open(self.config_file_path, "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)
return cfg
def set_params(self):
## params
self.email_info = self.cfg["email"]
self.area_info = self.cfg["area_info"]
## sender mail info
self.sent_from = self.email_info['sent_from']
self.email_user = self.sent_from
self.email_password = self.email_info['email_password']
# receiver email details
self.to = self.email_info['to']
# area code
self.__district_code = self.area_info['__district_code']
self.__pincode = self.area_info['__pincode']
#age limit for vaccination
self.age_limit_info = self.cfg['age_limit']
self.age_limit = self.age_limit_info['age_limit']
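    # Expected shape of config.yml, inferred from the keys read above. The
    # values below are placeholders, not real credentials or codes:
    #
    #   email:
    #     sent_from: "sender@example.com"
    #     email_password: "<app password>"
    #     to: ["receiver@example.com"]
    #   area_info:
    #     __district_code: "294"
    #     __pincode: "110001"
    #   age_limit:
    #     age_limit: [18, 45]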
def send_email(self, result):
# turn on allow less secure apps to get email
# https://myaccount.google.com/lesssecureapps
# suggest to use a backup account for this to preserve security
subject = 'Vaccine slot available in your area'
body = "Following vaccines centers are found \n\n Query Time : \
"+ctime(time())+"\n\n" + result
email_text = """\
From: %s
To: %s
Subject: %s
%s
""" % (self.sent_from, ", ".join(self.to), subject, body)
print(email_text)
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(self.email_user, self.email_password)
server.sendmail(self.sent_from, self.to, email_text)
server.close()
print('Email sent!\n')
except Exception as e:
print('Something went wrong...')
print (e)
def parse_json_district_code(self, result):
output = []
centers = result['centers']
for center in centers:
sessions = center['sessions']
for session in sessions:
if session['available_capacity'] > 0:
res = { 'name': center['name'], 'block_name':center['block_name'],\
'age_limit':session['min_age_limit'], 'vaccine_type':session['vaccine'] ,\
'date':session['date'],'available_capacity':session['available_capacity'] }
if res['age_limit'] in self.age_limit:
output.append(res)
return output
def parse_json_pincode(self, result):
output = []
sessions = result['sessions']
if len(sessions)==0:
return output
for session in sessions:
            if session['available_capacity'] > 0:
res = { 'name': session['name'], 'block_name':session['block_name'], \
'age_limit':session['min_age_limit'], 'vaccine_type':session['vaccine'] , \
'date':session['date'],'available_capacity':session['available_capacity'] }
if res['age_limit'] in self.age_limit:
output.append(res)
return output
def call_api(self, url, headers, query_type):
response = requests.get(url, headers = headers)
if response.status_code == 200:
print("API call success")
result = response.json()
if query_type=='district_code':
output = self.parse_json_district_code(result)
elif query_type =='pincode':
output = self.parse_json_pincode(result)
else:
print('incorrect query type\nquery type must be either district_code or pincode\n')
return
if len(output) > 0:
print("Vaccines available")
print('\007')
result_str = ""
for center in output:
result_str = result_str + center['name'] + "\n"
result_str = result_str + "block:"+center['block_name'] + "\n"
result_str = result_str + "vaccine count:"+str(center['available_capacity']) + "\n"
result_str = result_str + "vaccine type:"+ center['vaccine_type'] + "\n"
result_str = result_str + center['date'] + "\n"
result_str = result_str + "age_limit:"+str(center['age_limit'])+"\n"
result_str = result_str + "-----------------------------------------------------\n"
self.send_email(result_str)
else:
print("Vaccines not available for age limit {}\nTrying again\
after {} minute.....\n".format(*self.age_limit, self.time_delay))
else:
print("something went wrong :(\nStatus code {} \nTrying again......\
after {} minute.....\n".format(response.status_code, self.time_delay))
def query(self, root_url, headers, query_type):
print(ctime(time()))
# format date
today = date.today()
d1 = today.strftime("%d/%m/%Y")
__date = str(d1).replace("/","-")
if query_type == 'district_code':
url = root_url + "/calendarByDistrict?district_id=" + self.__district_code + "&date="+ __date
elif query_type =='pincode':
url = root_url + "/findByPin?pincode=" + self.__pincode + "&date=" + __date
else:
print('incorrect query type\nquery type must be either district_code or pincode\n')
return
self.call_api(url, headers, query_type)
t = datetime.now()
if __name__ == '__main__':
time_delay = 1
query_type = 'district_code' # set it to "pincode" to query by pincode
config_file_path = 'config.yml'
print("querying by {} .....".format(query_type))
## root url and headers
root_url = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public"
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"}
vaccineSpotter = vaccineSpotter(config_file_path, time_delay)
vaccineSpotter.query(root_url, headers, query_type)
while True:
delta = datetime.now()-t
if delta.seconds >= time_delay * 60:
vaccineSpotter.query(root_url, headers, query_type)
t = datetime.now()
| 32.491329
| 150
| 0.68333
|
dc2fcaa4ea06d8f72ff8a454dd275f4b0d6af05a
| 4,797
|
py
|
Python
|
examples/samples/wr_strategy.py
|
walter211/ctpbee
|
0cb45f7a3822ab6040286a6eb0d3240fb347fad3
|
[
"MIT"
] | 1
|
2021-03-30T01:20:56.000Z
|
2021-03-30T01:20:56.000Z
|
examples/samples/wr_strategy.py
|
walter211/ctpbee
|
0cb45f7a3822ab6040286a6eb0d3240fb347fad3
|
[
"MIT"
] | null | null | null |
examples/samples/wr_strategy.py
|
walter211/ctpbee
|
0cb45f7a3822ab6040286a6eb0d3240fb347fad3
|
[
"MIT"
] | 1
|
2019-12-06T01:38:22.000Z
|
2019-12-06T01:38:22.000Z
|
"""
Simple Williams %R (WR) strategy: wr = info.wr()
"""
import json
from datetime import datetime, date
from ctpbee import LooperApi, Vessel
from ctpbee.constant import Direction
from ctpbee.indicator import Indicator
def get_data(start, end, symbol, exchange, level):
""" using rqdatac to make an example """
# import rqdatac as rq
# from rqdatac import get_price, id_convert
# username = "license"
# password = "NK-Ci7vnLsRiPPWYwxvvPYdYM90vxN60qUB5tVac2mQuvZ8f9Mq8K_nnUqVspOpi4BLTkSLgq8OQFpOOj7L" \
# "t7AbdBZEBqRK74fIJH5vsaAfFQgl-tuB8l03axrW8cyN6-nBUho_6Y5VCRI63Mx_PN54nsQOpc1psIGEz" \
# "gND8c6Y=bqMVlABkpSlrDNk4DgG-1QXNknJtk0Kkw2axvFDa0E_XPMqOcBxifuRa_DFI2svseXU-8A" \
# "eLjchnTkeuvQkKh6nrfehVDiXjoMeq5sXgqpbgFAd4A5j2B1a0gpE3cb5kXb42n13fGwFaGris" \
# "8-eKzz_jncvuAamkJEQQV0aLdiw="
# host = "rqdatad-pro.ricequant.com"
# port = 16011
# rq.init(username, password, (host, port))
# symbol_rq = id_convert(symbol)
# data = get_price(symbol_rq, start_date=start, end_date=end, frequency=level, fields=None,
# adjust_type='pre', skip_suspended=False, market='cn', expect_df=False)
# origin = data.to_dict(orient='records')
# result = []
# for x in origin:
# do = {}
# do['open_price'] = x['open']
# do['low_price'] = x['low']
# do['high_price'] = x['high']
# do['close_price'] = x['close']
# do['datetime'] = datetime.strptime(str(x['trading_date']), "%Y-%m-%d %H:%M:%S")
# do['symbol'] = symbol
# do['local_symbol'] = symbol + "." + exchange
# do['exchange'] = exchange
# result.append(do)
# return result
def get_a_strategy():
class SmaStrategy(LooperApi):
def __init__(self, name):
super().__init__(name)
self.count = 1
self.pos = 0
            self.bar_3 = Indicator()  # 3-minute bar series
            self.bar_3.open_json('../zn1912.SHFE.json')  # load local historical data
            self.allow_max_price = 5000  # price ceiling: sell when the price reaches it, to guard against a sudden drop
            self.allow_low_price = 2000  # price floor: sell when the price falls below it, to guard against a large loss
def on_bar(self, bar):
            # todo: Williams %R (WR)
""" """
self.bar_3.add_bar(bar)
if not self.bar_3.inited:
return
wr = self.bar_3.wr()
if self.allow_max_price < bar.close_price and self.pos > 0:
self.action.sell(bar.close_price, self.pos, bar)
if self.allow_low_price > bar.close_price and self.pos > 0:
self.action.sell(bar.close_price, self.pos, bar)
#############
            # not implemented yet #
############
pass
def on_trade(self, trade):
if trade.direction == Direction.LONG:
self.pos += trade.volume
else:
self.pos -= trade.volume
def init_params(self, data):
""""""
            # print("setting strategy parameters")
return SmaStrategy("double_ma")
def save_data_json(data):
result = {"result": data}
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
with open("../data.json", "w") as f:
json.dump(result, f, cls=CJsonEncoder)
def load_data():
with open("../data.json", "r") as f:
data = json.load(f)
return data.get("result")
def run_main(data):
vessel = Vessel()
vessel.add_data(data)
stra = get_a_strategy()
vessel.add_strategy(stra)
vessel.set_params({"looper":
{"initial_capital": 100000,
"commission": 0.005,
"deal_pattern": "price",
"size_map": {"ag1912.SHFE": 15},
"today_commission": 0.005,
"yesterday_commission": 0.02,
"close_commission": 0.005,
"slippage_sell": 0,
"slippage_cover": 0,
"slippage_buy": 0,
"slippage_short": 0,
"close_pattern": "yesterday",
},
"strategy": {}
})
vessel.run()
from pprint import pprint
result = vessel.get_result()
pprint(result)
if __name__ == '__main__':
data = load_data()
for x in data:
x['datetime'] = datetime.strptime(str(x['datetime']), "%Y-%m-%d %H:%M:%S")
run_main(data)
| 33.545455
| 104
| 0.536377
|
5c5031044f245b76564e7d851d48f35514bda9b5
| 4,226
|
py
|
Python
|
pystr/decodable.py
|
gwangyi/pystr
|
b80ac1906746e98afeaf934a3593633a2e439172
|
[
"MIT"
] | null | null | null |
pystr/decodable.py
|
gwangyi/pystr
|
b80ac1906746e98afeaf934a3593633a2e439172
|
[
"MIT"
] | null | null | null |
pystr/decodable.py
|
gwangyi/pystr
|
b80ac1906746e98afeaf934a3593633a2e439172
|
[
"MIT"
] | null | null | null |
r"""This module provides decoding functionality of struct
+-------+--------+-------------------------------+---+---+---+---+---+---+---+---+
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+-------+--------+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
| bit | | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | |
+-------+--------+---+---+---+---+---+---+---+---+-------------------------------+
| Field | opcode | |fua| lba (big endian) |
+-------+--------+---------------------------+---+-------------------------------+
If a command block has the layout shown above and there are 5 commands as follows:
+--------+-----+-------------+
| opcode | fua | Command |
+--------+-----+-------------+
| 0x00 | X | NOP |
+--------+-----+-------------+
| 0x01 | 0 | Cache Write |
+--------+-----+-------------+
| 0x01 | 1 | Force Write |
+--------+-----+-------------+
| 0x02 | 0 | Cache Read |
+--------+-----+-------------+
| 0x02 | 1 | Force Read |
+--------+-----+-------------+
Command classes can be defined as follows:
>>> from pystr import Decodable
>>> from enum import IntEnum
>>> class Opcode(IntEnum):
... Nop = 0
... Write = 1
... Read = 2
>>> class BaseCommand(Decodable):
... _layout_ = dict(
... opcode=dict(
... offset=0,
... ),
... fua=dict(
... offset=1,
... bit=0,
... ),
... lba=dict(
... offset=2,
... width=64,
... endian='be'
... ),
... )
... opcode: Opcode
... fua: bool
... lba: int
>>> class NopCommand(BaseCommand):
... opcode = Opcode.Nop
>>> class WriteCommand(BaseCommand):
... opcode = Opcode.Write
>>> class ReadCommand(BaseCommand):
... opcode = Opcode.Read
>>> class CacheWriteCommand(WriteCommand):
... fua = False
>>> class ForceWriteCommand(WriteCommand):
... fua = True
>>> class CacheReadCommand(ReadCommand):
... fua = False
>>> class ForceReadCommand(ReadCommand):
... fua = True
The results of each case are as follows:
>>> print(BaseCommand(b'\0\0\0\0\0\0\0\0').decode())
NopCommand(lba=0, fua=False, opcode=<Opcode.Nop: 0>)
>>> print(BaseCommand(b'\x01\0\0\0\0\0\0\0').decode())
CacheWriteCommand(lba=0, fua=False, opcode=<Opcode.Write: 1>)
>>> print(BaseCommand(b'\x01\x01\0\0\0\0\0\0').decode())
ForceWriteCommand(lba=0, fua=True, opcode=<Opcode.Write: 1>)
>>> print(BaseCommand(b'\x02\0\0\0\0\0\0\0').decode())
CacheReadCommand(lba=0, fua=False, opcode=<Opcode.Read: 2>)
>>> print(BaseCommand(b'\x02\x01\0\0\0\0\0\0').decode())
ForceReadCommand(lba=0, fua=True, opcode=<Opcode.Read: 2>)
If you want to set an initial value on a sub-struct field, the `initial` parameter can be used.
>>> class SomeDecodable(Decodable):
... _layout_ = ...
... child: ChildDecodable
... class DerivedDecodable(SomeDecodable, initial={"child.value": 1}):
... pass
"""
import typing
from .struct import Struct
DerivedDecodable = typing.TypeVar('DerivedDecodable', bound='Decodable')
class Decodable(Struct):
"""Decoding facility added Struct"""
_decode_map: typing.List[
typing.Tuple[
typing.Dict[str, typing.Any],
typing.Type['Decodable']
]
] = []
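    # Note on the registration scheme implemented below: when a subclass is
    # created, it is appended together with its fixed field values (cls._initial,
    # filled in by the Struct base class) to the decode map it inherits from its
    # parent, provided it fixes any values at all, and it then receives a fresh
    # empty decode map for its own subclasses. decode() walks these maps from
    # the base class downwards, picking the most recently registered subclass
    # whose fixed values all match the current buffer, and re-wraps the buffer
    # as that type.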
def __init_subclass__(cls, **kwargs: typing.Any): #pylint: disable=arguments-differ
super().__init_subclass__(**kwargs)
if cls._initial:
cls._decode_map.append((cls._initial, cls))
cls._decode_map = []
def decode(self: DerivedDecodable) -> DerivedDecodable:
"""Decode struct by derived Decodables"""
dmap = self._decode_map
ret_tp = type(self)
while True:
for cond, child_tp in reversed(dmap):
if all(getattr(self, k) == v for k, v in cond.items()):
dmap = child_tp._decode_map #pylint: disable=protected-access
ret_tp = typing.cast(typing.Type[DerivedDecodable], child_tp)
break
else:
return self if ret_tp is type(self) else ret_tp(ref=self.buffer)
| 32.015152
| 88
| 0.493138
|
f3bac61a516e37f4a6fb93ca3531ef6edce6f320
| 40,046
|
py
|
Python
|
src/trunk/apps/fdsnws/fdsnws.py
|
megies/seiscomp3
|
0daa3700c728f503c9c6c626dd7b17c84ff98ac4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2015-09-17T22:43:50.000Z
|
2017-11-29T20:27:11.000Z
|
src/trunk/apps/fdsnws/fdsnws.py
|
megies/seiscomp3
|
0daa3700c728f503c9c6c626dd7b17c84ff98ac4
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2
|
2016-04-26T00:03:09.000Z
|
2017-12-05T02:24:50.000Z
|
src/trunk/apps/fdsnws/fdsnws.py
|
salichon/seiscomp3
|
4f7715f9ff9a35e7912c379ebf10446d0bceaeb2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#!/usr/bin/env python
################################################################################
# Copyright (C) 2013-2014 by gempa GmbH
#
# FDSNWS -- Implements FDSN Web Service interface, see
# http://www.fdsn.org/webservices/
#
# Implemented Services:
# fdsnws-dataselect
# fdsnws-event
# fdsnws-station
#
# Author: Stephan Herrnkind
# Email: herrnkind@gempa.de
################################################################################
import os, sys, time, fnmatch, base64, signal, re
try:
from twisted.cred import checkers, credentials, error, portal
from twisted.internet import reactor, defer, task
from twisted.web import guard, resource, server, static
from twisted.python import log, failure
from zope.interface import implements
except ImportError, e:
sys.exit("%s\nIs python twisted installed?" % str(e))
try:
from seiscomp3 import Core, DataModel, IO, Logging
from seiscomp3.Client import Application, Inventory
from seiscomp3.System import Environment
except ImportError, e:
sys.exit("%s\nIs the SeisComP environment set correctly?" % str(e))
from seiscomp3.fdsnws import utils
from seiscomp3.fdsnws.dataselect import FDSNDataSelect, FDSNDataSelectRealm, FDSNDataSelectAuthRealm
from seiscomp3.fdsnws.event import FDSNEvent
from seiscomp3.fdsnws.station import FDSNStation
from seiscomp3.fdsnws.availability import AvailabilityExtent, AvailabilityQuery
from seiscomp3.fdsnws.http import DirectoryResource, ListingResource, NoResource, \
Site, ServiceVersion, AuthResource
from seiscomp3.fdsnws.log import Log
def logSC3(entry):
try:
isError = entry['isError']
msg = entry['message']
if isError:
for l in msg:
Logging.error("[reactor] %s" % l)
else:
for l in msg:
Logging.info("[reactor] %s" % l)
except:
pass
################################################################################
# Fixes bug of DigestCredentialFactory by overriding decode method,
# see http://twistedmatrix.com/trac/ticket/6445
class BugfixedDigest(credentials.DigestCredentialFactory):
def decode(self, response, method, host):
response = ' '.join(response.splitlines())
# split comma separated parameters, don't split quoted strings, remove
# quotes
quoted = False
parts = []
p = ""
for c in response:
if c == '"':
quoted = not quoted
elif not quoted and c == ',':
parts.append(p)
p = ""
else:
p += c
if p:
parts.append(p)
auth = {}
for (k, v) in [p.split('=', 1) for p in parts]:
auth[k.strip()] = v.strip()
username = auth.get('username')
if not username:
raise error.LoginFailed("invalid response, no user name given")
if 'opaque' not in auth:
raise error.LoginFailed("invalid response, no opaque given")
if 'nonce' not in auth:
raise error.LoginFailed("invalid response, no nonce given.")
# Now verify the nonce/opaque values for this client
if self._verifyOpaque(auth.get('opaque'), auth.get('nonce'), host):
return credentials.DigestedCredentials(username, method,
self.authenticationRealm,
auth)
################################################################################
# Make CORS work with queryauth
class HTTPAuthSessionWrapper(guard.HTTPAuthSessionWrapper):
def __init__(self, *args, **kwargs):
guard.HTTPAuthSessionWrapper.__init__(self, *args, **kwargs)
def render(self, request):
if request.method == 'OPTIONS':
request.setHeader('Allow', 'GET,HEAD,POST,OPTIONS')
return ''
else:
return guard.HTTPAuthSessionWrapper.render(self, request)
################################################################################
class UsernamePasswordChecker(object):
implements(checkers.ICredentialsChecker)
credentialInterfaces = (credentials.IUsernamePassword,
credentials.IUsernameHashedPassword)
#---------------------------------------------------------------------------
def __init__(self, userdb):
self.__userdb = userdb
#---------------------------------------------------------------------------
def __cbPasswordMatch(self, matched, username):
if matched:
return username
else:
return failure.Failure(error.UnauthorizedLogin())
#---------------------------------------------------------------------------
def requestAvatarId(self, credentials):
return defer.maybeDeferred(self.__userdb.checkPassword, credentials) \
.addCallback(self.__cbPasswordMatch, str(credentials.username))
################################################################################
class UserDB(object):
#---------------------------------------------------------------------------
def __init__(self):
self.__users = {}
self.__blacklist = set()
task.LoopingCall(self.__expireUsers).start(60, False)
#---------------------------------------------------------------------------
def __expireUsers(self):
for (name, (password, attributes, expires)) in self.__users.items():
if time.time() > expires:
Logging.info("de-registering %s" % name)
del self.__users[name]
#---------------------------------------------------------------------------
def blacklistUser(self, name):
Logging.info("blacklisting %s" % name)
self.__blacklist.add(name)
#---------------------------------------------------------------------------
def addUser(self, name, attributes, expires, data):
try:
password = self.__users[name][0]
except KeyError:
bl = " (blacklisted)" if name in self.__blacklist else ""
Logging.notice("registering %s%s %s" % (name, bl, data))
password = base64.urlsafe_b64encode(os.urandom(12))
attributes['blacklisted'] = name in self.__blacklist
self.__users[name] = (password, attributes, expires)
return password
#---------------------------------------------------------------------------
def checkPassword(self, credentials):
try:
pw = self.__users[str(credentials.username)][0]
except KeyError:
return False
return credentials.checkPassword(pw)
#---------------------------------------------------------------------------
def getAttributes(self, name):
return self.__users[name][1]
#---------------------------------------------------------------------------
def dump(self):
Logging.info("known users:")
for name, user in self.__users.items():
Logging.info(" %s %s %d" % (name, user[1], user[2]))
################################################################################
class Access(object):
#---------------------------------------------------------------------------
def __init__(self):
self.__access = {}
#---------------------------------------------------------------------------
def initFromSC3Routing(self, routing):
for i in xrange(routing.accessCount()):
acc = routing.access(i)
net = acc.networkCode()
sta = acc.stationCode()
loc = acc.locationCode()
cha = acc.streamCode()
user = acc.user()
start = acc.start()
try:
end = acc.end()
except ValueError:
end = None
self.__access.setdefault((net, sta, loc, cha), []) \
.append((user, start, end))
#---------------------------------------------------------------------------
def __matchTime(self, t1, t2, accessStart, accessEnd):
return (not accessStart or (t1 and t1 >= accessStart)) and \
(not accessEnd or (t2 and t2 <= accessEnd))
#---------------------------------------------------------------------------
def __matchEmail(self, emailAddress, accessUser):
defaultPrefix = "mail:"
if accessUser.startswith(defaultPrefix):
accessUser = accessUser[len(defaultPrefix):]
return (emailAddress.upper() == accessUser.upper() or \
(accessUser[:1] == '@' and emailAddress[:1] != '@' and \
emailAddress.upper().endswith(accessUser.upper())))
#---------------------------------------------------------------------------
def __matchAttribute(self, attribute, accessUser):
return (attribute.upper() == accessUser.upper())
#---------------------------------------------------------------------------
def authorize(self, user, net, sta, loc, cha, t1, t2):
if user['blacklisted']:
return False
matchers = []
try:
# OID 0.9.2342.19200300.100.1.3 (RFC 2798)
emailAddress = user['mail']
matchers.append((self.__matchEmail, emailAddress))
except KeyError:
pass
try:
# B2ACCESS
for memberof in user['memberof'].split(';'):
matchers.append((self.__matchAttribute, "group:" + memberof))
except KeyError:
pass
for m in matchers:
for (u, start, end) in self.__access.get((net, '', '', ''), []):
if self.__matchTime(t1, t2, start, end) and m[0](m[1], u):
return True
for (u, start, end) in self.__access.get((net, sta, '', ''), []):
if self.__matchTime(t1, t2, start, end) and m[0](m[1], u):
return True
for (u, start, end) in self.__access.get((net, sta, loc, cha), []):
if self.__matchTime(t1, t2, start, end) and m[0](m[1], u):
return True
return False
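# Illustrative example of how the access table above is consulted: an entry
# keyed ("GE", "", "", "") whose user field is "@gfz-potsdam.de" would grant the
# whole GE network to any authenticated user whose mail attribute ends in that
# domain, because __matchEmail() performs a case-insensitive suffix match for
# access users starting with "@". The network and domain values here are made
# up for illustration.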
################################################################################
class DataAvailabilityCache(object):
#---------------------------------------------------------------------------
def __init__(self, app, da, validUntil):
self._da = da
self._validUntil = validUntil
self._extents = {}
self._extentsSorted = []
self._extentsOID = {}
for i in xrange(self._da.dataExtentCount()):
ext = self._da.dataExtent(i)
wid = ext.waveformID()
sid = "%s.%s.%s.%s" % (wid.networkCode(), wid.stationCode(),
wid.locationCode(), wid.channelCode())
restricted = app._openStreams is None or sid not in app._openStreams
if restricted and not app._allowRestricted:
continue
self._extents[sid] = (ext, restricted)
#Logging.debug("%s: %s ~ %s" % (sid, ext.start().iso(),
# ext.end().iso()))
if app._serveAvailability:
# load data attribute extents if availability is served
for i in xrange(da.dataExtentCount()):
extent = da.dataExtent(i)
app.query().loadDataAttributeExtents(extent)
# create a list of (extent, oid, restricted) tuples sorted by stream
self._extentsSorted = [ (e, app.query().getCachedId(e), res) \
for wid, (e, res) in sorted(self._extents.iteritems(),
key=lambda t: t[0]) ]
# create a dictionary of object ID to extents
self._extentsOID = dict((oid, (e, res)) \
for (e, oid, res) in self._extentsSorted)
Logging.info("loaded %i extents" % len(self._extents))
#---------------------------------------------------------------------------
def validUntil(self):
return self._validUntil
#---------------------------------------------------------------------------
def extent(self, net, sta, loc, cha):
wid = "%s.%s.%s.%s" % (net, sta, loc, cha)
if wid in self._extents:
return self._extents[wid][0]
return None
#---------------------------------------------------------------------------
def extents(self):
return self._extents
#---------------------------------------------------------------------------
def extentsSorted(self):
return self._extentsSorted
#---------------------------------------------------------------------------
def extentsOID(self):
return self._extentsOID
#---------------------------------------------------------------------------
def dataAvailability(self):
return self._da
################################################################################
class FDSNWS(Application):
#---------------------------------------------------------------------------
def __init__(self):
Application.__init__(self, len(sys.argv), sys.argv)
self.setMessagingEnabled(True)
self.setDatabaseEnabled(True, True)
self.setRecordStreamEnabled(True)
self.setLoadInventoryEnabled(True)
self._serverRoot = os.path.dirname(__file__)
self._listenAddress = '0.0.0.0' # all interfaces
self._port = 8080
self._connections = 5
self._queryObjects = 100000 # maximum number of objects per query
self._realtimeGap = None # minimum data age: 5min
self._samplesM = None # maximum number of samples per query
self._recordBulkSize = 102400 # desired record bulk size
self._htpasswd = '@CONFIGDIR@/fdsnws.htpasswd'
self._accessLogFile = ''
self._allowRestricted = True
self._useArclinkAccess = False
self._serveDataSelect = True
self._serveEvent = True
self._serveStation = True
self._serveAvailability = False
self._daEnabled = True
self._daCacheDuration = 300
self._daCache = None
self._openStreams = None
self._daRepositoryName = 'primary'
self._daDCCName = 'DCC'
self._hideAuthor = False
self._evaluationMode = None
self._eventTypeWhitelist = None
self._eventTypeBlacklist = None
self._eventFormats = None
self._stationFilter = None
self._dataSelectFilter = None
self._debugFilter = False
self._accessLog = None
self._fileNamePrefix = 'fdsnws'
self._trackdbEnabled = False
self._trackdbDefaultUser = 'fdsnws'
self._authEnabled = False
self._authGnupgHome = '@ROOTDIR@/var/lib/gpg'
self._authBlacklist = []
self._userdb = UserDB()
self._access = Access()
# Leave signal handling to us
Application.HandleSignals(False, False)
#---------------------------------------------------------------------------
def initConfiguration(self):
if not Application.initConfiguration(self):
return False
# bind address and port
try: self._listenAddress = self.configGetString('listenAddress')
except Exception: pass
try: self._port = self.configGetInt('port')
except Exception: pass
# maximum number of connections
try: self._connections = self.configGetInt('connections')
except Exception: pass
# maximum number of objects per query, used in fdsnws-station and
# fdsnws-event to limit main memory consumption
try: self._queryObjects = self.configGetInt('queryObjects')
except Exception: pass
# restrict end time of request to now-realtimeGap seconds, used in
# fdsnws-dataselect
try: self._realtimeGap = self.configGetInt('realtimeGap')
except Exception: pass
# maximum number of samples (in units of million) per query, used in
# fdsnws-dataselect to limit bandwidth
try: self._samplesM = self.configGetDouble('samplesM')
except Exception: pass
try: self._recordBulkSize = self.configGetInt('recordBulkSize')
except Exception: pass
if self._recordBulkSize < 1:
print >> sys.stderr, "Invalid recordBulkSize, must be larger than 0"
return False
# location of htpasswd file
try:
self._htpasswd = self.configGetString('htpasswd')
except Exception: pass
self._htpasswd = Environment.Instance().absolutePath(self._htpasswd)
# location of access log file
try:
self._accessLogFile = Environment.Instance().absolutePath(
self.configGetString('accessLog'))
except Exception: pass
# access to restricted inventory information
try: self._allowRestricted = self.configGetBool('allowRestricted')
except Exception: pass
# use arclink-access bindings
try: self._useArclinkAccess = self.configGetBool('useArclinkAccess')
except Exception: pass
# services to enable
try: self._serveDataSelect = self.configGetBool('serveDataSelect')
except Exception: pass
try: self._serveEvent = self.configGetBool('serveEvent')
except Exception: pass
try: self._serveStation = self.configGetBool('serveStation')
except Exception: pass
try: self._serveAvailability = self.configGetBool('serveAvailability')
except Exception: pass
# data availability
try: self._daEnabled = self.configGetBool('dataAvailability.enable')
except Exception: pass
try: self._daCacheDuration = self.configGetInt('dataAvailability.cacheDuration')
except Exception: pass
try: self._daRepositoryName = self.configGetString('dataAvailability.repositoryName')
except Exception: pass
try: self._daDCCName = self.configGetString('dataAvailability.dccName')
except Exception: pass
if self._serveAvailability and not self._daEnabled:
print >> sys.stderr, "can't serve availabilty without " \
"dataAvailability.enable set to true"
return False
if not bool(re.match(r'^[a-zA-Z0-9_\ -]*$', self._daRepositoryName)):
print >> sys.stderr, "invalid characters in dataAvailability.repositoryName"
return False
if not bool(re.match(r'^[a-zA-Z0-9_\ -]*$', self._daDCCName)):
print >> sys.stderr, "invalid characters in dataAvailability.dccName"
return False
# event filter
try: self._hideAuthor = self.configGetBool('hideAuthor')
except Exception: pass
try:
name = self.configGetString('evaluationMode')
if name.lower() == DataModel.EEvaluationModeNames.name(DataModel.MANUAL):
self._evaluationMode = DataModel.MANUAL
elif name.lower() == DataModel.EEvaluationModeNames.name(DataModel.AUTOMATIC):
self._evaluationMode = DataModel.AUTOMATIC
else:
print >> sys.stderr, "invalid evaluation mode string: %s" % name
return False
except Exception: pass
try:
strings = self.configGetStrings('eventType.whitelist')
if len(strings) > 1 or len(strings[0]):
self._eventTypeWhitelist = [ s.lower() for s in strings ]
except Exception: pass
try:
strings = self.configGetStrings('eventType.blacklist')
if len(strings) > 1 or len(strings[0]):
self._eventTypeBlacklist = [ s.lower() for s in strings ]
except Exception: pass
try:
strings = self.configGetStrings('eventFormats')
if len(strings) > 1 or len(strings[0]):
self._eventFormats = [ s.lower() for s in strings ]
except Exception: pass
# station filter
try: self._stationFilter = Environment.Instance().absolutePath(self.configGetString('stationFilter'))
except Exception: pass
# dataSelect filter
try: self._dataSelectFilter = Environment.Instance().absolutePath(self.configGetString('dataSelectFilter'))
except Exception: pass
# output filter debug information
try: self._debugFilter = self.configGetBool('debugFilter')
except Exception: pass
# prefix to be used as default for output filenames
try: self._fileNamePrefix = self.configGetString('fileNamePrefix')
except Exception: pass
# save request logs in database?
try: self._trackdbEnabled = self.configGetBool('trackdb.enable')
except Exception: pass
# default user
try: self._trackdbDefaultUser = self.configGetString('trackdb.defaultUser')
except Exception: pass
# enable authentication extension?
try: self._authEnabled = self.configGetBool('auth.enable')
except Exception: pass
# GnuPG home directory
try: self._authGnupgHome = self.configGetString('auth.gnupgHome')
except Exception: pass
self._authGnupgHome = Environment.Instance().absolutePath(self._authGnupgHome)
# blacklist of users/tokens
try:
strings = self.configGetStrings('auth.blacklist')
if len(strings) > 1 or len(strings[0]):
self._authBlacklist = strings
except Exception: pass
# If the database connection is passed via command line or configuration
# file then messaging is disabled. Messaging is only used to get
# the configured database connection URI.
if self.databaseURI() != "":
self.setMessagingEnabled(self._trackdbEnabled)
else:
# Without the event service, a database connection is not
# required if the inventory is loaded from file and data
# availability is not enabled
if not self._serveEvent and not self._useArclinkAccess and \
( not self._serveStation or ( \
not self.isInventoryDatabaseEnabled() and not self._daEnabled ) ):
self.setMessagingEnabled(self._trackdbEnabled)
self.setDatabaseEnabled(False, False)
return True
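# Illustrative module configuration read by initConfiguration() above (a
# sketch using the compiled-in defaults as values; the parameter names are
# exactly those passed to the configGet* calls):
#
#   listenAddress = 0.0.0.0
#   port = 8080
#   connections = 5
#   queryObjects = 100000
#   recordBulkSize = 102400
#   serveDataSelect = true
#   serveStation = true
#   serveEvent = true
#   serveAvailability = false
#   dataAvailability.enable = true
#   dataAvailability.cacheDuration = 300
#   trackdb.enable = false
#   auth.enable = false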
#---------------------------------------------------------------------------
# Signal handling in Python and fork in wrapped C++ code is not a good
# combination. Without digging too much into the problem, forking the
# process with os.fork() helps
def forkProcess(self):
cp = os.fork()
if cp < 0: return False
elif cp == 0: return True
elif cp > 0:
sys.exit(0)
#---------------------------------------------------------------------------
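# getDACache() below lazily (re)builds the data availability cache: extents
# are reloaded from the database only once the previous cache has expired
# (now > validUntil), and the database connection is re-opened first if it
# has gone away. On a failed reconnect the last successfully built (possibly
# stale) cache is returned.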
def getDACache(self):
if not self._daEnabled:
return None
now = Core.Time.GMT()
# check if cache is still valid
if self._daCache is None or now > self._daCache.validUntil():
if self.query() is None or \
not self.query().driver().isConnected():
dbInt = IO.DatabaseInterface.Open(self.databaseURI())
if dbInt is None:
Logging.error('failed to connect to database')
return self._daCache
else:
self.setDatabase(dbInt)
da = DataModel.DataAvailability()
self.query().loadDataExtents(da)
validUntil = now + Core.TimeSpan(self._daCacheDuration, 0)
self._daCache = DataAvailabilityCache(self, da, validUntil)
return self._daCache
#---------------------------------------------------------------------------
def _site(self):
modeStr = None
if self._evaluationMode is not None:
modeStr = DataModel.EEvaluationModeNames.name(self._evaluationMode)
whitelistStr = "<None>"
if self._eventTypeWhitelist is not None:
whitelistStr = ", ".join(self._eventTypeWhitelist)
blacklistStr = "<None>"
if self._eventTypeBlacklist is not None:
blacklistStr = ", ".join(self._eventTypeBlacklist)
stationFilterStr = "<None>"
if self._stationFilter is not None:
stationFilterStr = self._stationFilter
dataSelectFilterStr = "<None>"
if self._dataSelectFilter is not None:
dataSelectFilterStr = self._dataSelectFilter
Logging.debug("\n" \
"configuration read:\n" \
" serve\n" \
" dataselect : %s\n" \
" event : %s\n" \
" station : %s\n" \
" availability : %s\n" \
" listenAddress : %s\n" \
" port : %i\n" \
" connections : %i\n" \
" htpasswd : %s\n" \
" accessLog : %s\n" \
" queryObjects : %i\n" \
" realtimeGap : %s\n" \
" samples (M) : %s\n" \
" recordBulkSize : %i\n" \
" allowRestricted : %s\n" \
" useArclinkAccess: %s\n" \
" hideAuthor : %s\n" \
" evaluationMode : %s\n" \
" data availability\n" \
" enabled : %s\n" \
" cache duration: %i\n" \
" repo name : %s\n" \
" dcc name : %s\n" \
" eventType\n" \
" whitelist : %s\n" \
" blacklist : %s\n" \
" inventory filter\n" \
" station : %s\n" \
" dataSelect : %s\n" \
" debug enabled : %s\n" \
" trackdb\n" \
" enabled : %s\n" \
" defaultUser : %s\n" \
" auth\n" \
" enabled : %s\n" \
" gnupgHome : %s\n" % (
self._serveDataSelect, self._serveEvent,
self._serveStation, self._serveAvailability,
self._listenAddress, self._port, self._connections,
self._htpasswd, self._accessLogFile, self._queryObjects,
self._realtimeGap, self._samplesM, self._recordBulkSize,
self._allowRestricted, self._useArclinkAccess,
self._hideAuthor, modeStr, self._daEnabled,
self._daCacheDuration, self._daRepositoryName,
self._daDCCName, whitelistStr, blacklistStr,
stationFilterStr, dataSelectFilterStr,
self._debugFilter, self._trackdbEnabled,
self._trackdbDefaultUser, self._authEnabled,
self._authGnupgHome))
if not self._serveDataSelect and not self._serveEvent and \
not self._serveStation:
Logging.error("all services disabled through configuration")
return None
# access logger if requested
if self._accessLogFile:
self._accessLog = Log(self._accessLogFile)
# load inventory needed by DataSelect and Station service
stationInv = dataSelectInv = None
if self._serveDataSelect or self._serveStation:
retn = False
stationInv = dataSelectInv = Inventory.Instance().inventory()
Logging.info("inventory loaded")
if self._serveDataSelect and self._serveStation:
# clone inventory if station and dataSelect filter are distinct
# else share inventory between both services
if self._stationFilter != self._dataSelectFilter:
dataSelectInv = self._cloneInventory(stationInv)
retn = self._filterInventory(stationInv, self._stationFilter, "station") and \
self._filterInventory(dataSelectInv, self._dataSelectFilter, "dataSelect")
else:
retn = self._filterInventory(stationInv, self._stationFilter)
elif self._serveStation:
retn = self._filterInventory(stationInv, self._stationFilter)
else:
retn = self._filterInventory(dataSelectInv, self._dataSelectFilter)
if not retn:
return None
self._access = Access()
if self._serveDataSelect and self._useArclinkAccess:
self._access.initFromSC3Routing(self.query().loadRouting())
DataModel.PublicObject.SetRegistrationEnabled(False)
shareDir = os.path.join(Environment.Instance().shareDir(), 'fdsnws')
# Overwrite/set mime type of *.wadl and *.xml documents. Instead of
# using the official types defined in /etc/mime.types 'application/xml'
# is used as enforced by the FDSNWS spec.
static.File.contentTypes['.wadl'] = 'application/xml'
static.File.contentTypes['.xml'] = 'application/xml'
# create resource tree /fdsnws/...
root = ListingResource()
fileName = os.path.join(shareDir, 'favicon.ico')
fileRes = static.File(fileName, 'image/x-icon')
fileRes.childNotFound = NoResource()
fileRes.isLeaf = True
root.putChild('favicon.ico', fileRes)
prefix = ListingResource()
root.putChild('fdsnws', prefix)
# right now service version is shared by all services
serviceVersion = ServiceVersion()
# dataselect
if self._serveDataSelect:
dataselect = ListingResource()
prefix.putChild('dataselect', dataselect)
dataselect1 = DirectoryResource(os.path.join(shareDir, 'dataselect.html'))
dataselect.putChild('1', dataselect1)
dataselect1.putChild('query', FDSNDataSelect(dataSelectInv, self._recordBulkSize))
msg = 'authorization for restricted time series data required'
authSession = self._getAuthSessionWrapper(dataSelectInv, msg)
dataselect1.putChild('queryauth', authSession)
dataselect1.putChild('version', serviceVersion)
fileRes = static.File(os.path.join(shareDir, 'dataselect.wadl'))
fileRes.childNotFound = NoResource()
dataselect1.putChild('application.wadl', fileRes)
fileRes = static.File(os.path.join(shareDir, 'dataselect-builder.html'))
fileRes.childNotFound = NoResource()
dataselect1.putChild('builder', fileRes)
if self._authEnabled:
dataselect1.putChild('auth', AuthResource(self._authGnupgHome,
self._userdb))
# event
if self._serveEvent:
event = ListingResource()
prefix.putChild('event', event)
event1 = DirectoryResource(os.path.join(shareDir, 'event.html'))
event.putChild('1', event1)
event1.putChild('query', FDSNEvent(self._hideAuthor,
self._evaluationMode,
self._eventTypeWhitelist,
self._eventTypeBlacklist,
self._eventFormats))
fileRes = static.File(os.path.join(shareDir, 'catalogs.xml'))
fileRes.childNotFound = NoResource()
event1.putChild('catalogs', fileRes)
fileRes = static.File(os.path.join(shareDir, 'contributors.xml'))
fileRes.childNotFound = NoResource()
event1.putChild('contributors', fileRes)
event1.putChild('version', serviceVersion)
fileRes = static.File(os.path.join(shareDir, 'event.wadl'))
fileRes.childNotFound = NoResource()
event1.putChild('application.wadl', fileRes)
fileRes = static.File(os.path.join(shareDir, 'event-builder.html'))
fileRes.childNotFound = NoResource()
event1.putChild('builder', fileRes)
# station
if self._serveStation:
station = ListingResource()
prefix.putChild('station', station)
station1 = DirectoryResource(os.path.join(shareDir, 'station.html'))
station.putChild('1', station1)
station1.putChild('query', FDSNStation(stationInv,
self._allowRestricted,
self._queryObjects,
self._daEnabled))
station1.putChild('version', serviceVersion)
fileRes = static.File(os.path.join(shareDir, 'station.wadl'))
fileRes.childNotFound = NoResource()
station1.putChild('application.wadl', fileRes)
fileRes = static.File(os.path.join(shareDir, 'station-builder.html'))
fileRes.childNotFound = NoResource()
station1.putChild('builder', fileRes)
# availability
if self._serveAvailability:
# create a set of waveformIDs which represent open channels
if self._serveDataSelect:
openStreams = set()
for iNet in xrange(dataSelectInv.networkCount()):
net = dataSelectInv.network(iNet)
if utils.isRestricted(net): continue
for iSta in xrange(net.stationCount()):
sta = net.station(iSta)
if utils.isRestricted(sta): continue
for iLoc in xrange(sta.sensorLocationCount()):
loc = sta.sensorLocation(iLoc)
for iCha in xrange(loc.streamCount()):
cha = loc.stream(iCha)
if utils.isRestricted(cha): continue
openStreams.add("{0}.{1}.{2}.{3}".format(
net.code(), sta.code(),
loc.code(), cha.code()))
self._openStreams = openStreams
else:
self._openStreams = None
ext = ListingResource()
prefix.putChild('ext', ext)
availability = ListingResource()
ext.putChild('availability', availability)
availability1 = DirectoryResource(os.path.join(shareDir, 'availability.html'))
availability.putChild('1', availability1)
availability1.putChild('extent', AvailabilityExtent())
availability1.putChild('query', AvailabilityQuery())
availability1.putChild('version', serviceVersion)
fileRes = static.File(os.path.join(shareDir, 'station.wadl'))
fileRes.childNotFound = NoResource()
availability1.putChild('availability.wadl', fileRes)
fileRes = static.File(os.path.join(shareDir, 'availability-extent-builder.html'))
fileRes.childNotFound = NoResource()
availability1.putChild('builder-extent', fileRes)
fileRes = static.File(os.path.join(shareDir, 'availability-builder.html'))
fileRes.childNotFound = NoResource()
availability1.putChild('builder', fileRes)
# static files
fileRes = static.File(os.path.join(shareDir, 'js'))
fileRes.childNotFound = NoResource()
fileRes.hideInListing = True
prefix.putChild('js', fileRes)
fileRes = static.File(os.path.join(shareDir, 'css'))
fileRes.childNotFound = NoResource()
fileRes.hideInListing = True
prefix.putChild('css', fileRes)
return Site(root)
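# Depending on the enabled services, the resource tree assembled above
# exposes URLs such as (illustrative, not exhaustive):
#   /fdsnws/dataselect/1/{query,queryauth,version,application.wadl,builder}
#   /fdsnws/event/1/{query,catalogs,contributors,version,application.wadl,builder}
#   /fdsnws/station/1/{query,version,application.wadl,builder}
#   /fdsnws/ext/availability/1/{extent,query,version,builder,builder-extent}
# plus the shared /fdsnws/js and /fdsnws/css static directories.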
#---------------------------------------------------------------------------
def _reloadTask(self):
if not self.__sighup:
return
self.__sighup = False
Logging.info("reloading inventory")
self.reloadInventory()
site = self._site()
if site:
self.__tcpPort.factory = site
Logging.info("reload successful")
else:
Logging.info("reload failed")
self._userdb.dump()
#---------------------------------------------------------------------------
def _sighupHandler(self, signum, frame):
Logging.info("SIGHUP received")
self.__sighup = True
#---------------------------------------------------------------------------
def run(self):
retn = False
try:
for user in self._authBlacklist:
self._userdb.blacklistUser(user)
site = self._site()
if not site:
return False
# start listen for incoming request
self.__tcpPort = reactor.listenTCP(self._port,
site,
self._connections,
self._listenAddress)
# setup signal handler
self.__sighup = False
signal.signal(signal.SIGHUP, self._sighupHandler)
task.LoopingCall(self._reloadTask).start(60, False)
# start processing
Logging.info("start listening")
log.addObserver(logSC3)
reactor.run()
retn = True
except Exception, e:
Logging.error(str(e))
return retn
#---------------------------------------------------------------------------
def _cloneInventory(self, inv):
wasEnabled = DataModel.PublicObject.IsRegistrationEnabled()
DataModel.PublicObject.SetRegistrationEnabled(False)
inv2 = DataModel.Inventory.Cast(inv.clone())
for iNet in xrange(inv.networkCount()):
net = inv.network(iNet)
net2 = DataModel.Network.Cast(net.clone())
inv2.add(net2)
for iSta in xrange(net.stationCount()):
sta = net.station(iSta)
sta2 = DataModel.Station.Cast(sta.clone())
net2.add(sta2)
for iLoc in xrange(sta.sensorLocationCount()):
loc = sta.sensorLocation(iLoc)
loc2 = DataModel.SensorLocation.Cast(loc.clone())
sta2.add(loc2)
for iCha in xrange(loc.streamCount()):
cha = loc.stream(iCha)
cha2 = DataModel.Stream.Cast(cha.clone())
loc2.add(cha2)
DataModel.PublicObject.SetRegistrationEnabled(wasEnabled)
return inv2
#---------------------------------------------------------------------------
def _filterInventory(self, inv, fileName, serviceName=""):
if not fileName:
return True
class FilterRule:
def __init__(self, name, code):
self.name = name
self.exclude = name.startswith("!")
self.code = code
self.restricted = None
self.shared = None
self.netClass = None
self.archive = None
# read filter configuration from INI file
filter = []
includeRuleDefined = False
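# Hypothetical example of an inventory filter INI file as parsed below. Each
# section defines one FilterRule: a section name starting with '!' marks an
# exclude rule, 'code' is an fnmatch pattern applied to NET.STA.LOC.CHA, and
# restricted/shared/netClass/archive are optional extra conditions. As soon as
# at least one include rule exists, channels matching no include rule are
# dropped as well.
#
#   [broadband only]
#   code = *.*.*.BH?
#
#   [!temporary networks]
#   code = *.*.*.*
#   netClass = t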
try:
import ConfigParser
except ImportError, ie:
Logging.error("could not load 'ConfigParser' Python module")
return False
try:
cp = ConfigParser.ConfigParser()
Logging.notice("reading inventory filter file: %s" % fileName)
cp.readfp(open(fileName, 'r'))
if len(cp.sections()) == 0:
return True
# check for mandatory code attribute
for sectionName in cp.sections():
code = ""
try:
code = cp.get(sectionName, "code")
except:
Logging.error("missing 'code' attribute in section %s of " \
"inventory filter file %s" % (
sectionName, fileName))
return False
rule = FilterRule(sectionName, str(code))
try:
rule.restricted = cp.getboolean(sectionName, 'restricted')
except: pass
try:
rule.shared = cp.getboolean(sectionName, 'shared')
except: pass
try:
rule.netClass = str(cp.get(sectionName, 'netClass'))
except: pass
try:
rule.archive = str(cp.get(sectionName, 'archive'))
except: pass
includeRuleDefined |= not rule.exclude
filter.append(rule)
except Exception, e:
Logging.error("could not read inventory filter file %s: %s" % (
fileName, str(e)))
return False
# apply filter
# networks
if self._debugFilter:
debugLines = []
delNet = delSta = delLoc = delCha = 0
iNet = 0
while iNet < inv.networkCount():
net = inv.network(iNet)
try: netRestricted = net.restricted()
except ValueError: netRestricted = None
try: netShared = net.shared()
except ValueError: netShared = None
# stations
iSta = 0
while iSta < net.stationCount():
sta = net.station(iSta)
staCode = "%s.%s" % (net.code(), sta.code())
try: staRestricted = sta.restricted()
except ValueError: staRestricted = None
try: staShared = sta.shared()
except ValueError: staShared = None
# sensor locations
iLoc = 0
while iLoc < sta.sensorLocationCount():
loc = sta.sensorLocation(iLoc)
locCode = "%s.%s" % (staCode, loc.code())
# channels
iCha = 0
while iCha < loc.streamCount():
cha = loc.stream(iCha)
code = "%s.%s" % (locCode, cha.code())
# evaluate rules until matching code is found
match = False
for rule in filter:
# code
if not fnmatch.fnmatchcase(code, rule.code):
continue
# restricted
if rule.restricted is not None:
try:
if cha.restricted() != rule.restricted:
continue
except ValueError:
if staRestricted is not None:
if staRestricted != rule.restricted:
continue
elif netRestricted is None or \
netRestricted != rule.restricted:
continue
# shared
if rule.shared is not None:
try:
if cha.shared() != rule.shared:
continue
except ValueError:
if staShared is not None:
if staShared != rule.shared:
continue
elif netShared is None or \
netShared != rule.shared:
continue
# netClass
if rule.netClass is not None and \
net.netClass() != rule.netClass:
continue
# archive
if rule.archive is not None and \
net.archive() != rule.archive:
continue
# the rule matched
match = True
break
if (match and rule.exclude) or \
(not match and includeRuleDefined):
loc.removeStream(iCha)
delCha += 1
reason = "no matching include rule"
if match:
reason = "'%s'" % rule.name
if self._debugFilter:
debugLines.append("%s [-]: %s" % (code, reason))
else:
iCha += 1
reason = "no matching exclude rule"
if match:
reason = "'%s'" % rule.name
if self._debugFilter:
debugLines.append("%s [+]: %s" % (code, reason))
# remove empty sensor locations
if loc.streamCount() == 0:
sta.removeSensorLocation(iLoc)
delLoc += 1
else:
iLoc += 1
# remove empty stations
if sta.sensorLocationCount() == 0:
delSta += 1
net.removeStation(iSta)
else:
iSta += 1
# remove empty networks
if net.stationCount() == 0:
delNet += 1
inv.removeNetwork(iNet)
else:
iNet += 1
if serviceName:
serviceName += ": "
Logging.debug("%sremoved %i networks, %i stations, %i locations, "
"%i streams" % (serviceName, delNet, delSta, delLoc,
delCha))
if self._debugFilter:
debugLines.sort()
Logging.notice("%sfilter decisions based on file %s:\n%s" % (
serviceName, fileName, str("\n".join(debugLines))))
return True
#---------------------------------------------------------------------------
def _getAuthSessionWrapper(self, inv, msg):
if self._useArclinkAccess:
access = self._access
else:
access = None
if self._authEnabled: # auth extension
access = self._access # requires useArclinkAccess for security reasons
realm = FDSNDataSelectAuthRealm(inv, self._recordBulkSize, access, self._userdb)
checker = UsernamePasswordChecker(self._userdb)
else: # htpasswd
realm = FDSNDataSelectRealm(inv, self._recordBulkSize, access)
checker = checkers.FilePasswordDB(self._htpasswd)
p = portal.Portal(realm, [checker])
f = guard.DigestCredentialFactory('MD5', msg)
f.digest = BugfixedDigest('MD5', msg)
return HTTPAuthSessionWrapper(p, [f])
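# The wrapper returned above protects the queryauth endpoint with HTTP digest
# authentication: credentials are verified against the auth extension user
# database when auth.enable is set, otherwise against the htpasswd file, and
# the chosen realm decides which access-control object (if any) is applied to
# requests for restricted waveforms.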
app = FDSNWS()
sys.exit(app())
# vim: ts=4 noet
| 32.932566
| 109
| 0.606527
|
8141404e5a1258c12c0cb4333e11080018aaee27
| 20,013
|
py
|
Python
|
backend/worker/pe_scripts/enrich_shodan_pe.py
|
v0rts/crossfeed
|
cfac3f9597ada37df1c2ae46c34cf27efd5cb706
|
[
"CC0-1.0"
] | null | null | null |
backend/worker/pe_scripts/enrich_shodan_pe.py
|
v0rts/crossfeed
|
cfac3f9597ada37df1c2ae46c34cf27efd5cb706
|
[
"CC0-1.0"
] | 12
|
2021-11-30T11:25:26.000Z
|
2022-03-30T11:28:39.000Z
|
backend/worker/pe_scripts/enrich_shodan_pe.py
|
v0rts/crossfeed
|
cfac3f9597ada37df1c2ae46c34cf27efd5cb706
|
[
"CC0-1.0"
] | null | null | null |
try:
import traceback
import shodan
import pandas as pd
import requests
import time
import os
import datetime
import json
import sys
from pe_db.query_db import (
execute_shodan_data,
query_ips,
getDataSource,
get_org_id,
)
except:
print(traceback.format_exc())
DB_HOST = os.environ.get("DB_HOST")
PE_DB_NAME = os.environ.get("PE_DB_NAME")
PE_DB_USERNAME = os.environ.get("PE_DB_USERNAME")
PE_DB_PASSWORD = os.environ.get("PE_DB_PASSWORD")
API_KEY = os.environ.get("key")
ORGS_LIST = os.environ.get("org_list")
THREAD_NUM = os.environ.get("thread_num")
def get_dates():
"""Get dates for the query."""
end = datetime.datetime.now()
days_back = datetime.timedelta(days=30)
days_forward = datetime.timedelta(days=1)
start = end - days_back
end = end + days_forward
start_time = time_to_utc(start)
end_time = time_to_utc(end)
return start_time, end_time
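# The resulting window spans roughly the last 30 days up to "tomorrow", both
# converted to UTC below; the extra day of headroom presumably keeps banners
# timestamped today inside the range regardless of the local timezone.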
def time_to_utc(in_time):
"""Convert time to UTC."""
# If time does not have timezone info, assume it is local
if in_time.tzinfo is None:
local_tz = datetime.datetime.now().astimezone().tzinfo
in_time = in_time.replace(tzinfo=local_tz)
utc_time = in_time.astimezone(datetime.timezone.utc)
return utc_time
def search_circl(cve):
"""Fetch CVE info from Circl."""
re = requests.get(f"https://cve.circl.lu/api/cve/{cve}")
return re
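# is_verified() below only relies on a handful of fields from the circl.lu
# response (summary, vulnerable_product, access.vector, access.complexity,
# impact.confidentiality/integrity/availability and cvss); fields missing
# from the JSON simply default to None via dict.get().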
def is_verified(
vulns, cve, av_dict, ac_dict, ci_dict, vuln_data, org_uid, r, d, asn, unverified
):
"""Check if a CVE is verified."""
v = vulns[cve]
if v["verified"]:
re = search_circl(cve)
r_json = re.json()
if r_json is not None:
summary = r_json.get("summary", None)
product = r_json.get("vulnerable_product", None)
attack_vector = r_json.get("access", {}).get("vector")
av = av_dict.get(attack_vector, None)
attack_complexity = r_json.get("access", {}).get("complexity")
ac = ac_dict.get(attack_complexity, None)
conf_imp = r_json.get("impact", {}).get("confidentiality")
ci = ci_dict.get(conf_imp, None)
int_imp = r_json.get("impact", {}).get("integrity")
ii = ci_dict.get(int_imp, None)
avail_imp = r_json.get("impact", {}).get("availability")
ai = ci_dict.get(avail_imp, None)
cvss = r_json.get("cvss", None)
# guard against a missing CVSS score before comparing
if cvss is None:
severity = None
elif cvss == 10:
severity = "Critical"
elif cvss >= 7:
severity = "High"
elif cvss >= 4:
severity = "Medium"
elif cvss > 0:
severity = "Low"
else:
severity = None
else:
# Set cve info to null if circl has no results
summary = ""
product = ""
attack_vector = ""
av = ""
attack_complexity = ""
ac = ""
conf_imp = ""
ci = ""
int_imp = ""
ii = ""
avail_imp = ""
ai = ""
severity = ""
cvss = None
vuln_data.append(
[
org_uid,
r["org"],
r["ip_str"],
d["port"],
d["_shodan"]["module"],
d["timestamp"],
cve,
severity,
cvss,
summary,
product,
attack_vector,
av,
attack_complexity,
ac,
conf_imp,
ci,
int_imp,
ii,
avail_imp,
ai,
r["tags"],
r["domains"],
r["hostnames"],
r["isp"],
asn,
]
)
else:
unverified.append(cve)
return unverified, vuln_data
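# Note: the order of the row appended to vuln_data above must stay in sync
# with the column list of vuln_df built in search_shodan(); the same pairing
# holds for risk_data/risk_df and data/df.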
def get_shodan_dicts():
"""Build Shodan dictionaries that hold definitions and naming conventions."""
risky_ports = [
"ftp",
"telnet",
"http",
"smtp",
"pop3",
"imap",
"netbios",
"snmp",
"ldap",
"smb",
"sip",
"rdp",
"vnc",
"kerberos",
]
name_dict = {
"ftp": "File Transfer Protocol",
"telnet": "Telnet",
"http": "Hypertext Transfer Protocol",
"smtp": "Simple Mail Transfer Protocol",
"pop3": "Post Office Protocol 3",
"imap": "Internet Message Access Protocol",
"netbios": "Network Basic Input/Output System",
"snmp": "Simple Network Management Protocol",
"ldap": "Lightweight Directory Access Protocol",
"smb": "Server Message Block",
"sip": "Session Initiation Protocol",
"rdp": "Remote Desktop Protocol",
"kerberos": "Kerberos",
}
risk_dict = {
"ftp": "FTP",
"telnet": "Telnet",
"http": "HTTP",
"smtp": "SMTP",
"pop3": "POP3",
"imap": "IMAP",
"netbios": "NetBIOS",
"snmp": "SNMP",
"ldap": "LDAP",
"smb": "SMB",
"sip": "SIP",
"rdp": "RDP",
"vnc": "VNC",
"kerberos": "Kerberos",
}
# Create dictionaries for CVSSv2 vector definitions using https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator
av_dict = {
"NETWORK": "A vulnerability exploitable with network access means the vulnerable software is bound to the network stack and the attacker does not require local network access or local access. Such a vulnerability is often termed “remotely exploitable”. An example of a network attack is an RPC buffer overflow.",
"ADJACENT_NETWORK": "A vulnerability exploitable with adjacent network access requires the attacker to have access to either the broadcast or collision domain of the vulnerable software. Examples of local networks include local IP subnet, Bluetooth, IEEE 802.11, and local Ethernet segment.",
"LOCAL": "A vulnerability exploitable with only local access requires the attacker to have either physical access to the vulnerable system or a local (shell) account. Examples of locally exploitable vulnerabilities are peripheral attacks such as Firewire/USB DMA attacks, and local privilege escalations (e.g., sudo).",
}
ac_dict = {
"LOW": "Specialized access conditions or extenuating circumstances do not exist. The following are examples: The affected product typically requires access to a wide range of systems and users, possibly anonymous and untrusted (e.g., Internet-facing web or mail server). The affected configuration is default or ubiquitous. The attack can be performed manually and requires little skill or additional information gathering. The 'race condition' is a lazy one (i.e., it is technically a race but easily winnable).",
"MEDIUM": "The access conditions are somewhat specialized; the following are examples: The attacking party is limited to a group of systems or users at some level of authorization, possibly untrusted. Some information must be gathered before a successful attack can be launched. The affected configuration is non-default, and is not commonly configured (e.g., a vulnerability present when a server performs user account authentication via a specific scheme, but not present for another authentication scheme). The attack requires a small amount of social engineering that might occasionally fool cautious users (e.g., phishing attacks that modify a web browser’s status bar to show a false link, having to be on someone’s “buddy” list before sending an IM exploit).",
"HIGH": "Specialized access conditions exist. For example, in most configurations, the attacking party must already have elevated privileges or spoof additional systems in addition to the attacking system (e.g., DNS hijacking). The attack depends on social engineering methods that would be easily detected by knowledgeable people. For example, the victim must perform several suspicious or atypical actions. The vulnerable configuration is seen very rarely in practice. If a race condition exists, the window is very narrow.",
}
ci_dict = {
"NONE": "There is no impact to the confidentiality of the system",
"PARTIAL": "There is considerable informational disclosure. Access to some system files is possible, but the attacker does not have control over what is obtained, or the scope of the loss is constrained. An example is a vulnerability that divulges only certain tables in a database.",
"COMPLETE": "There is total information disclosure, resulting in all system files being revealed. The attacker is able to read all of the system's data (memory, files, etc.).",
}
return risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict
def search_shodan(thread_name, ips, api_key, start, end, org_uid, org_name, failed):
"""Search IPs in the Shodan API."""
# Initialize lists to store Shodan results
data = []
risk_data = []
vuln_data = []
# Build dictionaries for naming conventions and definitions
risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict = get_shodan_dicts()
# Break up IPs into chunks of 100
# Throws flake8 E203 error which is ignored in .flake8
ip_chunks = [ips[i : i + 100] for i in range(0, len(ips), 100)]
tot_ips = len(ips)
tot = len(ip_chunks)
print(f"{thread_name} Split {tot_ips} IPs into {tot} chunks - {org_name}")
# Loop through chunks and search Shodan
for i, ip_chunk in enumerate(ip_chunks):
try_again = True
try_count = 1
while try_again:
try:
api = shodan.Shodan(api_key)
results = api.host(ip_chunk, history=True)
for r in results:
for d in r["data"]:
# Convert Shodan date string to UTC datetime
shodan_datetime = datetime.datetime.strptime(
d["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"
)
shodan_utc = time_to_utc(shodan_datetime)
# Only include results in the timeframe
if shodan_utc > start and shodan_utc < end:
prod = d.get("product", None)
serv = d.get("http", {}).get("server")
asn = d.get("ASN", None)
vulns = d.get("vulns", None)
if vulns is not None:
cves = list(vulns.keys())
unverified = []
for cve in cves:
# Check if CVEs are verified
unverified, vuln_data = is_verified(
vulns,
cve,
av_dict,
ac_dict,
ci_dict,
vuln_data,
org_uid,
r,
d,
asn,
unverified,
)
if len(unverified) > 0:
ftype = "Pontentially Vulnerable Product"
name = prod
risk = unverified
mitigation = "Verify asset is up to date, supported by the vendor, and configured securely"
risk_data.append(
[
org_uid,
r["org"],
r["ip_str"],
d["port"],
d["_shodan"]["module"],
ftype,
name,
risk,
mitigation,
d["timestamp"],
prod,
serv,
r["tags"],
r["domains"],
r["hostnames"],
r["isp"],
asn,
]
)
elif d["_shodan"]["module"] in risky_ports:
ftype = "Insecure Protocol"
name = name_dict[d["_shodan"]["module"]]
risk = [risk_dict[d["_shodan"]["module"]]]
mitigation = "Confirm open port has a required business use for internet exposure and ensure necessary safeguards are in place through TCP wrapping, TLS encryption, or authentication requirements"
risk_data.append(
[
org_uid,
r["org"],
r["ip_str"],
d["port"],
d["_shodan"]["module"],
ftype,
name,
risk,
mitigation,
d["timestamp"],
prod,
serv,
r["tags"],
r["domains"],
r["hostnames"],
r["isp"],
asn,
]
)
data.append(
[
org_uid,
r["org"],
r["ip_str"],
d["port"],
d["_shodan"]["module"],
d["timestamp"],
prod,
serv,
r["tags"],
r["domains"],
r["hostnames"],
r["isp"],
asn,
]
)
time.sleep(1)
try_again = False
except shodan.APIError as e:
if try_count == 5:
print(
f"{thread_name} Failed 5 times. Continuing to next chunk - {org_name}"
)
failed.append(
f"{org_name} chunk {i + 1} failed 5 times and skipped"
)
try_again = False
print(f"{thread_name} {e} - {org_name}")
print(
f"{thread_name} Try #{try_count} failed. Calling the API again. - {org_name}"
)
try_count += 1
# Most likely too many API calls per second so sleep
time.sleep(5)
except Exception as e:
print(f"{thread_name} {e} - {org_name}")
print(
f"{thread_name} Not a shodan API error. Continuing to next chunk - {org_name}"
)
failed.append(f"{org_name} chunk {i + 1} failed and skipped")
try_again = False
count = i + 1
print(f"{thread_name} {count}/{tot} complete - {org_name}")
df = pd.DataFrame(
data,
columns=[
"organizations_uid",
"organization",
"ip",
"port",
"protocol",
"timestamp",
"product",
"server",
"tags",
"domains",
"hostnames",
"isn",
"asn",
],
)
risk_df = pd.DataFrame(
risk_data,
columns=[
"organizations_uid",
"organization",
"ip",
"port",
"protocol",
"type",
"name",
"potential_vulns",
"mitigation",
"timestamp",
"product",
"server",
"tags",
"domains",
"hostnames",
"isn",
"asn",
],
)
vuln_df = pd.DataFrame(
vuln_data,
columns=[
"organizations_uid",
"organization",
"ip",
"port",
"protocol",
"timestamp",
"cve",
"severity",
"cvss",
"summary",
"product",
"attack_vector",
"av_description",
"attack_complexity",
"ac_description",
"confidentiality_impact",
"ci_description",
"integrity_impact",
"ii_Description",
"availability_impact",
"ai_description",
"tags",
"domains",
"hostnames",
"isn",
"asn",
],
)
# Add data_source value
source = getDataSource("Shodan")
source_uid = source[0]
df["data_source_uid"] = source_uid
risk_df["data_source_uid"] = source_uid
vuln_df["data_source_uid"] = source_uid
# Insert data into the PE database
failed = execute_shodan_data(df, "shodan_assets", thread_name, org_name, failed)
failed = execute_shodan_data(
risk_df,
"shodan_insecure_protocols_unverified_vulns",
thread_name,
org_name,
failed,
)
failed = execute_shodan_data(
vuln_df, "shodan_verified_vulns", thread_name, org_name, failed
)
return failed
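# search_shodan() therefore feeds three PE database tables per organization:
# shodan_assets (every banner inside the query window),
# shodan_insecure_protocols_unverified_vulns (risky open services and
# unverified CVEs) and shodan_verified_vulns (CVEs enriched via circl.lu).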
def run_shodan_thread(api, org_list, thread_name):
"""Run a shodan thread."""
failed = []
for org_name in org_list:
org_uid = get_org_id(org_name)
# if org_name not in ["DOL_BLS"]:
# continue
print(f"Running IPs for {org_name}")
start, end = get_dates()
try:
ips_df = query_ips(org_uid)
ips = list(ips_df["ip_address"].values)
except Exception as e:
print(f"Failed fetching IPs for {org_name}.")
print(f"{e} - {org_name}")
failed.append(f"{org_name} fetching IPs")
continue
if len(ips) <= 0:
print(f"No IPs for {org_name}.")
failed.append(f"{org_name} has 0 IPs")
continue
failed = search_shodan(
thread_name, ips, api, start, end, org_uid, org_name, failed
)
if len(failed) > 0:
print(f"Failures: {failed}")
def main():
try:
print("Starting new thread")
org_list = json.loads(ORGS_LIST)
print(org_list, flush=True)
run_shodan_thread(API_KEY, org_list, "")
except:
print(traceback.format_exc(), flush=True)
if __name__ == "__main__":
sys.exit(main())
| 39.708333
| 775
| 0.47664
|
7804752b7ad05cfe60d1838935a4032236c31f59
| 75,027
|
py
|
Python
|
python/datamodel_parser/application/Hdu.py
|
sdss/datamodel_parser
|
a4ff123ff7daf88e7402a7d50c38141d206e8054
|
[
"BSD-3-Clause"
] | 2
|
2019-06-19T09:28:26.000Z
|
2019-09-20T01:58:00.000Z
|
python/datamodel_parser/application/Hdu.py
|
sdss/datamodel_parser
|
a4ff123ff7daf88e7402a7d50c38141d206e8054
|
[
"BSD-3-Clause"
] | null | null | null |
python/datamodel_parser/application/Hdu.py
|
sdss/datamodel_parser
|
a4ff123ff7daf88e7402a7d50c38141d206e8054
|
[
"BSD-3-Clause"
] | null | null | null |
from bs4 import Tag, NavigableString
from datamodel_parser.application import Util
from datamodel_parser.application import Intro
from datamodel_parser.application.Type import Hdu_type
import string
from json import dumps
class Hdu:
'''
'''
def __init__(self,logger=None,options=None,body=None):
self.initialize(logger=logger,options=options)
self.set_body(body=body)
self.set_ready()
self.set_attributes()
def initialize(self,logger=None,options=None):
'''Initialize utility class, logger, and command line options.'''
self.util = Util(logger=logger,options=options)
if self.util and self.util.ready:
self.logger = self.util.logger if self.util.logger else None
self.options = self.util.options if self.util.options else None
self.ready = bool(self.logger)
else:
self.ready = False
print('ERROR: Unable to initialize. self.util: {}'.format(self.util))
def set_body(self, body=None):
'''Set the body class attribute.'''
self.body = None
if self.ready:
self.body = body if body else None
if not self.body:
self.ready = False
self.logger.error('Unable to set_body.')
def set_ready(self):
'''Set error indicator.'''
self.ready = bool(self.ready and
self.util and
self.logger and
self.body
)
def set_attributes(self):
'''Set class attributes.'''
if self.ready:
self.verbose = self.options.verbose if self.options else None
self.heading_tags = self.util.heading_tag_names
self.paragraph_tags = self.util.paragraph_tags
self.bold_tags = self.util.bold_tags
self.unordered_list_tags = self.util.unordered_list_tags
self.file_hdu_info = list()
self.file_hdu_tables = list()
self.hdu_type = None
def parse_file(self,nodes=None):
'''Parse file hdu content from given BeautifulSoup nodes.'''
if self.ready:
if nodes:
type = Hdu_type(logger=self.logger,options=self.options)
if type:
for node in nodes:
if self.ready:
self.hdu_type = type.get_hdu_type(node=node)
# print('node: %r'% node)
# print('self.hdu_type: %r'% self.hdu_type)
# input('pause')
if self.hdu_type:
if self.hdu_type == 1:
self.parse_file_hdu_intro_1(node=node)
self.parse_file_hdu_tables_1(node=node)
elif self.hdu_type == 2:
self.parse_file_hdu_intro_2(node=node)
self.parse_file_hdu_tables_1(node=node)
elif self.hdu_type == 3:
self.parse_file_hdu_intro_3(node=node)
self.parse_file_hdu_tables_2(node=node)
elif self.hdu_type == 4:
self.parse_file_hdu_intro_3(node=node)
self.parse_file_hdu_tables_3(node=node)
elif self.hdu_type == 5:
self.parse_file_hdu_intro_3(node=node)
self.parse_file_hdu_tables_4(node=node) # No table
elif self.hdu_type == 6:
self.parse_file_hdu_intro_4(node=node)
self.parse_file_hdu_tables_3(node=node)
elif self.hdu_type == 7:
self.parse_file_hdu_intro_7(node=node)
self.parse_file_hdu_tables_7(node=node)
elif self.hdu_type == 8:
self.parse_file_hdu_intro_6(node=node)
self.parse_file_hdu_tables_1(node=node)
elif self.hdu_type == 9:
self.parse_file_hdu_intro_2(node=node)
self.parse_file_hdu_tables_1(node=node)
elif self.hdu_type == 10:
self.parse_file_hdu_intro_3(node=node)
self.parse_file_hdu_tables_2(node=node)
elif self.hdu_type == 11:
self.parse_file_hdu_intro_7(node=node)
self.parse_file_hdu_tables_6(node=node)
elif self.hdu_type == 12:
self.parse_file_hdu_intro_3(node=node)
self.parse_file_hdu_tables_5(node=node)
else:
self.ready = False
self.logger.error('Unexpected self.hdu_type encountered ' +
'in Hdu.parse_file().')
else:
self.ready = False
self.logger.error('Unable to parse_file. ' +
'self.hdu_type: {}, '.format(self.hdu_type) )
self.set_hdu_count()
else:
self.ready = False
self.logger.error('Unable to parse_file. ' +
'type: {}.'.format(type))
else: # some files don't have hdus
self.hdu_count = 0
self.logger.warning('Unable to parse_file. ' +
'nodes: {}.'.format(nodes))
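# parse_file() dispatches on the layout type reported by Hdu_type: every
# hdu_type value pairs one parse_file_hdu_intro_* variant (heading, HDU
# description and size extraction) with one parse_file_hdu_tables_* variant
# (<table>, <pre> or <ul> based layouts, or no table at all), so supporting a
# new page layout amounts to adding another branch above.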
def set_hdu_count(self):
'''Set the class attribute hdu_count.'''
if self.ready:
if self.file_hdu_info is not None and self.file_hdu_tables is not None:
if len(self.file_hdu_info) == len(self.file_hdu_tables):
self.hdu_count = len(self.file_hdu_info)
else:
self.ready = False
self.logger.error(
'Unable to parse_file. ' +
'len(self.file_hdu_info) != len(self.file_hdu_tables). ' +
'len(self.file_hdu_info): {}, '.format(len(self.file_hdu_info)) +
'len(self.file_hdu_tables): {}.'.format(len(self.file_hdu_tables)))
else: self.hdu_count = 0
def parse_file_hdu_intro_1(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_1. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu_description
ps = node.find_all('p')
hdu_descriptions = ([self.util.get_string(node=p) for p in ps
if str(p) and not str(p).isspace()]
if ps else list())
hdu_description = ' '.join(hdu_descriptions)
# datatype and hdu_size
dl = node.find('dl')
(datatype,hdu_size) = (self.util.get_datatype_and_hdu_size(node=dl)
if dl else (None,None))
# is_image
is_image = (self.util.check_match(regex='(?i)IMAGE',string=datatype)
if datatype else None)
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_1. ' +
'node: {}, '.format(node))
def parse_file_hdu_intro_2(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_2. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu.description
ps = list()
for p in node.find_all('p'): ps.append(p)
ps.pop() # remove last p tag containing datatype and hdu_size
hdu_description = ('\n\n'.join([self.util.get_string(node=p) for p in ps])
if ps else None)
# datatype and hdu_size
for p in node.find_all('p'): pass # get last p tag
(datatype,hdu_size) = self.util.get_datatype_and_hdu_size(node=p)
# is_image
is_image = (self.util.check_match(regex='(?i)IMAGE',string=datatype)
if datatype else None)
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_2. ' +
'node: {}, '.format(node))
def parse_file_hdu_intro_3(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_3. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu_description
ps = node.find_all('p')
hdu_descriptions = ([self.util.get_string(p) for p in ps
if str(p) and not str(p).isspace()]
if ps else list())
hdu_description = '\n'.join(hdu_descriptions) if hdu_descriptions else str()
# datatype and hdu_size
(datatype,hdu_size) = (None,None)
# is_image
is_image = None
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_3. ' +
'node: {}, '.format(node) )
def parse_file_hdu_intro_4(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_4. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu_description
hdu_description = str()
child_names = set(self.util.get_child_names(node=node))
if 'p' in child_names:
ps = node.find_all('p')
hdu_description = '\n\n'.join([self.util.get_string(node=p) for p in ps])
if 'ul' in child_names:
uls = node.find_all('ul')
hdu_description += '\n\n'.join([self.util.get_string(node=ul) for ul in uls])
# datatype and hdu_size
(datatype,hdu_size) = (None,None)
# is_image
is_image = None
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_4. ' +
'node: {}, '.format(node) )
def parse_file_hdu_intro_5(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from first heading tag)
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_p_tags_1(node=node))
# hdu_description
hdu_description = str()
# datatype and hdu_size
(datatype,hdu_size) = (None,None)
# is_image
is_image = None
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_5. ' +
'node: {}, '.format(node) )
def parse_file_hdu_intro_6(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_6. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu.description
hdu_description = self.util.get_string_from_middle_children_1(node=node)
# datatype and hdu_size
for p in node.find_all('p'): pass # get last p tag
(datatype,hdu_size) = self.util.get_datatype_and_hdu_size(node=p)
# is_image
is_image = (self.util.check_match(regex='(?i)IMAGE',string=datatype)
if datatype else None)
# check if an error has occurred
self.ready = self.ready and self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_6. ' +
'node: {}, '.format(node))
def parse_file_hdu_intro_7(self,node=None):
'''Parse file hdu data content from given BeautifulSoup node.'''
if self.ready:
if node:
# hdu_number and header_title (from only heading tag, if present)
if self.util.get_heading_tag_child_names(node=node):
(hdu_number,hdu_title) = (
self.util.get_hdu_number_and_hdu_title_from_heading_tag(node=node))
else:
(hdu_number,hdu_title) = (0,' ')
if hdu_number is None:
self.logger.error('Unable to parse_file_hdu_intro_7. ' +
'hdu_number cannot be None. ' +
'hdu_number: {}, '.format(hdu_number))
# hdu_description
ps = node.find_all('p')
regex = self.util.get_table_title_regex_1()
hdu_descriptions = ([self.util.get_string(node=p) for p in ps
if str(p) and not str(p).isspace()
and not self.util.check_match(regex=regex,string=str(p))]
if ps else list())
hdu_description = '\n\n'.join(hdu_descriptions) if hdu_descriptions else str()
# datatype and hdu_size
(datatype,hdu_size) = (None,None)
# is_image
is_image = None
# check if an error has occurred
self.ready = self.util.ready
# put it all together
hdu_info = dict()
if self.ready:
hdu_info['is_image'] = is_image
hdu_info['hdu_number'] = hdu_number
hdu_info['hdu_title'] = hdu_title if hdu_title else ' '
hdu_info['hdu_size'] = hdu_size
hdu_info['hdu_description'] = hdu_description
hdu_info['hdu_type'] = self.hdu_type if self.hdu_type else None
self.file_hdu_info.append(hdu_info)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_intro_7. ' +
'node: {}, '.format(node) )
def parse_file_hdu_tables_1(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
tables = node.find_all('table')
for (table_number,table) in enumerate(tables):
if self.ready:
# table caption
captions = self.util.get_children(node=table,names=['caption'])
table_caption = (self.util.get_string(node=captions[0])
if captions and len(captions) == 1
else None)
# column_names
column_names = list(table.find('thead').find('tr').strings)
# is_header
is_header = self.get_is_header_1(table=table,
column_names=column_names,
table_number=table_number)
# table keyword/values
trs = table.find('tbody').find_all('tr')
table_rows = dict()
for (position,tr) in enumerate(trs):
table_row = list()
for td in tr.find_all('td'):
if self.util.ready:
string = self.util.get_string(node=td)
table_row.append(string)
table_rows[position] = table_row
# check if errors have occurred
self.ready = self.ready and self.util.ready
# put it all together
if self.ready:
hdu_table = dict()
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_1. ' +
'node: {}, '.format(node))
def parse_file_hdu_tables_2(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
tables = self.util.get_tables_1(node=node,table_tag='pre')
if tables:
# print('tables: %r' % tables)
# input('pause')
for (table_number,table) in enumerate(tables):
if self.ready:
# print('\n\ntable: %r' % table)
# print('pre: %r' % table.find('pre'))
# input('pause')
# table caption
table_caption = None
# is_header
is_header = self.get_is_header_2(table=table,
table_number=table_number)
# column_names
column_names = (['Key','Value','Type','Comment']
if is_header == True else
['Name','Type','Unit','Description']
if is_header == False
else None
)
# table_rows
table_rows = (self.get_table_rows_pre(table=table)
if table and table.find('pre') else None)
# check if errors have occurred
self.ready = self.ready and self.util.ready
# put it all together
if self.ready:
hdu_table = dict()
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
# print('\n\nhdu_tables: \n' + dumps(hdu_tables,indent=1))
# print('\n\nhdu_tables: %r' % hdu_tables)
# input('pause')
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_2. ' +
'tables: {}, '.format(tables)
)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_2. ' +
'node: {}, '.format(node)
)
def parse_file_hdu_tables_3(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
tables = node.find_all('table')
for (table_number,table) in enumerate(tables):
if self.ready:
# table_caption
captions = self.util.get_children(node=table,names=['caption'])
table_caption = (self.util.get_string(node=captions[0])
if captions and len(captions) == 1
else None)
# table rows
trs = [tr for tr in table.find_all('tr')
if not self.util.get_string(node=tr).isspace()]
# column_names
# get column names from trs with all th tag children
column_names = self.util.get_column_names(trs=trs)
# remove trs with all th tag children
trs = [tr for tr in trs
if not self.util.children_all_one_tag_type(node=tr,
tag_name='th')]
# is_header
is_header = self.get_is_header_1(table=table,
column_names=column_names,
table_number=table_number)
# check if errors have occurred
self.ready = self.ready and self.util.ready
# table keyword/values
table_rows = dict()
for (position,tr) in enumerate(trs):
if self.ready:
if column_names:
table_row = self.get_table_row_tr_1(
column_names=column_names,
is_header=is_header,
node=tr)
else:
column_names = (['key','value','type','comment']
if is_header else
['name','type','unit','description'])
table_row = list()
for td in tr.find_all('td'):
string = self.util.get_string(node=td)
table_row.append(string)
table_rows[position] = table_row
# put it all together
if self.ready:
hdu_table = dict()
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_3. ' +
'node: {}, '.format(node))
def get_table_row_tr_1(self,
column_names=None,
is_header=None,
node=None):
'''Get table row from the <td> children of the given BeautifulSoup node.'''
table_row = [None,None,None,None]
if self.ready:
if column_names and is_header is not None and node:
if is_header:
column_dict = {'key' : 0,
'name' : 0,
'value' : 1,
'example' : 1,
'type' : 2,
'comment' : 3,
'description' : 3,
}
else:
column_dict = {'name' : 0,
'channel' : 0,
'type' : 1,
'unit' : 2,
'units' : 2,
'comment' : 3,
'description' : 3,
'details of its content' : 3,
}
column_names = [n.strip().lower() for n in column_names]
strings = list()
# get strings from the <td> tags
for td in node.find_all('td'):
strings.append(self.util.get_string(node=td))
strings = [s.strip() for s in strings if not s.isspace()]
# put the strings in the appropriate table rows
for (column_name,string) in list(zip(column_names,strings)):
if self.ready:
if column_name in column_dict:
table_row[column_dict[column_name]] = string
else:
self.ready=False
self.logger.error('Unable to get_table_row_tr_1. '
'Unanticipated column_name:{}'
.format(column_name))
# Warn when the number of <td> strings differs from the number of column_names
if len(column_names) != len(strings):
self.logger.warning('Unable to get_table_row_tr_1. ' +
'len(column_names) != len(strings). ' +
'Truncating to the smaller length. ' +
'len(column_names): {}, '.format(len(column_names)) +
'len(strings): {}, '.format(len(strings)) +
'\ncolumn_names: {}, '.format(column_names) +
'\nstrings: {}.'.format(strings)
)
# Warn when len(column_names) > 4
if len(column_names) > 4:
self.logger.warning('Table does not adhere to database schema. ' +
'len(column_names) > 4. ' +
'\ncolumn_names: {}, '.format(column_names)
)
else:
self.ready = False
self.logger.error('Unable to get_table_row_tr_1. ' +
                                  'column_names: {}, '.format(column_names) +
'node: {}.'.format(node) )
if table_row == [None,None,None,None]:
self.ready = False
self.logger.error('Unable to get_table_row_tr_1. ' +
'column_names: {}, '.format(column_names) +
'is_header: {}, '.format(is_header) +
'node: {}, '.format(node)
)
return table_row
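    # Illustrative sketch (not from the original source): for a header table with
    # column_names ['Key', 'Value', 'Type', 'Comment'], a row such as
    #     <tr><td>NAXIS</td><td>2</td><td>int</td><td>number of axes</td></tr>
    # is mapped through column_dict into the fixed 4-slot list
    #     ['NAXIS', '2', 'int', 'number of axes'],
    # while a binary-table row with column_names like ['name', 'type', 'unit',
    # 'description'] fills the same slots in that order. Column names missing
    # from column_dict are logged as errors.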
def parse_file_hdu_tables_4(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
# No header or data table for this one
hdu_table = dict()
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_4. ' +
'node: {}, '.format(node))
def parse_file_hdu_tables_5(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
tables = self.util.get_tables_1(node=node,table_tag='ul')
if tables:
# print('tables: %r' % tables)
# input('pause')
for (table_number,table) in enumerate(tables):
if self.ready:
# print('\n\ntable: %r' % table)
# print('pre: %r' % table.find('pre'))
# input('pause')
# table caption
table_caption = None
# is_header
is_header = self.get_is_header_2(table=table,
table_number=table_number)
# column_names
column_names = (['Key','Value','Type','Comment']
if is_header == True else
['Name','Type','Unit','Description']
if is_header == False
else None
)
# table_rows
table_rows = (self.get_table_rows_ul(table=table)
if table and table.find('ul') else None)
# check if errors have occurred
self.ready = self.ready and self.util.ready
# put it all together
if self.ready:
hdu_table = dict()
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
# print('\n\nhdu_tables: \n' + dumps(hdu_tables,indent=1))
# print('\n\nhdu_tables: %r' % hdu_tables)
# input('pause')
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_5. ' +
'tables: {}, '.format(tables)
)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_5. ' +
'node: {}, '.format(node)
)
def parse_file_hdu_tables_6(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
regex = self.util.get_table_title_regex_1()
tables = self.util.get_tables_3(node=node,regex=regex)
if tables:
for (table_number,table) in enumerate(tables):
if self.ready:
# table caption
ps = self.util.get_children(node=table,names=['p'])
p = ps[0] if ps else None
ps = ps[1:] if ps and len(ps) > 1 else None
(title,description) = (
self.util.get_title_and_description_from_p(p=p)
if p else (None,None))
p_strings = ([self.util.get_string(node=p) for p in ps if p]
if ps else list())
table_caption = (title.strip() + '. '
if title.strip() else str())
table_caption += (description.strip() + '. '
if description.strip() else str())
table_caption += ('\n'.join(p_strings)
if p_strings else str())
# is_header
is_header = (True if title and
self.util.check_match(
regex = self.util.get_table_title_regex_2(),
string=title)
else False if title and
self.util.check_match(
regex = self.util.get_table_title_regex_3(),
string=title)
else None
)
# column_names
column_names = (['Key','Value','Type','Comment']
if is_header == True else
['Name','Type','Unit','Description']
if is_header == False
else None
)
# table_rows
table_rows = (self.get_table_rows_pre(table=table)
if table and table.find('pre') else dict())
# check if errors have occurred
self.ready = self.ready and self.util.ready
# put it all together
if self.ready:
hdu_table = dict()
previous_hdu_table = dict()
# concatenate split tables
if hdu_tables and hdu_tables[-1]['is_header'] == is_header:
previous_hdu_table = hdu_tables.pop()
previous_table_caption = previous_hdu_table['table_caption']
previous_table_rows = previous_hdu_table['table_rows']
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = (previous_table_caption +
table_caption)
hdu_table['table_column_names'] = column_names
k = 0
hdu_table['table_rows'] = dict()
for key, value in previous_table_rows.items():
hdu_table['table_rows'][k] = value
k += 1
for key, value in table_rows.items():
hdu_table['table_rows'][k] = value
k += 1
else:
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_6. ' +
'tables: {}, '.format(tables)
)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_6. ' +
'node: {}, '.format(node)
)
def parse_file_hdu_tables_7(self,node=None):
'''Parse file hdu keyword/value/type/comment content
from given BeautifulSoup node.'''
hdu_tables = list()
if self.ready:
if node:
regex = self.util.get_table_title_regex_1()
tables = self.util.get_tables_3(node=node,regex=regex)
if tables:
for (table_number,table) in enumerate(tables):
if self.ready:
# table caption
ps = self.util.get_children(node=table,names=['p'])
p = ps[0] if ps else None
ps = ps[1:] if ps and len(ps) > 1 else None
(title,description) = (
self.util.get_title_and_description_from_p(p=p)
if p else (None,None))
p_strings = ([self.util.get_string(node=p) for p in ps if p]
if ps else list())
p_strings = ([p for p in p_strings if not str(p).startswith('<b>')]
if p_strings else list())
table_caption = (title.strip() + '. '
if title.strip() else str())
table_caption += (description.strip() + '. '
if description.strip() else str())
table_caption += ('\n'.join(p_strings)
if p_strings else str())
# is_header
is_header = (True if title and
self.util.check_match(
regex = self.util.get_table_title_regex_2(),
string=title)
else False if title and
self.util.check_match(
regex = self.util.get_table_title_regex_3(),
string=title)
else None
)
# column_names
column_names = (['Key','Value','Type','Comment']
if is_header == True else
['Name','Type','Unit','Description']
if is_header == False
else None
)
# table_rows
table_rows = (self.get_table_rows_ul(table=table)
if table and table.find('ul') else dict())
# check if errors have occurred
self.ready = self.ready and self.util.ready
# put it all together
if self.ready:
hdu_table = dict()
previous_hdu_table = dict()
# concatenate split tables
if hdu_tables and hdu_tables[-1]['is_header'] == is_header:
previous_hdu_table = hdu_tables.pop()
previous_table_caption = previous_hdu_table['table_caption']
previous_table_rows = previous_hdu_table['table_rows']
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = (previous_table_caption +
table_caption)
hdu_table['table_column_names'] = column_names
k = 0
hdu_table['table_rows'] = dict()
for key, value in previous_table_rows.items():
hdu_table['table_rows'][k] = value
k += 1
for key, value in table_rows.items():
hdu_table['table_rows'][k] = value
k += 1
else:
hdu_table['is_header'] = is_header
hdu_table['table_caption'] = table_caption
hdu_table['table_column_names'] = column_names
hdu_table['table_rows'] = table_rows
hdu_tables.append(hdu_table)
self.file_hdu_tables.append(hdu_tables)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_7. ' +
'tables: {}, '.format(tables)
)
else:
self.ready = False
self.logger.error('Unable to parse_file_hdu_tables_7. ' +
'node: {}, '.format(node)
)
def get_is_header_1(self,table=None,column_names=None,table_number=None):
'''Get is_header from either table.attrs, column_names, or table_number.'''
is_header = None
if self.ready:
if table and table_number is not None:
table_class = (table.attrs['class']
if table.attrs and 'class' in table.attrs else None)
table_class = (table_class[0]
if isinstance(table_class,list) and len(table_class) == 1 else None)
if table_class:
header_regex = '(?i)head'
bin_table_regex = '(?i)column' + '|' + '(?i)bintable'
is_header = (
True
if self.util.check_match(regex=header_regex,string=table_class)
else False
if self.util.check_match(regex=bin_table_regex,string=table_class)
else None)
if is_header is None:
self.ready = False
self.logger.error('Unable to get_is_header_1. ' +
'is_header: {}, '.format(is_header) +
'table.attrs: {}.'.format(table.attrs))
elif column_names:
column_names = [c.lower() for c in column_names]
is_header = (False
if ('unit' in column_names or
'units' in column_names)
else True)
else:
is_header = True if table_number == 0 else False
else:
self.ready = False
self.logger.error('Unable to get_is_header_1. ' +
'table: {}, '.format(table) +
'table_number: {}, '.format(table_number)
)
if is_header is None:
self.ready = False
self.logger.error('Unable to get_is_header_1. ' +
'is_header: {}, '.format(is_header)
)
return is_header
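    # Illustrative sketch (not from the original source): a <table class="header">
    # tag yields is_header=True via the '(?i)head' regex, while <table class="columns">
    # or <table class="bintable"> yields is_header=False. Without a usable class
    # attribute, column names containing 'unit'/'units' imply a binary (data) table,
    # and otherwise the first table (table_number == 0) is assumed to be the header.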
def get_is_header_2(self,table=None,table_number=None):
'''Get is_header from either heading.attrs, column_names, or heading_title.'''
is_header = None
if self.ready:
if table and table_number is not None:
heading_tag_names = (self.util.get_heading_tag_child_names(node=table)
if table else None)
heading_tag_name = heading_tag_names[0] if heading_tag_names else None
heading_tag = (table.find(heading_tag_name)
if heading_tag_name else None)
heading_id = (heading_tag.attrs['id'] if heading_tag and
heading_tag.attrs and 'id' in heading_tag.attrs else None)
heading_string = (self.util.get_string(node=heading_tag)
if heading_tag else None)
regex = ('(?i)header' + '|' +
'(?i)binary' + '|' + '(?i)field' + '|' + '(?i)column')
is_table_heading = (self.util.check_match(regex=regex,string=heading_string)
if heading_string else None)
# print('heading_tag_names: %r'% heading_tag_names)
# print('heading_tag_name: %r'% heading_tag_name)
# print('heading_tag: %r'% heading_tag)
# print('heading_id: %r'% heading_id)
# print('heading_string: %r'% heading_string)
# print('is_table_heading: %r'% is_table_heading)
# input('pause')
if heading_id:
header_regex = '(?i)head'
bin_table_regex = '(?i)field'
is_header = (
True
if self.util.check_match(regex=header_regex,string=heading_id)
else False
if self.util.check_match(regex=bin_table_regex,string=heading_id)
else None)
if is_header is None:
self.ready = False
self.logger.error('Unable to get_is_header_2. ' +
'is_header: {}, '.format(is_header) +
                                          'heading_tag.attrs: {}.'.format(heading_tag.attrs))
elif heading_tag and is_table_heading:
header_regex = '(?i)header'
bin_table_regex = '(?i)binary' + '|' + '(?i)field' + '|' + '(?i)column'
is_header = (
True
if self.util.check_match(regex=header_regex,string=heading_string)
else False
if self.util.check_match(regex=bin_table_regex,string=heading_string)
else None)
else:
is_header = True if table_number == 0 else False
if is_header is None: is_header = True if table_number == 0 else False
else:
self.ready = False
self.logger.error('Unable to get_is_header_2. ' +
'bool(table): {}, '.format(bool(table)) +
'table_number: {}, '.format(table_number)
)
if is_header is None:
self.ready = False
self.logger.error('Unable to get_is_header_2. ' +
'is_header: {}, '.format(is_header)
)
return is_header
def set_hdu_tags(self):
'''Set hdus from given body tag.'''
self.hdu_tags = None
if self.ready:
if self.body and self.body.children:
previous_child = None
found_hdu_tags = False
for child in self.util.get_children(node=self.body):
if child.name in self.heading_tags:
string = self.util.get_string(node=child)
if string and 'HDU' in string:
found_hdu_tags = True
self.hdu_tags = (previous_child.next_siblings
if previous_child else None)
break
if not found_hdu_tags:
previous_child = child if child else None
else:
self.ready = False
self.logger.error('Unable to set_hdu_tags. ' +
'self.body: {}'.format(self.body) +
'self.body.children: {}'
.format(self.body.children))
def set_hdu_headings_and_pres(self):
'''Set hdu_headings and hdu_pres from the hdu_tags'''
if self.ready:
if self.hdu_tags:
first_hdu = True
self.hdu_headings = list()
self.hdu_pres = list()
pres = list()
for tag in [tag for tag in self.hdu_tags if tag.name]:
string = self.util.get_string(node=tag)
if tag.name in self.heading_tags and 'HDU' in string:
self.hdu_headings.append(tag)
if first_hdu:
first_hdu = False
else:
self.hdu_pres.append(pres)
pres = list()
elif tag.name == 'pre':
pres.append(tag)
else: # Do nothing; only processing heading and pre tags
pass
self.hdu_pres.append(pres)
else:
self.ready = False
self.logger.error('Unable to set_hdu_headings_and_pres. ' +
'self.hdu_tags: {}'
.format(self.hdu_tags))
def set_row_data(self,row=None):
'''Set the header keyword-value pairs for the given row.'''
self.row_data = list()
if self.ready:
if row:
keyword = None
value = None
type = None
comment = None
value_comment = None
if 'HISTORY' in row:
keyword = 'HISTORY'
value_comment = row.replace('HISTORY',str())
elif '=' in row:
split = row.split('=')
keyword = split[0].strip() if split else None
value_comment = split[1].strip() if split else None
elif 'END' in row:
keyword = 'END'
value_comment = row.replace('END',str())
else:
self.ready = False
self.logger.error(
'Unable to set_row_data. ' +
"The strings 'HISTORY', 'END' and '=' " +
'not found in row. ' +
'row: {}'.format(row))
if value_comment and '/' in value_comment:
split = value_comment.split('/')
value = split[0].strip() if split else None
comment = split[1].strip() if split else None
else:
value = value_comment.strip()
comment = None
self.row_data = [keyword,value,type,comment]
else:
self.ready = False
self.logger.error('Unable to set_row_data. ' +
'row: {}'.format(row))
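    # Illustrative sketch (not from the original source): a FITS-style header row
    #     "NAXIS2  = 4096 / number of rows"
    # is split on '=' into keyword 'NAXIS2' and value/comment '4096 / number of rows',
    # then on '/' into value '4096' and comment 'number of rows', giving
    #     self.row_data == ['NAXIS2', '4096', None, 'number of rows']
    # (the type slot is always None here). 'HISTORY' and 'END' rows keep that
    # keyword and pass the remainder of the row through the same value/comment split.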
def set_heading_tag_names(self,child_names=None):
        '''Set the list of heading tag names found in the given child_names.'''
self.heading_tag_names = list()
if self.ready:
if child_names:
for name in child_names:
if name and name in self.heading_tags:
self.heading_tag_names.append(str(name))
else:
self.ready = False
self.logger.error('Unable to set_heading_tag_names. ' +
'child_names: {}'.format(child_names))
def get_table_rows_pre(self,table=None):
        '''Get table rows from the <pre> tag of the given table.'''
table_rows = dict()
if self.ready:
pre = table.find('pre') if table else None
if pre:
pre_string = self.util.get_string(node=pre) if pre else None
rows = pre_string.split('\n') if pre_string else None
rows = [r for r in rows if r] if rows else None
if rows:
for (position,row) in enumerate(rows):
if self.ready:
if row: # skip empty rows
table_row = self.get_table_row_1(row=row)
if table_row:
table_rows[position] = table_row
else:
                                # print('row: %r'% row)
# input('pause')
self.ready = False
self.logger.error('Unable to get_table_rows_pre. ' +
'row: {}, '.format(row) +
'table_row: {}, '.format(table_row)
)
else:
self.ready = False
self.logger.error('Unable to get_table_rows_pre. ' +
'table: {}'.format(table))
if not table_rows:
self.ready = False
self.logger.error('Unable to get_table_rows_pre. ' +
'table: {}'.format(table))
return table_rows
def get_table_rows_ul(self,table=None):
        '''Get table rows from the <ul> tag of the given table.'''
table_rows = dict()
if self.ready:
ul = table.find('ul') if table else None
if ul:
rows = [self.util.get_string(node=li) for li in table.find_all('li')]
if rows:
for (position,row) in enumerate(rows):
if self.ready:
table_row = self.get_table_row_1(row=row)
if table_row:
table_rows[position] = table_row
else:
self.ready = False
self.logger.error('Unable to get_table_rows_ul. ' +
'row: {}, '.format(row) +
'table_row: {}, '.format(table_row)
)
else:
self.ready = False
self.logger.error('Unable to get_table_rows_ul. ' +
'table: {}'.format(table))
if not table_rows:
self.ready = False
self.logger.error('Unable to get_table_rows_ul. ' +
'table: {}'.format(table))
return table_rows
def get_table_row_1(self,row=None):
        '''Get a [keyword, value, type, comment] table row parsed from the given row string.'''
table_row = list()
if self.ready:
if row:
regex0 = '^\s{5,}' # starts with 5 or more spaces
            # starts with '*', '#', '{', '}', '&', '/', '<br', '.', '-', or a digit
regex1 = ('^\s*\*' + '|' + '^\s*\#' + '|' + '^\s*\}' + '|' +
'^\s*\{' + '|' + '^\s*\d+' + '|' + '^\s*\&+' + '|' +
'^\s*\/+' + '|' + '^\s*\<br' + '|' + '^\s*\.+' + '|' +
'^\s*\-\d+' + '|' + '^\s*\-+')
regex2 = '^([A-Z\d*]{1,}\_){0,20}([A-Z\d*]{1,})\s*\=\s*'
regex3 = '^([A-Z\d*]{1,}\-){0,20}([A-Z\d*]{1,})\s*\=\s*'
regex4 = '^([A-Z\d*]{1,}\_){0,20}([A-Z\d*]{1,})\s*'
regex5 = '^\s*<b>(.*?)</b>' # starts with bold tag
regex6 = '^([A-Za-z\d*]{1,}\_){0,20}([A-Za-z\d*]{1,})\s*'
regex7 = '^\s*<b class="omit"></b>'
regex8 = '^\s*\w' # starts with word character MUST BE LAST regex !!!!
match0 = self.util.check_match(regex=regex0,string=row)
match1 = self.util.check_match(regex=regex1,string=row.lstrip())
match2 = self.util.check_match(regex=regex2,string=row.lstrip())
match3 = self.util.check_match(regex=regex3,string=row.lstrip())
match4 = self.util.check_match(regex=regex4,string=row.lstrip())
match5 = self.util.check_match(regex=regex5,string=row.lstrip())
match6 = self.util.check_match(regex=regex6,string=row.lstrip())
                match7 = self.util.check_match(regex=regex7,string=row.lstrip())
                match8 = self.util.check_match(regex=regex8,string=row.lstrip())
# print('row: %r' %row)
# print('match0: %r' % match0)
# print('match1: %r' % match1)
# print('match2: %r' % match2)
# print('match3: %r' % match3)
# print('match4: %r' % match4)
# print('match5: %r' % match5)
# print('match6: %r' % match6)
# print('match7: %r' % match7)
# input('pause')
# header_table_columns = ['Key','Value','Type','Comment']
# binary_table_columns = ['Name','Type','Unit','Description']
if match0 or match1:
table_row = [None,None,None,row.strip()]
elif match2 or match3:
row = row.strip()
matches2 = self.util.get_matches(regex=regex2,string=row)
match2 = matches2[0] if matches2 else None
matches3 = self.util.get_matches(regex=regex3,string=row)
match3 = matches3[0] if matches3 else None
match = match2 if match2 else match3
col0 = match if row.startswith(match) else None
if col0:
col13 = row.split(col0)[1].strip() # = ['',row.replace(col0,'')]
col0 = col0.replace('=',str()).strip()
if ' / ' in col13: split_char = ' / '
elif ' /' in col13: split_char = ' /'
elif '/ ' in col13: split_char = '/ '
elif '/' in col13: split_char = '/'
else: split_char = None
split = col13.split(split_char) if col13 and split_char else None
col1 = split[0].strip() if split else col13
col3 = split[1].strip() if split else None
col1 = col1 if not col1 in set(string.punctuation) else None
table_row = [col0,col1,None,col3]
elif match4:
row = row.strip()
matches = self.util.get_matches(regex=regex4,string=row)
match = matches[0] if matches else None
col0 = match if row.startswith(match) else None
if col0:
col13 = row.split(col0)[1].strip() # = ['',row.replace(col0,'')]
if col13 and col13.startswith('(') and ':' in col13:
key = ':'
split = [s.strip() for s in col13.split(key) if s]
split[0] = split[0].replace('(',str()).replace(')',str()).strip()
else:
key = ' '*3
split = [s.strip() for s in col13.split(key) if s]
l_split = len(split) if col13 else 0
if l_split == 0: table_row = [col0,None,None,None]
elif l_split == 1: table_row = [col0,None,None,split[0]]
elif l_split == 2: table_row = [col0,split[0],None,split[1]]
elif l_split == 3: table_row = [col0,split[0],split[1],split[2]]
else:
self.ready = False
self.logger.error('Unable to get_table_row_1. ' +
'l_split: {}'.format(l_split))
else:
self.ready = False
self.logger.error('Unable to get_table_row_1. ' +
'col0: {}'.format(col0))
elif match5:
row = row.strip()
matches = self.util.get_matches(regex=regex5,string=row)
match = matches[0] if matches else None
col0 = match if row.startswith(match) else None
if col0:
col23 = row.split(col0)[1].strip() # = [str(),row.replace(col0,str())]
col0 = col0.replace('<b>',str()).replace('</b>',str()).strip()
matches = self.util.get_matches(regex='\((.*?)\)',string=col23)
col2 = matches[0] if matches else None
col3 = col23.split(col2)[1].strip() if col2 else col23
col2 = col2.replace('(',str()).replace(')',str()).strip() if col2 else None
col3 = col3[1:].strip() if col3 and col3.startswith(':') else col3
col3 = None if col3 and col3.strip() == '.' else col3
table_row = [col0,col2,None,col3]
# print('/\n\nrow: %r' % row)
# print('col0: %r' % col0)
# print('col23: %r' % col23)
# print('matches: %r' % matches)
# print('col2: %r' % col2)
# print('col3: %r' % col3)
# print('table_row: %r' % table_row)
# input('pause')
elif match6:
row = row.strip()
matches = self.util.get_matches(regex=regex6,string=row)
match = matches[0] if matches else None
col0 = match if row.startswith(match) else None
if col0:
col13 = row.split(col0)[1].strip() # = ['',row.replace(col0,'')]
col0 = col0.replace('=',str()).strip()
if ' # ' in col13: split_char = ' # '
elif ' #' in col13: split_char = ' #'
elif '# ' in col13: split_char = '# '
elif '#' in col13: split_char = '#'
else: split_char = None
split = col13.split(split_char) if col13 and split_char else None
col1 = split[0].strip() if split else col13
col3 = split[1].strip() if split else None
table_row = [col0,col1,None,col3]
elif match7:
row = row.strip()
matches = self.util.get_matches(regex=regex7,string=row)
match = matches[0] if matches else None
col0 = match if row.startswith(match) else None
if col0:
col13 = row.split(col0)[1].strip() # = ['',row.replace(col0,'')]
split = col13.split(':') if ':' in col13 else None
# print('split: %r' % split)
col0 = split[0] if split else None
col3 = split[1] if split else col13
table_row = [col0,None,None,col3]
# print('row: %r' % row)
# print('table_row: %r' % table_row)
# input('pause')
elif match8:
row = row.strip()
table_row = [None,None,None,row]
else:
self.ready = False
self.logger.error('Unable to get_table_row_1. ' +
'no regex match. ' +
'row: {}'.format(row))
                    # input('pause')
else:
self.logger.debug('Unable to get_table_row_1. ' +
'row: {}'.format(row)
)
# print('row: %r' % row)
# print('table_row: %r' % table_row)
# input('pause')
return table_row
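    # Illustrative sketch (not from the original source) of the intended mapping
    # from typical row strings to the 4-slot [key/name, value/type, type/unit,
    # comment/description] list:
    #     "SIMPLE  = T / conforms to FITS standard"
    #         -> roughly ['SIMPLE', 'T', None, 'conforms to FITS standard']
    #     "<b>flux</b> (float32): calibrated flux"
    #         -> roughly ['flux', 'float32', None, 'calibrated flux']
    # Rows beginning with 5+ spaces, '*', '#', braces, etc. become pure comment
    # rows ([None, None, None, row]); rows matching no regex are logged as errors.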
def get_table_row_2(self,row=None):
        '''Get a [keyword, value, type, comment] table row parsed from the given row string.'''
table_row = list()
if self.ready:
if row:
regex1 = '^\s*<b>(.*?)</b>'
match1 = self.util.check_match(regex=regex1,string=row)
# print('match1: %r' % match1)
# input('pause')
# header_table_columns = ['Key','Value','Type','Comment']
# binary_table_columns = ['Name','Type','Unit','Description']
                if match1:
                    table_row = [None,None,None,row.strip()]
else:
self.logger.debug('Unable to get_table_row_2. ' +
'row: {}'.format(row)
)
# print('row: %r' % row)
# print('table_row: %r' % table_row)
# input('pause')
return table_row
| 51.635926
| 99
| 0.424141
|
955258576004f8aaac36f37af8c24bf9e6e9d72e
| 3,417
|
py
|
Python
|
plot_HDDM_priors.py
|
anne-urai/RT_RDK
|
1a2f168ca05ff9759cc3ede4b5ead7f22df098ae
|
[
"MIT"
] | 5
|
2019-07-02T17:44:12.000Z
|
2022-02-16T14:07:03.000Z
|
plot_HDDM_priors.py
|
anne-urai/RT_RDK
|
1a2f168ca05ff9759cc3ede4b5ead7f22df098ae
|
[
"MIT"
] | null | null | null |
plot_HDDM_priors.py
|
anne-urai/RT_RDK
|
1a2f168ca05ff9759cc3ede4b5ead7f22df098ae
|
[
"MIT"
] | 3
|
2019-08-26T07:09:42.000Z
|
2021-08-16T16:12:02.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import os
import numpy as np
import scipy as sp
import matplotlib as mpl
mpl.use('Agg') # to still plot even when no display is defined
import matplotlib.pyplot as plt
mpl.rcParams['pdf.fonttype'] = 42
# import matplotlib.pylab as plt
import seaborn as sns
import pandas as pd
#import bottleneck as bn
from IPython import embed as shell
import hddm
import pymc as pm
sns.set(style='ticks', font='Arial', font_scale=1, rc={
'axes.linewidth': 0.25,
'axes.labelsize': 8,
'axes.titlesize': 7,
'xtick.labelsize': 6,
'ytick.labelsize': 6,
'legend.fontsize': 6,
'xtick.major.width': 0.1,
'ytick.major.width': 0.1,
'text.color': 'Black',
'axes.labelcolor':'Black',
'xtick.color':'Black',
'ytick.color':'Black',} )
sns.plotting_context()
def plot_all_priors(model, data=None, unique=True, model_kwargs=None):
"""
plot the priors of an HDDM model
    Input:
        model <HDDM model> - fitted HDDM model whose group-level prior nodes are plotted
        data <DataFrame> - data to be plotted against the priors
        unique <bool> - whether to drop duplicate values in each column of data before plotting it
    """
#set limits for plots
lb = {'v': -10, 'dc(1)':-5, 'z':0.001, 'z_std':0}
ub = {'a': 4, 't':1, 'v':10, 'z':1, 'sz': 1, 'st':1, 'sv':15, 'p_outlier': 1,
'z_trans(1)':1, 'z(1)':1, 'dc(1)':5, 'a_std':5, 'v_std':5, 'z_std':0.5, 't_std':5, 'dc_std':5}
#plot all priors
n_rows=4
n_cols=5
for n_subjs in [1]: #,2]:
# create a model
# h_data, _ = hddm.generate.gen_rand_data(subjs=n_subjs, size=2)
# if model_kwargs is None:
# model_kwargs = {}
# h = model(h_data, include='all', **model_kwargs)
#h = model
fig = plt.figure()
plt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, hspace=.7)
counter = 0
for name, node_row in model.iter_group_nodes():
if not name in ub: # only those listed
continue
if 'var' in name or 'p_outlier' in name:
continue
if 'trans' in name:
trans = True
name = name.replace('_trans','')
else:
trans = False
counter += 1
node = node_row['node']
print(name)
print(node.logp)
            # plot a single prior
ax = plt.subplot(n_rows, n_cols, counter)
ax.set_yticklabels([])
#generate pdf
xlim = np.arange(lb.get(name, 0.001), ub[name], 0.01)
pdf = np.zeros(len(xlim))
# assume that the logp has the prior?
for i in range(len(pdf)):
if not trans:
node.value = xlim[i]
pdf[i] = np.exp(node.logp)
else:
node.value = pm.logit(xlim[i])
pdf[i] = np.exp(node.logp)*10
            # plot the prior density
plt.plot(xlim, pdf)
plt.xlabel(name)
sns.despine(offset=2, trim=True)
# # Hide the right and top spines
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
#
# # Only show ticks on the left and bottom spines
# ax.yaxis.set_ticks_position('left')
# ax.xaxis.set_ticks_position('bottom')
#add suptitle
plt.suptitle('HDDM priors')
# save the figure
plt.savefig(os.path.join(mypath, 'priorPlot.pdf'))
## LOAD MODEL WITH THE MOST PARAMETERS WE HAVE
mypath = os.path.realpath(os.path.expanduser('/nfs/aeurai/HDDM/JW_PNAS'))
m = hddm.load(os.path.join(mypath, 'stimcoding_dc_z_prevresp_st', 'modelfit-combined.model'))
#print(m)
#shell()
plot_all_priors(m)
| 27.336
| 102
| 0.612233
|
fb69e3c668d5d8faae9db24657569f20486cbf4d
| 2,747
|
py
|
Python
|
mosdef_code/main_script.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | null | null | null |
mosdef_code/main_script.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | null | null | null |
mosdef_code/main_script.py
|
brianlorenz/code
|
e24277bbb1deb2f0488f7b6e1f28c7b633c2c12b
|
[
"MIT"
] | 1
|
2021-12-08T01:20:12.000Z
|
2021-12-08T01:20:12.000Z
|
'''Runs all methods after clustering the SEDs'''
import sys
import os
import string
import numpy as np
import initialize_mosdef_dirs as imd
from composite_sed import get_all_composite_seds
from stack_spectra import stack_all_spectra
from fit_emission import fit_all_emission
from generate_cluster_plots import generate_all_cluster_plots
from interpolate import gen_all_mock_composites
from uvj_clusters import observe_all_uvj
from convert_filter_to_sedpy import convert_all_folders_to_sedpy, find_median_redshifts
from convert_flux_to_maggies import convert_folder_to_maggies
from plot_scaled_comps import plot_scaled_composites
from scale_spectra import scale_all_spectra
from fit_prospector_emission import setup_all_prospector_fit_csvs, fit_all_prospector_emission
from check_for_agn import check_for_all_agn
from filter_groups import generate_skip_file
'''Starting point: One folder ('cluster_folder') that contains:
-folders labeled '0', '1', ..., 'N' where N is the number of clusters-1. These will be the cluster "groups"
-each of these folders contains images of each of the seds in a given cluster, named as '{field}_{v4id}_mock.pdf'
Specify the directories in initialize_mosdef_dirs
'''
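# Illustrative example (not from the original source) of the expected starting
# layout, assuming a cluster_folder with three groups and hypothetical
# {field}_{v4id} values:
#     cluster_folder/
#         0/   GOODS-N_12345_mock.pdf   AEGIS_6789_mock.pdf   ...
#         1/   ...
#         2/   ...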
# Make sure to go to initialize_mosdef_dirs to set all the directories properly
# Set the total number of clusters
n_clusters = 29
# Set the name of the prospector run
run_name = 'redshift_maggies'
# Begin running all the functions
print('Generating composite seds...')
get_all_composite_seds(n_clusters, run_filters=True)
print('Generating composite spectra...')
stack_all_spectra(n_clusters, 'cluster_norm')
print('Fitting emission lines...')
# Check for AGN and list which groups do not have enough galaxies
check_for_all_agn(n_clusters)
generate_skip_file()
# Will break here if one of the spectra is so bad that it can't fit
fit_all_emission(n_clusters, 'cluster_norm')
# Need to do a few things to the composites (measure UVJ, generate mock SEDs, etc.) before we can plot
print('Generating plots')
gen_all_mock_composites(n_clusters)
observe_all_uvj(n_clusters, individual_gals=False, composite_uvjs=True)
generate_all_cluster_plots(n_clusters)
# Prepare for prospector:
print('Preparing data for Prospector')
convert_all_folders_to_sedpy(n_clusters)
find_median_redshifts(n_clusters)
convert_folder_to_maggies(imd.composite_sed_csvs_dir)
# Plot of all of the scaled composites, must be run after convert_folder_to_maggies
plot_scaled_composites(n_clusters)
# Scale and re-fit the spectra using the scale that was used for the composites
scale_all_spectra(n_clusters)
# Re-fit the prospector spectra in the same way that we fit the mosdef ones:
setup_all_prospector_fit_csvs(29, run_name)
fit_all_prospector_emission(29, run_name)
| 36.626667
| 113
| 0.832545
|
61764ab80d8db54c9ba437b7ee2eb135a2a9d6c4
| 8,937
|
py
|
Python
|
mars/utils.py
|
pingrunhuang/mars
|
ae920c374e9844d7426d0cc09c0d97059dc5341c
|
[
"Apache-2.0"
] | 1
|
2019-09-22T16:00:48.000Z
|
2019-09-22T16:00:48.000Z
|
mars/utils.py
|
turboFei/mars
|
cde691285d921add5460944764c7278e7ddec8ff
|
[
"Apache-2.0"
] | null | null | null |
mars/utils.py
|
turboFei/mars
|
cde691285d921add5460944764c7278e7ddec8ff
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import functools
import inspect
import json
import logging
import numbers
import os
import random
import socket
import struct
import sys
import time
import zlib
from hashlib import md5
import numpy as np
from .compat import irange, functools32, getargspec
from .utils_c import to_binary, to_str, to_text, tokenize
logger = logging.getLogger(__name__)
random.seed(int(time.time()) * os.getpid())
tokenize = tokenize
# fix encoding conversion problem under windows
if sys.platform == 'win32': # pragma: no cover
def _replace_default_encoding(func):
def _fun(s, encoding=None):
encoding = encoding or getattr(sys.stdout, 'encoding', None) or 'mbcs'
return func(s, encoding=encoding)
_fun.__name__ = func.__name__
_fun.__doc__ = func.__doc__
return _fun
to_binary = _replace_default_encoding(to_binary)
to_text = _replace_default_encoding(to_text)
to_str = _replace_default_encoding(to_str)
class AttributeDict(dict):
def __getattr__(self, item):
try:
return self[item]
except KeyError:
raise AttributeError(
"'AttributeDict' object has no attribute {0}".format(item))
def on_serialize_shape(shape):
if shape:
return tuple(s if not np.isnan(s) else -1 for s in shape)
return shape
def on_deserialize_shape(shape):
if shape:
return tuple(s if s != -1 else np.nan for s in shape)
return shape
def get_gpu_used_memory(device_id):
import pynvml
handle = pynvml.nvmlDeviceGetHandleByIndex(device_id)
mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
return mem_info.used
def parse_memory_limit(value):
if isinstance(value, numbers.Number):
return float(value), False
elif value.endswith('%'):
return float(value[:-1]) / 100, True
elif value.lower().endswith('t'):
return float(value[:-1]) * (1024 ** 4), False
elif value.lower().endswith('g'):
return float(value[:-1]) * (1024 ** 3), False
elif value.lower().endswith('m'):
return float(value[:-1]) * (1024 ** 2), False
elif value.lower().endswith('k'):
return float(value[:-1]) * 1024, False
else:
raise ValueError('Unknown limitation value: {0}'.format(value))
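# Illustrative usage (not part of the original module):
#     parse_memory_limit(512 * 1024 ** 2)  ->  (536870912.0, False)
#     parse_memory_limit('4g')             ->  (4294967296.0, False)
#     parse_memory_limit('80%')            ->  (0.8, True)   # True means the limit is relative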
def readable_size(size):
if size < 1024:
return size
elif 1024 <= size < 1024 ** 2:
return '{0:.2f}K'.format(size / 1024)
elif 1024 ** 2 <= size < 1024 ** 3:
return '{0:.2f}M'.format(size / (1024 ** 2))
elif 1024 ** 3 <= size < 1024 ** 4:
return '{0:.2f}G'.format(size / (1024 ** 3))
else:
return '{0:.2f}T'.format(size / (1024 ** 4))
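# Illustrative usage (not part of the original module):
#     readable_size(512)            ->  512
#     readable_size(2048)           ->  '2.00K'
#     readable_size(3 * 1024 ** 3)  ->  '3.00G'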
_commit_hash, _commit_ref = None, None
def git_info():
from ._version import get_git_info
global _commit_hash, _commit_ref
if _commit_ref is not None:
if _commit_hash is None:
return None
return _commit_hash, _commit_ref
git_tuple = get_git_info()
if git_tuple is None:
_commit_ref, _commit_hash = ':INVALID:', None
return None
else:
_commit_hash, _commit_ref = git_tuple
return git_tuple
LOW_PORT_BOUND = 10000
HIGH_PORT_BOUND = 65535
_local_occupied_ports = set()
def _get_ports_from_netstat():
import subprocess
p = subprocess.Popen('netstat -a -n -p tcp'.split(), stdout=subprocess.PIPE)
p.wait()
occupied = set()
for line in p.stdout:
line = to_str(line)
if '.' not in line:
continue
for part in line.split():
if '.' in part:
_, port_str = part.rsplit('.', 1)
if port_str == '*':
continue
port = int(port_str)
if LOW_PORT_BOUND <= port <= HIGH_PORT_BOUND:
occupied.add(int(port_str))
break
p.stdout.close()
return occupied
def get_next_port(typ=None):
import psutil
try:
conns = psutil.net_connections()
typ = typ or socket.SOCK_STREAM
occupied = set(sc.laddr.port for sc in conns
if sc.type == typ and LOW_PORT_BOUND <= sc.laddr.port <= HIGH_PORT_BOUND)
except psutil.AccessDenied:
occupied = _get_ports_from_netstat()
occupied.update(_local_occupied_ports)
randn = struct.unpack('<Q', os.urandom(8))[0]
idx = int(randn % (1 + HIGH_PORT_BOUND - LOW_PORT_BOUND - len(occupied)))
for i in irange(LOW_PORT_BOUND, HIGH_PORT_BOUND + 1):
if i in occupied:
continue
if idx == 0:
_local_occupied_ports.add(i)
return i
idx -= 1
raise SystemError('No ports available.')
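# Illustrative usage (not part of the original module): pick a free TCP port in
# [LOW_PORT_BOUND, HIGH_PORT_BOUND], e.g.
#     port = get_next_port()   # defaults to socket.SOCK_STREAM
# The chosen port is remembered in _local_occupied_ports so that repeated calls
# within the same process do not hand out the same port twice.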
@functools32.lru_cache(200)
def mod_hash(val, modulus):
return int(md5(to_binary(val)).hexdigest(), 16) % modulus
class classproperty(object):
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
def serialize_graph(graph, compress=False):
ser_graph = graph.to_pb().SerializeToString()
if compress:
ser_graph = zlib.compress(ser_graph)
return base64.b64encode(ser_graph)
def deserialize_graph(graph_b64, graph_cls=None):
from .serialize.protos.graph_pb2 import GraphDef
from .graph import DirectedGraph
graph_cls = graph_cls or DirectedGraph
try:
json_obj = json.loads(to_str(graph_b64))
return graph_cls.from_json(json_obj)
except (SyntaxError, ValueError):
g = GraphDef()
ser_graph = base64.b64decode(graph_b64)
try:
ser_graph = zlib.decompress(ser_graph)
except zlib.error:
pass
g.ParseFromString(ser_graph)
return graph_cls.from_pb(g)
def merge_tensor_chunks(input_tensor, ctx):
from .tensor.execution.core import Executor
from .tensor.expressions.datasource import TensorFetchChunk
if len(input_tensor.chunks) == 1:
return ctx[input_tensor.chunks[0].key]
chunks = []
for c in input_tensor.chunks:
op = TensorFetchChunk(dtype=c.dtype, to_fetch_key=c.key)
chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key)
chunks.append(chunk)
new_op = TensorFetchChunk(dtype=input_tensor.dtype, to_fetch_key=input_tensor.key)
tensor = new_op.new_tensor(None, input_tensor.shape, chunks=chunks,
nsplits=input_tensor.nsplits)
executor = Executor(storage=ctx)
concat_result = executor.execute_tensor(tensor, concat=True)
return concat_result[0]
if sys.version_info[0] < 3:
def wraps(fun):
if isinstance(fun, functools.partial):
return lambda f: f
return functools.wraps(fun)
else:
wraps = functools.wraps
def calc_data_size(dt):
if isinstance(dt, tuple):
return sum(c.nbytes for c in dt)
else:
return dt.nbytes
def _get_mod_logger():
mod_logger = None
frame_globals = inspect.currentframe().f_back.f_globals
for logger_name in ('logger', 'LOG', 'LOGGER'):
if logger_name in frame_globals:
mod_logger = frame_globals[logger_name]
break
return mod_logger
def log_unhandled(func):
mod_logger = _get_mod_logger()
if not mod_logger:
return func
func_name = getattr(func, '__qualname__', func.__module__ + func.__name__)
func_args = getargspec(func)
@wraps(func)
def _wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except: # noqa: E722
kwcopy = kwargs.copy()
kwcopy.update(zip(func_args.args, args))
if getattr(func, '__closure__', None) is not None:
                kwcopy.update(zip(
func.__code__.co_freevars + getattr(func.__code__, 'co_cellvars', ()),
[getattr(c, 'cell_contents', None) for c in func.__closure__],
))
messages = []
for k, v in kwcopy.items():
if 'key' in k:
messages.append('%s=%r' % (k, v))
err_msg = 'Unexpected exception occurred in %s.' % func_name
if messages:
err_msg += ' ' + ' '.join(messages)
mod_logger.exception(err_msg)
raise
return _wrapped
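# Illustrative sketch (not part of the original module) of how log_unhandled is
# meant to be used as a decorator; the example function below is never called here.
@log_unhandled
def _example_fetch_chunk(chunk_key, session_id=None):
    # Any exception escaping this body is logged via the module-level `logger`,
    # including the values of arguments whose names contain 'key', then re-raised.
    raise NotImplementedError('illustrative placeholder for chunk %r' % (chunk_key,))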
| 29.016234
| 96
| 0.633882
|
0c09ca97fba4e8882d11ce0c71e41cd5dbf379e9
| 2,630
|
py
|
Python
|
tests/unit/pywbemcli/all_types_method_mock_v1old.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 8
|
2017-04-01T13:55:00.000Z
|
2022-03-15T18:28:47.000Z
|
tests/unit/pywbemcli/all_types_method_mock_v1old.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 918
|
2017-03-03T14:29:03.000Z
|
2022-03-29T15:32:16.000Z
|
tests/unit/pywbemcli/all_types_method_mock_v1old.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 2
|
2020-01-17T15:56:46.000Z
|
2020-02-12T18:49:30.000Z
|
"""
Test mock script that installs a test method provider for CIM method
AllTypesMethod() in CIM class PyWBEM_AllTypes, using the old setup approach
with global variables.
Note: This script and its method provider perform checks because their purpose
is to test the provider dispatcher. A real mock script with a real method
provider would not need to perform any of these checks.
"""
import pywbem
import pywbem_mock
assert "CONN" in globals()
assert 'SERVER' in globals()
assert 'VERBOSE' in globals()
global CONN # pylint: disable=global-at-module-level
class CIM_AllTypesMethodProvider(pywbem_mock.MethodProvider):
"""
    User test provider for InvokeMethod on class PyWBEM_AllTypes and its
    method AllTypesMethod.
    This is the basis for testing that input parameters are passed correctly
    and for generating some exceptions. The provider validates the class and
    method names and returns the input parameters as output parameters
    together with a return value of 0.
"""
provider_classnames = 'PyWBEM_AllTypes'
def __init__(self, cimrepository):
super(CIM_AllTypesMethodProvider, self).__init__(cimrepository)
def InvokeMethod(self, methodname, localobject, params):
"""
Simplistic test method. Validates methodname, localobject,
and returns return value 0 and the input parameters.
The parameters and return for Invoke method are defined in
:meth:`~pywbem_mock.MethodProvider.InvokeMethod`
"""
namespace = localobject.namespace
# get classname and validate. This provider uses only one class
classname = localobject.classname
assert classname.lower() == self.provider_classnames.lower()
if methodname != 'AllTypesMethod':
raise pywbem.CIMError(pywbem.CIM_ERR_METHOD_NOT_AVAILABLE)
# Test if class exists.
if not self.class_exists(namespace, classname):
raise pywbem.CIMError(
pywbem.CIM_ERR_NOT_FOUND,
"class {0} does not exist in CIM repository, "
"namespace {1}".format(classname, namespace))
# Return the input parameters as output parameters
out_params = params
return_value = 0
return (return_value, out_params)
# Register the provider to the mock environment
# pylint: disable=undefined-variable
_PROV = CIM_AllTypesMethodProvider(CONN.cimrepository) # noqa: F821
CONN.register_provider(_PROV, CONN.default_namespace, # noqa: F821
verbose=VERBOSE) # noqa: F821
| 36.527778
| 79
| 0.715209
|
1cebeebe882615792cb0e7d5dcafcc9c2949e554
| 8,665
|
bzl
|
Python
|
apple/internal/platform_support.bzl
|
michaeleisel/rules_apple
|
424c73847eba4d2a093fa59df1aa22b5629b0fda
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/platform_support.bzl
|
michaeleisel/rules_apple
|
424c73847eba4d2a093fa59df1aa22b5629b0fda
|
[
"Apache-2.0"
] | null | null | null |
apple/internal/platform_support.bzl
|
michaeleisel/rules_apple
|
424c73847eba4d2a093fa59df1aa22b5629b0fda
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support functions for working with Apple platforms and device families."""
load(
"@build_bazel_rules_apple//apple/internal:rule_support.bzl",
"rule_support",
)
load(
"@build_bazel_rules_apple//apple/internal:swift_support.bzl",
"swift_support",
)
# Maps the strings passed in to the "families" attribute to the numerical
# representation in the UIDeviceFamily plist entry.
# @unsorted-dict-items
_DEVICE_FAMILY_VALUES = {
"iphone": 1,
"ipad": 2,
"tv": 3,
"watch": 4,
# We want _ui_device_family_plist_value to find None for the valid "mac"
# family since macOS doesn't use the UIDeviceFamily Info.plist key, but we
# still want to catch invalid families with a KeyError.
"mac": None,
}
def _families(ctx):
"""Returns the device families that apply to the target being built.
Some platforms, such as iOS, support multiple device families (iPhone and
iPad) and provide a `families` attribute that lets the user specify which
to use. Other platforms, like tvOS, only support one family, so they do not
provide the public attribute and instead we implicitly get the supported
    families from the private attribute.
Args:
ctx: The Starlark context.
Returns:
The list of device families that apply to the target being built.
"""
rule_descriptor = rule_support.rule_descriptor(ctx)
return getattr(ctx.attr, "families", rule_descriptor.allowed_device_families)
def _ui_device_family_plist_value(*, platform_prerequisites):
"""Returns the value to use for `UIDeviceFamily` in an info.plist.
This function returns the array of value to use or None if there should be
no plist entry (currently, only macOS doesn't use UIDeviceFamily).
Args:
platform_prerequisites: The platform prerequisites.
Returns:
A list of integers to use for the `UIDeviceFamily` in an Info.plist
or None if the key should not be added to the Info.plist.
"""
family_ids = []
families = platform_prerequisites.device_families
for f in families:
number = _DEVICE_FAMILY_VALUES[f]
if number:
family_ids.append(number)
if family_ids:
return family_ids
return None
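# Illustrative sketch (not from the original source): with
# device_families = ["iphone", "ipad"] this returns [1, 2]; with ["watch"] it
# returns [4]; and with ["mac"] it returns None so that no UIDeviceFamily key
# is written into the Info.plist.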
def _is_device_build(ctx):
"""Returns True if the target is being built for a device.
Args:
ctx: The Starlark context.
Returns:
True if this is a device build, or False if it is a simulator build.
"""
platform = _platform(ctx)
return platform.is_device
def _platform_prerequisites(
*,
apple_fragment,
config_vars,
device_families,
explicit_minimum_os = None,
objc_fragment = None,
platform_type_string,
uses_swift,
xcode_path_wrapper,
xcode_version_config):
"""Returns a struct containing information on the platform being targeted.
Args:
apple_fragment: An Apple fragment (ctx.fragments.apple).
config_vars: A reference to configuration variables, typically from `ctx.var`.
device_families: The list of device families that apply to the target being built.
explicit_minimum_os: A dotted version string indicating minimum OS desired. Optional.
objc_fragment: An Objective-C fragment (ctx.fragments.objc), if it is present. Optional.
platform_type_string: The platform type for the current target as a string.
uses_swift: Boolean value to indicate if this target uses Swift.
        xcode_path_wrapper: The Xcode path wrapper script. Can be None if and only if we don't need to
resolve __BAZEL_XCODE_SDKROOT__ and other placeholders in environment arguments.
xcode_version_config: The `apple_common.XcodeVersionConfig` provider from the current context.
Returns:
A struct representing the collected platform information.
"""
platform_type_attr = getattr(apple_common.platform_type, platform_type_string)
platform = apple_fragment.multi_arch_platform(platform_type_attr)
if explicit_minimum_os:
minimum_os = explicit_minimum_os
else:
minimum_os = xcode_version_config.minimum_os_for_platform_type(platform_type_attr)
sdk_version = xcode_version_config.sdk_version_for_platform(platform)
return struct(
apple_fragment = apple_fragment,
config_vars = config_vars,
device_families = device_families,
minimum_os = minimum_os,
platform = platform,
platform_type = platform_type_attr,
objc_fragment = objc_fragment,
sdk_version = sdk_version,
uses_swift = uses_swift,
xcode_path_wrapper = xcode_path_wrapper,
xcode_version_config = xcode_version_config,
)
def _platform_prerequisites_from_rule_ctx(ctx):
"""Returns a struct containing information on the platform being targeted from a rule context.
Args:
ctx: The Starlark context for a rule.
Returns:
A struct representing the default collected platform information for that rule context.
"""
device_families = getattr(ctx.attr, "families", None)
if not device_families:
rule_descriptor = rule_support.rule_descriptor(ctx)
device_families = rule_descriptor.allowed_device_families
deps = getattr(ctx.attr, "deps", None)
uses_swift = swift_support.uses_swift(deps) if deps else False
return _platform_prerequisites(
apple_fragment = ctx.fragments.apple,
config_vars = ctx.var,
device_families = device_families,
explicit_minimum_os = ctx.attr.minimum_os_version,
objc_fragment = ctx.fragments.objc,
platform_type_string = ctx.attr.platform_type,
uses_swift = uses_swift,
xcode_path_wrapper = ctx.executable._xcode_path_wrapper,
xcode_version_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig],
)
def _minimum_os(ctx):
"""Returns the minimum OS version required for the current target.
Args:
ctx: The Starlark context.
Returns:
A string containing the dotted minimum OS version.
"""
min_os = ctx.attr.minimum_os_version
if not min_os:
# TODO(b/38006810): Use the SDK version instead of the flag value as a soft
# default.
min_os = str(ctx.attr._xcode_config[apple_common.XcodeVersionConfig].minimum_os_for_platform_type(_platform_type(ctx)))
return min_os
def _platform_type(ctx):
"""Returns the platform type for the current target.
Args:
ctx: The Starlark context.
Returns:
The `PlatformType` for the current target, after being converted from its
string attribute form.
"""
platform_type_string = ctx.attr.platform_type
return getattr(apple_common.platform_type, platform_type_string)
def _platform(ctx):
"""Returns the platform for the current target.
Args:
ctx: The Starlark context.
Returns:
The Platform object for the target.
"""
apple = ctx.fragments.apple
platform = apple.multi_arch_platform(_platform_type(ctx))
return platform
def _platform_and_sdk_version(ctx):
"""Returns the platform and SDK version for the current target.
Args:
ctx: The Starlark context.
Returns:
A tuple containing the Platform object for the target and the SDK version
to build against for that platform.
"""
platform = _platform(ctx)
sdk_version = (ctx.attr._xcode_config[apple_common.XcodeVersionConfig].sdk_version_for_platform(platform))
return platform, sdk_version
# Define the loadable module that lists the exported symbols in this file.
platform_support = struct(
families = _families,
is_device_build = _is_device_build,
minimum_os = _minimum_os,
platform = _platform,
platform_and_sdk_version = _platform_and_sdk_version,
platform_prerequisites = _platform_prerequisites,
platform_prerequisites_from_rule_ctx = _platform_prerequisites_from_rule_ctx,
platform_type = _platform_type,
ui_device_family_plist_value = _ui_device_family_plist_value,
)
| 35.512295
| 127
| 0.723485
|
7009292ba26b02bedb7383ba0d697b0f260abc13
| 761
|
py
|
Python
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/nameserverlookup/errors.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 9
|
2019-11-22T04:58:40.000Z
|
2022-02-26T16:47:28.000Z
|
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/nameserverlookup/errors.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | null | null | null |
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/network/cmd/nameserverlookup/errors.py
|
bidhata/EquationGroupLeaks
|
1ff4bc115cb2bd5bf2ed6bf769af44392926830c
|
[
"Unlicense"
] | 8
|
2017-09-27T10:31:18.000Z
|
2022-01-08T10:30:46.000Z
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_CONTEXT_INVALID = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_LOOKUP_FAILED = mcl.status.framework.ERR_START + 3
ERR_DATA_SEND_FAILED = mcl.status.framework.ERR_START + 4
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_CONTEXT_INVALID: 'Context Invalid',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_LOOKUP_FAILED: 'Lookup Failed',
ERR_DATA_SEND_FAILED: 'Sending result data failed'
}
| 42.277778
| 67
| 0.777924
|
d387b26775f2a57579e8ede80f77a0b8c724cef3
| 3,374
|
py
|
Python
|
act/act_portero/atajar.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
act/act_portero/atajar.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | null | null | null |
act/act_portero/atajar.py
|
Gusta2307/Football-Simulation-IA-SIM-COM-
|
8c29c5b1ef61708a4f8b34f5e0e00990aeecfacd
|
[
"MIT"
] | 1
|
2022-02-07T04:47:15.000Z
|
2022-02-07T04:47:15.000Z
|
import numpy
from act.accion import Accion
from config import Config
from colorama import Fore, Style
config = Config()
class Atajar(Accion):
def __init__(self, agente) -> None:
self.agente = agente
self.__descripcion = f"El portero {self.agente.nombre} "
self.estado = None
self.tiempo = 0.13
self.tipo = config.ACCIONES.JUGADOR.ACT_ATAJAR
def descripcion(self):
return self.__descripcion
def precondicion(self, partido) -> bool:
# if partido.ultima_accion.tipo == config.ACT_TIRO_PORTERIA:
return ((partido.ultima_accion.tipo == config.ACCIONES.JUGADOR.ACT_TIRO_PORTERIA and partido.ultima_accion.estado == config.ACCIONES.ESTADO.TIRO_PORTERIA.A_PORTERIA) or partido.ultima_accion.tipo == config.ACCIONES.JUGADOR.ACT_SAQUE_ESQUINA) and partido.ultima_accion.agente.equipo != self.agente.equipo
# return False
def ejecutar(self, partido):
atajar = numpy.random.choice(numpy.arange(0, 2), p=[1 - self.agente.atajar_balon , self.agente.atajar_balon])
if atajar:
partido.reporte.annadir_a_resumen(f"REBOTE: {self.agente.sin_rebote} {self.agente.rebote_banda} {self.agente.rebote_linea_final} {self.agente.rebote_jugador}", partido.pt)
rebote = numpy.random.choice(numpy.arange(0, 4), p=[self.agente.sin_rebote, self.agente.rebote_banda, self.agente.rebote_linea_final, self.agente.rebote_jugador])
if rebote == 0:
self.estado = config.ACCIONES.ESTADO.ATAJAR.SIN_REBOTE
elif rebote == 1:
self.estado = config.ACCIONES.ESTADO.ATAJAR.REBOTE_BANDA
elif rebote == 2:
self.estado = config.ACCIONES.ESTADO.ATAJAR.REBOTE_LINEA_FINAL
elif rebote == 3:
self.estado = config.ACCIONES.ESTADO.ATAJAR.REBOTE_JUGADOR
self.poscondicion(partido, atajar, rebote)
partido.reporte.annadir_a_resumen(f"{partido.obtener_tiempo()} {self.descripcion()} {Fore.RED}{self.estado} {Style.RESET_ALL}", partido.pt)
else:
partido.reporte.annadir_a_resumen(f"{partido.obtener_tiempo()} El jugador {partido.ultima_accion.agente} {Fore.CYAN} marco GOOOOOL {Style.RESET_ALL}", partido.pt)
# partido.ultima_accion.agente.equipo.estadisticas['GOLES'] += 1
self.tiempo = 0.8
self.estado = config.ACCIONES.ESTADO.ATAJAR.NO_ATAJO
self.poscondicion(partido, atajar, -1)
def poscondicion(self, partido, atajar, rebote):
if atajar:
# self.agente.equipo.estadisticas['PARADAS PORTERO'] += 1
partido.reporte.annadir_parada_portero(self.agente.equipo.nombre, partido.pt)
self.agente.reporte.annadir_parada_portero()
if rebote == 0:
partido.pos_balon = self.agente
elif rebote == 1 or rebote == 2:
partido.estado = config.PARTIDO.ESTADO.DETENIDO
elif rebote == 3:
partido.pos_balon = None
partido.ultima_accion = self
else:
partido.pos_balon = None
partido.estado = config.PARTIDO.ESTADO.REANUDAR_PARTIDO
partido.reporte.annadir_gol(partido.ultima_accion.agente.equipo.nombre, partido.pt)
partido.ultima_accion.agente.reporte.annadir_gol()
| 49.617647
| 314
| 0.658269
|
0775d359356a2a87bdf8001f0d738258356bc156
| 53,505
|
py
|
Python
|
cwltool/main.py
|
huzech/cwltool
|
c26a7881c5135a42c5891e3903ba755a3ceefa6b
|
[
"Apache-2.0"
] | null | null | null |
cwltool/main.py
|
huzech/cwltool
|
c26a7881c5135a42c5891e3903ba755a3ceefa6b
|
[
"Apache-2.0"
] | null | null | null |
cwltool/main.py
|
huzech/cwltool
|
c26a7881c5135a42c5891e3903ba755a3ceefa6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
"""Kill all spawned processes.
Processes to be killed must be appended to `utils.processes_to_kill`
as they are spawned.
An important caveat: since there's no supported way to kill another
thread in Python, this function cannot stop other threads from
continuing to execute while it kills the processes that they've
spawned. This may occasionally lead to unexpected behaviour.
"""
# It's possible that another thread will spawn a new task while
# we're executing, so it's not safe to use a for loop here.
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
if cidfile: # Try to be nice
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen( # nosec
["docker", "kill", inp_stream.read()], shell=False # nosec
)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
"""Kill all spawned processes and exit.
Note that it's possible for another thread to spawn a process after
all processes have been killed, but before Python exits.
Refer to the docstring for _terminate_processes() for other caveats.
"""
_terminate_processes()
sys.exit(signum)
def generate_example_input(
inptype: Optional[CWLOutputType],
default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
"""Convert a single input schema into an example."""
example = None
comment = ""
defaults = {
"null": "null",
"Any": "null",
"boolean": False,
"int": 0,
"long": 0,
"float": 0.1,
"double": 0.1,
"string": "a_string",
"File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
if isinstance(inptype, MutableSequence):
optional = False
if "null" in inptype:
inptype.remove("null")
optional = True
if len(inptype) == 1:
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f"{comment} (optional)"
else:
comment = "optional"
else:
example = CommentedSeq()
for index, entry in enumerate(inptype):
value, e_comment = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = "optional"
elif isinstance(inptype, Mapping) and "type" in inptype:
if inptype["type"] == "array":
first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
items_len = len(cast(Sized, inptype["items"]))
if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
# array of just an enum then list all the options
example = first_item["symbols"]
if "name" in first_item:
comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
if items_len == 1:
example = [value]
else:
example = value
if default is not None:
example = default
elif inptype["type"] == "enum":
symbols = cast(List[str], inptype["symbols"])
if default is not None:
example = default
elif "default" in inptype:
example = inptype["default"]
elif len(cast(Sized, inptype["symbols"])) == 1:
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
comment = '"{}" record type.'.format(inptype["name"])
else:
comment = "Anonymous record type."
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return example, comment
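# Illustrative note (not from the upstream cwltool source): for a bare "string"
# schema this helper falls through to the defaults table, so
# generate_example_input("string", None) yields ("a_string", 'type "string"'),
# while an optional union such as ["null", "int"] collapses to the single
# remaining type and tags the comment, yielding roughly (0, 'type "int" (optional)').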
def realize_input_schema(
input_types: MutableSequence[Union[str, CWLObjectType]],
schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
"""Replace references to named typed with the actual types."""
for index, entry in enumerate(input_types):
if isinstance(entry, str):
if "#" in entry:
_, input_type_name = entry.split("#")
else:
input_type_name = entry
if input_type_name in schema_defs:
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if isinstance(entry["type"], str) and "#" in entry["type"]:
_, input_type_name = entry["type"].split("#")
if input_type_name in schema_defs:
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]],
schema_defs[input_type_name],
),
schema_defs,
),
)
if isinstance(entry["type"], MutableSequence):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
schema_defs,
),
)
if isinstance(entry["type"], Mapping):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
[cast(CWLObjectType, entry["type"])], schema_defs
),
)
if entry["type"] == "array":
items = (
entry["items"]
if not isinstance(entry["items"], str)
else [entry["items"]]
)
entry["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], items),
schema_defs,
),
)
if entry["type"] == "record":
entry["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]], entry["fields"]
),
schema_defs,
),
)
return input_types
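# Illustrative note (not from the upstream cwltool source): given a hypothetical
# schema_defs entry such as {"my_record": {"type": "record", "fields": [...]}},
# an input whose type is written as "#my_record" is replaced in place by that
# record definition, and nested array/record/union types are expanded recursively.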
def generate_input_template(tool: Process) -> CWLObjectType:
"""Generate an example input object for the given CWL process."""
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(
List[MutableMapping[str, str]],
realize_input_schema(tool.tool["inputs"], tool.schemaDefs),
):
name = shortname(inp["id"])
value, comment = generate_example_input(inp["type"], inp.get("default", None))
template.insert(0, name, value, comment)
return template
def load_job_order(
args: argparse.Namespace,
stdin: IO[Any],
fetcher_constructor: Optional[FetcherCallableType],
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
job_order_object = None
job_order_file = None
_jobloaderctx = jobloaderctx.copy()
loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = yaml_no_ts()
job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
else:
job_order_file = None
if job_order_object is not None:
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
args.basedir
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
checklinks=False,
content_types=CWL_CONTENT_TYPES,
)
if (
job_order_object is not None
and "http://commonwl.org/cwltool#overrides" in job_order_object
):
ov_uri = file_uri(job_order_file or input_basedir)
overrides_list.extend(
resolve_overrides(job_order_object, ov_uri, tool_file_uri)
)
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
if job_order_object is not None and not isinstance(
job_order_object, MutableMapping
):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionay, not %s.\n"
"Raw input object:\n%s",
job_order_file or "stdin",
type(job_order_object),
job_order_object,
)
sys.exit(1)
return (job_order_object, input_basedir, loader)
def init_job_order(
job_order_object: Optional[CWLObjectType],
args: argparse.Namespace,
process: Process,
loader: Loader,
stdout: Union[TextIO, StreamWriter],
print_input_deps: bool = False,
relative_deps: str = "primary",
make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
input_basedir: str = "",
secret_store: Optional[SecretStore] = None,
input_required: bool = True,
runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
if job_order_object is None:
namemap = {} # type: Dict[str, str]
records = [] # type: List[str]
toolparser = generate_parser(
argparse.ArgumentParser(prog=args.workflow),
process,
namemap,
records,
input_required,
)
if args.tool_help:
toolparser.print_help(cast(IO[str], stdout))
exit(0)
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
record_items = {
k: v for k, v in cmd_line.items() if k.startswith(record_name)
}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
cmd_line[str(record_name)] = record
if "job_order" in cmd_line and cmd_line["job_order"]:
try:
job_order_object = cast(
CWLObjectType,
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
_logger.exception(
"Failed to resolv job_order: %s", cmd_line["job_order"]
)
exit(1)
else:
job_order_object = {"id": args.workflow}
del cmd_line["job_order"]
job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
json_dumps(job_order_object, indent=4, default=str),
)
for inp in process.tool["inputs"]:
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
def path_to_loc(p: CWLObjectType) -> None:
if "location" not in p and "path" in p:
p["location"] = p["path"]
del p["path"]
ns = {} # type: ContextType
ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
ld = Loader(ns)
def expand_formats(p: CWLObjectType) -> None:
if "format" in p:
p["format"] = ld.expand_url(cast(str, p["format"]), "")
visit_class(job_order_object, ("File", "Directory"), path_to_loc)
visit_class(
job_order_object,
("File",),
functools.partial(add_sizes, make_fs_access(input_basedir)),
)
visit_class(job_order_object, ("File",), expand_formats)
adjustDirObjs(job_order_object, trim_listing)
normalizeFilesDirs(job_order_object)
if print_input_deps:
if not runtime_context:
raise RuntimeError("runtime_context is required for print_input_deps.")
runtime_context.toplevel = True
builder = process._init_job(job_order_object, runtime_context)
builder.loadListing = "no_listing"
builder.bind_input(
process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
)
basedir: Optional[str] = None
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
uri,
basedir=basedir,
nestdirs=False,
)
exit(0)
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
if "id" in job_order_object:
del job_order_object["id"]
return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
"""Relativize the location URI of a File or Directory object."""
uri = cast(str, obj.get("location", obj.get("path")))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base)
def printdeps(
obj: CWLObjectType,
document_loader: Loader,
stdout: Union[TextIO, StreamWriter],
relative_deps: str,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> None:
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
) -> CWLObjectType:
deps = find_deps(obj, document_loader, uri, basedir=basedir)
def remove_non_cwl(deps: CWLObjectType) -> None:
if "secondaryFiles" in deps:
sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
for index, entry in enumerate(sec_files):
if not ("format" in entry and entry["format"] == CWL_IANA):
del sec_files[index]
else:
remove_non_cwl(entry)
remove_non_cwl(deps)
return deps
def find_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> CWLObjectType:
"""Find the dependencies of the CWL document."""
deps = {
"class": "File",
"location": uri,
"format": CWL_IANA,
} # type: CWLObjectType
def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps(
basedir if basedir else uri,
obj,
{"$import", "run"},
{"$include", "$schemas", "location"},
loadref,
nestdirs=nestdirs,
)
if sfs is not None:
deps["secondaryFiles"] = cast(MutableSequence[CWLOutputAtomType], sfs)
return deps
def print_pack(
loadingContext: LoadingContext,
uri: str,
) -> str:
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
return json_dumps(packed, indent=4, default=str)
return json_dumps(
cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4, default=str
)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
# ALLUPDATES and UPDATES are dicts
if enable_dev:
versions = list(ALLUPDATES)
else:
versions = list(UPDATES)
versions.sort()
return versions
def setup_schema(
args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
ext10 = res.read().decode("utf-8")
with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
ext11 = res.read().decode("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev3", "http://commonwl.org/cwltool", ext11)
else:
use_standard_schema("v1.0")
use_standard_schema("v1.1")
use_standard_schema("v1.2")
use_standard_schema("v1.2.0-dev1")
use_standard_schema("v1.2.0-dev2")
use_standard_schema("v1.2.0-dev3")
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
formatted_time = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
)
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
return log_file_io, prov_log_handler
def setup_loadingContext(
loadingContext: Optional[LoadingContext],
runtimeContext: RuntimeContext,
args: argparse.Namespace,
) -> LoadingContext:
"""Prepare a LoadingContext from the given arguments."""
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=args.enable_dev,
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = args.disable_js_validation or (
not args.do_validate
)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
return loadingContext
def make_template(
tool: Process,
) -> None:
"""Make a template CWL input object for the give Process."""
def my_represent_none(
self: Any, data: Any
) -> Any: # pylint: disable=unused-argument
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(
generate_input_template(tool),
sys.stdout,
)
def inherit_reqshints(tool: Process, parent: Process) -> None:
"""Copy down requirements and hints from ancestors of a given process."""
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if parent_req["class"] == tool_req["class"]:
found = True
break
if not found:
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if parent_hint["class"] == tool_req["class"]:
found = True
break
if not found:
for tool_hint in tool.hints:
if parent_hint["class"] == tool_hint["class"]:
found = True
break
if not found:
tool.hints.append(parent_hint)
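# Illustrative note (not from the upstream cwltool source): if the parent
# workflow declares a requirement class (say, DockerRequirement) that the
# embedded tool lacks, it is appended to the tool's requirements; a parent hint
# is only copied when the tool has neither a requirement nor a hint of that class.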
def choose_target(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the Workflow, extract the subset matches all the args.targets."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool, loading_context
)
else:
extracted = get_subgraph(
[
loading_context.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
loading_context,
)
else:
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_step(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_step."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_step
else:
step_id = loading_context.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_step
)
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error("Can only use --single-step on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = cast(
Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
)
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_process(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_process."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_process
else:
step_id = loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_process
)
extracted, workflow_step = get_process(
tool,
step_id,
loadingContext,
)
else:
_logger.error("Can only use --single-process on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
new_tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
inherit_reqshints(new_tool, workflow_step)
return new_tool
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
"""Make any needed working directories."""
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if (
getattr(runtimeContext, dirprefix)
and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX
):
sl = (
"/"
if getattr(runtimeContext, dirprefix).endswith("/")
or dirprefix == "cachedir"
else ""
)
setattr(
runtimeContext,
dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl,
)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception("Failed to create directory.")
return 1
return None
def print_targets(
tool: Process,
stdout: Union[TextIO, StreamWriter],
loading_context: LoadingContext,
prefix: str = "",
) -> None:
"""Recursively find targets for --subgraph and friends."""
for f in ("outputs", "inputs"):
if tool.tool[f]:
_logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
print(
" "
+ "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
file=stdout,
)
if "steps" in tool.tool:
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info("%s steps targets:", prefix[:-1])
for t in tool.tool["steps"]:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[str, Process, Dict[str, Any]] = t["run"]
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(process, stdout, loading_context, shortname(t["id"]) + "/")
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
                    logging.warning(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf:
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=stdout,
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
                # Stop logging so we won't half-log adding ourselves to the RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
"""Find a container."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder
)
return default_container
def windows_check() -> None:
"""See if we are running on MS Windows and warn about the lack of support."""
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
def run(*args: Any, **kwargs: Any) -> None:
"""Run cwltool."""
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes()
if __name__ == "__main__":
run(sys.argv[1:])
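# Illustrative usage (not from the upstream cwltool source; file names are
# placeholders): the module is normally driven through the console script,
# e.g. `cwltool my-workflow.cwl job-inputs.yml`, or programmatically via
# `from cwltool.main import main` followed by
# `sys.exit(main(["my-workflow.cwl", "job-inputs.yml"]))`.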
| 35.765374
| 88
| 0.574881
|
883ef0f97acc6ee7478856b59000b1246b829465
| 3,976
|
py
|
Python
|
examples/contrib/strimko2.py
|
AlohaChina/or-tools
|
1ece0518104db435593a1a21882801ab6ada3e15
|
[
"Apache-2.0"
] | 8,273
|
2015-02-24T22:10:50.000Z
|
2022-03-31T21:19:27.000Z
|
examples/contrib/strimko2.py
|
AlohaChina/or-tools
|
1ece0518104db435593a1a21882801ab6ada3e15
|
[
"Apache-2.0"
] | 2,530
|
2015-03-05T04:27:21.000Z
|
2022-03-31T06:13:02.000Z
|
examples/contrib/strimko2.py
|
AlohaChina/or-tools
|
1ece0518104db435593a1a21882801ab6ada3e15
|
[
"Apache-2.0"
] | 2,057
|
2015-03-04T15:02:02.000Z
|
2022-03-30T02:29:27.000Z
|
# Copyright 2010 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strimko problem in Google CP Solver.
From
360: A New Twist on Latin Squares
http://threesixty360.wordpress.com/2009/08/04/a-new-twist-on-latin-squares/
'''
The idea is simple: each row and column of an nxn grid must contain
the number 1, 2, ... n exactly once (that is, the grid must form a
Latin square), and each "stream" (connected path in the grid) must
also contain the numbers 1, 2, ..., n exactly once.
'''
For more information, see:
* http://www.strimko.com/
* http://www.strimko.com/rules.htm
* http://www.strimko.com/about.htm
* http://www.puzzlersparadise.com/Strimko.htm
I have blogged about this (using MiniZinc model) in
'Strimko - Latin squares puzzle with "streams"'
http://www.hakank.org/constraint_programming_blog/2009/08/strimko_latin_squares_puzzle_w_1.html
Compare with the following models:
* MiniZinc: http://hakank.org/minizinc/strimko2.mzn
* ECLiPSe: http://hakank.org/eclipse/strimko2.ecl
* SICStus: http://hakank.org/sicstus/strimko2.pl
* Gecode: http://hakank.org/gecode/strimko2.cpp
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
See my other Google CP Solver models: http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main(streams='', placed=''):
# Create the solver.
solver = pywrapcp.Solver('Strimko')
#
# default problem
#
if streams == '':
streams = [[1, 1, 2, 2, 2, 2, 2], [1, 1, 2, 3, 3, 3, 2],
[1, 4, 1, 3, 3, 5, 5], [4, 4, 3, 1, 3, 5, 5],
[4, 6, 6, 6, 7, 7, 5], [6, 4, 6, 4, 5, 5, 7],
[6, 6, 4, 7, 7, 7, 7]]
# Note: This is 1-based
placed = [[2, 1, 1], [2, 3, 7], [2, 5, 6], [2, 7, 4], [3, 2, 7], [3, 6, 1],
[4, 1, 4], [4, 7, 5], [5, 2, 2], [5, 6, 6]]
n = len(streams)
num_placed = len(placed)
print('n:', n)
#
# variables
#
x = {}
for i in range(n):
for j in range(n):
x[i, j] = solver.IntVar(1, n, 'x[%i,%i]' % (i, j))
x_flat = [x[i, j] for i in range(n) for j in range(n)]
#
# constraints
#
# all rows and columns must be unique, i.e. a Latin Square
for i in range(n):
row = [x[i, j] for j in range(n)]
solver.Add(solver.AllDifferent(row))
col = [x[j, i] for j in range(n)]
solver.Add(solver.AllDifferent(col))
#
# streams
#
for s in range(1, n + 1):
tmp = [x[i, j] for i in range(n) for j in range(n) if streams[i][j] == s]
solver.Add(solver.AllDifferent(tmp))
#
# placed
#
for i in range(num_placed):
# note: also adjust to 0-based
solver.Add(x[placed[i][0] - 1, placed[i][1] - 1] == placed[i][2])
#
# search and solution
#
db = solver.Phase(x_flat, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
for i in range(n):
for j in range(n):
print(x[i, j].Value(), end=' ')
print()
print()
num_solutions += 1
solver.EndSearch()
print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
if __name__ == '__main__':
if len(sys.argv) > 1:
problem_file = sys.argv[1]
exec(compile(open(problem_file).read(), problem_file, 'exec'))
main(streams, placed)
else:
main()
| 27.804196
| 97
| 0.631036
|
4159b809cb88e6b5acd34aaf59eb1130fb70a5b5
| 17,940
|
py
|
Python
|
src/m4_sequences.py
|
LiamEnneking/21-Exam3Practice
|
335928ad67caf5a070ebf1063fa50fbe71ac009f
|
[
"MIT"
] | null | null | null |
src/m4_sequences.py
|
LiamEnneking/21-Exam3Practice
|
335928ad67caf5a070ebf1063fa50fbe71ac009f
|
[
"MIT"
] | null | null | null |
src/m4_sequences.py
|
LiamEnneking/21-Exam3Practice
|
335928ad67caf5a070ebf1063fa50fbe71ac009f
|
[
"MIT"
] | null | null | null |
"""
PRACTICE Test 3.
This problem provides practice at:
*** SEQUENCES. ***
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Liam.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
########################################################################
import simple_testing as st
import math
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
run_test_practice_problem4a()
run_test_practice_problem4b()
run_test_practice_problem4c()
run_test_practice_problem4d()
def is_prime(n):
"""
What comes in: An integer.
What goes out: Returns True if the given integer is prime.
Returns False if the given integer is NOT prime.
Side effects: None.
Examples:
This function returns True or False, depending on whether
the given integer is prime or not. Since the smallest prime is 2,
this function returns False on all integers < 2.
It returns True on 2, 3, 5, 7, and other primes.
Note: The algorithm used here is simple and clear but slow.
Type hints:
:type n: int
"""
if n < 2:
return False
for k in range(2, int(math.sqrt(n) + 0.1) + 1):
if n % k == 0:
return False
return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
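# Quick sanity checks (illustrative, not part of the original exercise):
# is_prime(1) -> False, is_prime(13) -> True, is_prime(15) -> False.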
# ----------------------------------------------------------------------
# Students: Some of the testing code below uses SimpleTestCase objects,
# from the imported simple_testing (st) module.
# See details in the test code below.
# ----------------------------------------------------------------------
def run_test_practice_problem4a():
""" Tests the practice_problem4a function. """
# ------------------------------------------------------------------
# 4 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
    #   practice_problem4a((9, 33, 8, 8, 0, 4, 4, 8))
# and compare the returned value against [2, 5] (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4a,
[(9, 33, 8, 8, 0, 4, 4, 8)],
[2, 5]),
st.SimpleTestCase(practice_problem4a,
[(9, 9, 9, 9, 0, 9, 9, 9)],
[0, 1, 2, 5, 6]),
st.SimpleTestCase(practice_problem4a,
[(4, 5, 4, 5, 4, 5, 4)],
[]),
st.SimpleTestCase(practice_problem4a,
['abbabbb'],
[1, 4, 5]),
]
# Run the 4 tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem4a', tests)
def practice_problem4a(sequence):
"""
What comes in: A non-empty sequence.
What goes out: Returns a list of integers,
where the integers are the places (indices)
where an item in the given sequence appears twice in a row.
Side effects: None.
Examples:
Given sequence (9, 33, 8, 8, 0, 4, 4, 8)
-- this function returns [2, 5]
since 8 appears twice in a row starting at index 2
and 4 appears twice in a row starting at index 5
Given sequence (9, 9, 9, 9, 0, 9, 9, 9)
-- this function returns [0, 1, 2, 5, 6]
Given sequence (4, 5, 4, 5, 4, 5, 4)
-- this function returns []
Given sequence 'abbabbb'
-- this function returns [1, 4, 5]
Type hints:
:type sequence: list | tuple | string
"""
####################################################################
# DONE: 2. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
####################################################################
    # Record each index whose item is repeated by the item that follows it.
    repeats = []
    for k in range(1, len(sequence)):
        if sequence[k] == sequence[k - 1]:
            repeats.append(k - 1)
    return repeats
def run_test_practice_problem4b():
""" Tests the practice_problem4b function. """
# ------------------------------------------------------------------
# 5 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem4b((12, 33, 18, 9, 13, 3, 9, 20, 19, 20))
# and compare the returned value against 19 (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4b,
[(12, 33, 18, 9, 13, 3, 9, 20, 19, 20)],
19),
st.SimpleTestCase(practice_problem4b,
[(3, 12, 10, 8, 8, 9, 8, 11)],
10),
st.SimpleTestCase(practice_problem4b,
[(-9999999999, 8888888888)],
- 9999999999),
st.SimpleTestCase(practice_problem4b,
[(8888888888, -9999999999)],
8888888888),
st.SimpleTestCase(practice_problem4b,
[(-77, 20000, -33, 40000, -55,
60000, -11)],
- 11),
]
# ------------------------------------------------------------------
# Run the 5 tests in the tests list constructed above.
# ------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem4b', tests)
def practice_problem4b(sequence):
"""
What comes in:
A sequence of numbers, where the length of the sequence >= 2.
What goes out:
Returns the largest of the numbers at EVEN INDICES of the sequence.
Side effects: None.
Examples:
If the sequence is:
(12, 33, 18, 9, 13, 3, 99, 20, 19, 20)
then the largest of the numbers at EVEN indices is the largest of
12 18 13 99 19 which is 99.
So the function returns 99 in this example.
Type hints:
:type sequence: (list | tuple) of (float | int)
"""
# ------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 10 minutes.
####################################################################
    # Track the largest value seen at the even indices (0, 2, 4, ...).
    # (The stray debug print in the draft solution is removed so the function
    # has no side effects, as the docstring promises.)
    largest = -10000000000
    for k in range(0, len(sequence), 2):
        if sequence[k] > largest:
            largest = sequence[k]
    return largest
def run_test_practice_problem4c():
""" Tests the practice_problem4c function. """
# ------------------------------------------------------------------
    # 4 tests. They use the imported simple_testing (st) module.
# ------------------------------------------------------------------
argument1 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(1, 13),
rg.Point(10, 40),
rg.Point(13, 5),
rg.Point(10, 3),
rg.Point(3, 7),
rg.Point(2, 2))
answer1 = rg.Point(5, 13)
argument2 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(27, 13),
rg.Point(10, 40),
rg.Point(13, 4),
rg.Point(1, 1),
rg.Point(3, 7))
answer2 = rg.Point(7, 3)
argument3 = (rg.Point(5, 2),
rg.Point(20, 20),
rg.Point(27, 13),
rg.Point(10, 40),
rg.Point(13, 4),
rg.Point(1, 1),
rg.Point(3, 7))
answer3 = rg.Point(2, 5)
argument4 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(27, 13))
answer4 = 'Not found'
tests = [st.SimpleTestCase(practice_problem4c, [argument1], answer1),
st.SimpleTestCase(practice_problem4c, [argument2], answer2),
st.SimpleTestCase(practice_problem4c, [argument3], answer3),
st.SimpleTestCase(practice_problem4c, [argument4], answer4),
]
# ------------------------------------------------------------------
    # Run the 4 tests in the tests list constructed above.
# ------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem4c', tests)
if argument1[4] != answer1:
print()
print('*** WARNING, WARNING, WARNING ***')
print('If your code DID pass the above tests')
print('but you get this message,')
print('then you have missed an important concept about mutation.')
print(' *** SEE YOUR INSTRUCTOR for an important explanation!')
print()
def practice_problem4c(points):
"""
What comes in: A tuple of rg.Points, each of whose coordinates
is an integer.
What goes out:
AFTER doing the side effect below, this function
returns the rg.Point to which it did the side effect.
If there is no point to which to do the side effect,
returns 'Not found'.
Side effects:
Swaps the x and y coordinates of the first occurrence of an rg.Point
in the given list whose x and y coordinates are both primes.
Has no side effect if there are no such rg.Points
in the given list.
Examples:
If the given tuple is: (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(1, 13),
rg.Point(10, 40),
rg.Point(13, 5),
rg.Point(10, 3),
rg.Point(3, 7),
rg.Point(2, 2))
then after this function the rg.Point in the given tuple
whose x and y were (13, 5) will have x and y (5, 13)
and the function returns that rg.Point.
Type hints:
:type points: tuple of rg.Point
:rtype: rg.Point | string
"""
####################################################################
# DONE: 4. Implement and test this function.
# The testing code is already written for you (above).
#
# IMPORTANT: This problem is your LOWEST PRIORITY for preparing
# for Test 2. It is a great problem but WAY more subtle
# than anything that you will see on Test 2.
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 9
# TIME ESTIMATE: 15 minutes.
####################################################################
    # Swap the coordinates of the first point whose x and y are both prime.
    for point in points:
        if is_prime(point.x) and is_prime(point.y):
            point.x, point.y = point.y, point.x
            return point
    return 'Not found'
def run_test_practice_problem4d():
""" Tests the practice_problem4d function. """
# ------------------------------------------------------------------
# 5 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem4d((6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5))
# and compare the returned value against 40 (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4d,
[(6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)],
17 + 3 + 7 + 13),
st.SimpleTestCase(practice_problem4d,
[(7, 7, 7, 7, 7, 4, 4, 8, 5, 5, 6)],
0),
st.SimpleTestCase(practice_problem4d,
[(2, 3, 5, 7, 5, 3, 2)],
2 + 3 + 5 + 7 + 5 + 3),
st.SimpleTestCase(practice_problem4d,
[(11, 3, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)],
11 + 3 + 17 + 3 + 7 + 13),
st.SimpleTestCase(practice_problem4d,
[(6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 11, 5)],
17 + 3 + 7 + 13 + 7 + 11),
]
# Run the 5 tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem4d', tests)
def practice_problem4d(sequence):
"""
What comes in: A non-empty sequence of integers.
What goes out: An integer that is the sum of all the items
in the given sequence such that:
-- the item is a prime number, AND
-- the immediate successor of the item
is a DIFFERENT prime number.
Side effects: None.
Examples:
Given sequence (6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)
-- this function returns 17 + 3 + 7 + 13, which is 40,
because:
6 (at index 0) is NOT prime - do NOT include 6 in the sum
80 (at index 1) is NOT prime - do NOT include 80 in the sum
17 (at index 2) IS prime AND the next item (13, at index 3)
is a DIFFERENT prime - ** DO ** include 17 in the sum
13 (at index 3) IS prime but the next item (40, at index 4)
is NOT prime - do NOT include 13 in the sum
40 (at index 4) is NOT prime - do NOT include 40 in the sum
3 (at index 5) IS prime AND the next item (3, at index 6)
IS prime but is NOT a DIFFERENT prime -
do NOT include 3 in the sum
3 (at index 6) IS prime AND the next item (7, at index 7)
is a DIFFERENT prime - ** DO ** include 3 in the sum
7 (at index 7) IS prime AND the next item (13, at index 8)
is a DIFFERENT prime - ** DO ** include 7 in the sum
13 (at index 8) IS prime AND the next item (7, at index 9)
is a DIFFERENT prime - ** DO ** include 13 in the sum
7 (at index 9) IS prime but the next item (12, at index 10)
is NOT prime - do NOT include 7 in the sum
12 (at index 10) is NOT prime - do NOT include 12 in the sum
5 (at index 11) IS prime but there is NO item after it
- do NOT include 5 in the sum
Given sequence (7, 7, 7, 7, 7, 4, 4, 8, 5, 5, 6)
-- this function returns 0
Given sequence (2, 3, 5, 7, 5, 3, 2)
-- this function returns 2 + 3 + 5 + 7 + 5 + 3, which is 25
Type hints:
:type sequence: (list | tuple) of int
:rtype: int
"""
####################################################################
# DONE: 5. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
####################################################################
    # Sum each prime that is immediately followed by a different prime.
    total = 0
    for k in range(len(sequence) - 1):
        if (is_prime(sequence[k]) and is_prime(sequence[k + 1])
                and sequence[k] != sequence[k + 1]):
            total += sequence[k]
    return total
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 40.865604
| 114
| 0.474638
|
e23ad3b91595e1ac297c4aca33cc50c88662c8b4
| 3,099
|
py
|
Python
|
sui/dicewars/server/game/area.py
|
vmarcin/FIT-projects
|
69e3e0f1f271aefd3135f92a681738a4f1a24395
|
[
"MIT"
] | null | null | null |
sui/dicewars/server/game/area.py
|
vmarcin/FIT-projects
|
69e3e0f1f271aefd3135f92a681738a4f1a24395
|
[
"MIT"
] | null | null | null |
sui/dicewars/server/game/area.py
|
vmarcin/FIT-projects
|
69e3e0f1f271aefd3135f92a681738a4f1a24395
|
[
"MIT"
] | null | null | null |
import logging
class Area(object):
"""Object representing a single area.
"""
def __init__(self, name, adjacent_areas):
"""
Parameters
----------
name : int
Identifier of the area
adjacent_areas : list of int
Names of adjacent areas
Attributes
----------
adjacent_areas : list of Area
Adjacent areas
adjacent_areas_names : list of int
Names of adjacent areas
dice : int
Number of dice in the area
owner_name : int
Name of the player controlling the area
"""
self.name = name
self.adjacent_areas_names = adjacent_areas
self.logger = logging.getLogger('SERVER')
self.adjacent_areas = []
self.dice = 0
self.owner_name = None
def add_adjacent_areas(self, board):
"""Add instances of adjacent areas to the list
Parameters
----------
board : Board
Instance of the Board class
"""
for name in self.adjacent_areas_names:
self.adjacent_areas.append(board.areas[name])
def add_die(self):
"""Add die to area's dice
Returns
-------
bool
False if area already contains 8 dice, otherwise True
"""
if self.dice >= 8:
self.dice = 8
return False
else:
self.dice += 1
return True
def get_adjacent_areas(self):
"""Get list of adjacent areas
Returns
-------
list of Area
Adjacent areas
"""
return self.adjacent_areas
def get_adjacent_areas_names(self):
"""Get list of adjacent areas' names
Returns
-------
list of int
Names of adjacent areas
"""
return self.adjacent_areas_names
def get_dice(self):
"""Get number of dice
Returns
-------
int
Number of dice
"""
return self.dice
def get_name(self):
"""Get area's name
Returns
-------
int
Identifier of the area
"""
return self.name
def get_owner_name(self):
"""Get owner's name
Returns
-------
int or bool
Returns owner's name if area has an owner, otherwise False
"""
if not self.owner_name:
return False
else:
return self.owner_name
def set_dice(self, dice):
"""Set area's dice to a certain value
Parameters
----------
dice : int
Number of dice
"""
if dice < 1 or dice > 8:
self.logger.warning("Trying to assign {0} dice to area {1}".format(dice, self.name))
else:
self.dice = dice
def set_owner_name(self, name):
"""Set owner's name
Parameters
----------
name : int
Name of the owner
"""
self.owner_name = name
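# --- Usage sketch (not part of the original module) ---
# Illustrative only: _FakeBoard is a hypothetical stand-in; the real server
# passes its Board instance, which exposes an `areas` mapping by name.
if __name__ == "__main__":
    class _FakeBoard:
        def __init__(self, areas):
            self.areas = areas
    a1, a2 = Area(1, [2]), Area(2, [1])
    a1.add_adjacent_areas(_FakeBoard({1: a1, 2: a2}))
    a1.set_owner_name(7)
    a1.set_dice(3)
    print(a1.get_owner_name(), a1.get_dice(), a1.get_adjacent_areas_names())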
| 22.786765
| 96
| 0.500484
|
a9c9978436a702f197a1108091ca097435c6f40d
| 1,008
|
py
|
Python
|
Linked List/delete_middle.py
|
Anshir08/D.S.A.
|
39d518a6edf0b9a9468c6b44e7795275de9b39e6
|
[
"MIT"
] | null | null | null |
Linked List/delete_middle.py
|
Anshir08/D.S.A.
|
39d518a6edf0b9a9468c6b44e7795275de9b39e6
|
[
"MIT"
] | null | null | null |
Linked List/delete_middle.py
|
Anshir08/D.S.A.
|
39d518a6edf0b9a9468c6b44e7795275de9b39e6
|
[
"MIT"
] | null | null | null |
from insertion import linkedList, node
def deleteMiddle(head):
if head is None:
return None
if head.next is None:
del head
return None
count = 0
copyhead = head
while copyhead:
count += 1
copyhead = copyhead.next
copyhead = head
mid = count//2
while mid>1:
mid-=1
copyhead = copyhead.next
    # Unlink the middle node by pointing past it.
    copyhead.next = copyhead.next.next
return head
def printList(ptr):
while (ptr != None):
print(ptr.data, end = '->')
ptr = ptr.next
print('NULL')
if __name__=='__main__':
# Start with the empty list
head = node(1)
head.next = node(2)
head.next.next = node(3)
head.next.next.next = node(4)
print("Gven Linked List")
printList(head)
head = deleteMiddle(head)
print("Linked List after deletion of middle")
printList(head)
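# --- Alternative sketch (not from the original file) ---
# The same deletion can be done in one pass with slow/fast pointers; this is
# only an illustrative variant and assumes the same `node` class as above.
def deleteMiddleSinglePass(head):
    if head is None or head.next is None:
        return None
    slow, fast, prev = head, head, None
    while fast and fast.next:
        prev = slow
        slow = slow.next
        fast = fast.next.next
    # slow is now the middle node; unlink it
    prev.next = slow.next
    return head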
| 18.327273
| 50
| 0.536706
|
3c6b3ad8e91affe6af768061301deab46db634c2
| 5,041
|
py
|
Python
|
discogs_client/tests/test_models.py
|
muce/va
|
973adec835d6d10b2fb79163346cb1c2aee4035b
|
[
"MIT"
] | 1
|
2017-05-15T15:32:30.000Z
|
2017-05-15T15:32:30.000Z
|
discogs_client/tests/test_models.py
|
muce/va
|
973adec835d6d10b2fb79163346cb1c2aee4035b
|
[
"MIT"
] | null | null | null |
discogs_client/tests/test_models.py
|
muce/va
|
973adec835d6d10b2fb79163346cb1c2aee4035b
|
[
"MIT"
] | null | null | null |
import unittest
from discogs_client.models import Artist, Release
from discogs_client.tests import DiscogsClientTestCase
from discogs_client.exceptions import ConfigurationError, HTTPError
class ModelsTestCase(DiscogsClientTestCase):
def test_artist(self):
"""Artists can be fetched and parsed"""
a = self.d.artist(1)
self.assertEqual(a.name, 'Persuader, The')
def test_release(self):
"""Releases can be fetched and parsed"""
r = self.d.release(1)
self.assertEqual(r.title, 'Stockholm')
def test_master(self):
"""Masters can be fetched and parsed"""
m = self.d.master(4242)
self.assertEqual(len(m.tracklist), 4)
def test_user(self):
"""Users can be fetched and parsed"""
u = self.d.user('example')
self.assertEqual(u.username, 'example')
self.assertEqual(u.name, 'Example Sampleman')
def test_search(self):
results = self.d.search('trash80')
self.assertEqual(len(results), 13)
self.assertTrue(isinstance(results[0], Artist))
self.assertTrue(isinstance(results[1], Release))
def test_fee(self):
fee = self.d.fee_for(20.5, currency='EUR')
self.assertEqual(fee.currency, 'USD')
self.assertAlmostEqual(fee.value, 1.57)
def test_invalid_artist(self):
"""Invalid artist raises HTTPError"""
self.assertRaises(HTTPError, lambda: self.d.artist(0).name)
def test_invalid_release(self):
"""Invalid release raises HTTPError"""
self.assertRaises(HTTPError, lambda: self.d.release(0).title)
def test_http_error(self):
"""HTTPError provides useful information"""
self.assertRaises(HTTPError, lambda: self.d.artist(0).name)
try:
self.d.artist(0).name
except HTTPError as e:
self.assertEqual(e.status_code, 404)
self.assertEqual('404: Resource not found.', str(e))
def test_parent_label(self):
"""Test parent_label / sublabels relationship"""
l = self.d.label(1)
l2 = self.d.label(31405)
self.assertTrue(l.parent_label is None)
self.assertTrue(l2 in l.sublabels)
self.assertEqual(l2.parent_label, l)
def test_master_versions(self):
"""Test main_release / versions relationship"""
m = self.d.master(4242)
r = self.d.release(79)
v = m.versions
self.assertEqual(len(v), 2)
self.assertTrue(r in v)
self.assertEqual(r.master, m)
r2 = self.d.release(3329867)
self.assertTrue(r2.master is None)
def test_user_writable(self):
"""User profile can be updated"""
u = self.d.user('example')
u.name # Trigger a fetch
method, url, data, headers = self.d._fetcher.requests[0]
self.assertEqual(method, 'GET')
self.assertEqual(url, '/users/example')
new_home_page = 'http://www.discogs.com'
u.home_page = new_home_page
self.assertTrue('home_page' in u.changes)
self.assertFalse('profile' in u.changes)
u.save()
# Save
method, url, data, headers = self.d._fetcher.requests[1]
self.assertEqual(method, 'POST')
self.assertEqual(url, '/users/example')
self.assertEqual(data, {'home_page': new_home_page})
# Refresh
method, url, data, headers = self.d._fetcher.requests[2]
self.assertEqual(method, 'GET')
self.assertEqual(url, '/users/example')
def test_wantlist(self):
"""Wantlists can be manipulated"""
# Fetch the user/wantlist from the filesystem
u = self.d.user('example')
self.assertEqual(len(u.wantlist), 3)
# Stub out expected responses
self.m._fetcher.fetcher.responses = {
'/users/example/wants/5': ('{"id": 5}', 201),
'/users/example/wants/1': ('', 204),
}
# Now bind the user to the memory client
u.client = self.m
u.wantlist.add(5)
method, url, data, headers = self.m._fetcher.last_request
self.assertEqual(method, 'PUT')
self.assertEqual(url, '/users/example/wants/5')
u.wantlist.remove(1)
method, url, data, headers = self.m._fetcher.last_request
self.assertEqual(method, 'DELETE')
self.assertEqual(url, '/users/example/wants/1')
def test_delete_object(self):
"""Can request DELETE on an APIObject"""
u = self.d.user('example')
u.delete()
method, url, data, headers = self.d._fetcher.last_request
self.assertEqual(method, 'DELETE')
self.assertEqual(url, '/users/example')
def test_identity(self):
"""OAuth identity returns a User"""
me = self.d.identity()
self.assertEqual(me.data['consumer_name'], 'Test Client')
self.assertEqual(me, self.d.user('example'))
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(ModelsTestCase)
return suite
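# --- Usage sketch (not part of the original test module) ---
# One way to drive the suite() helper directly, assuming the discogs_client
# test fixtures are importable in the current environment.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())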
| 33.384106
| 71
| 0.623289
|
f286e4df5951274517089948eb58b0cafbefaf7c
| 22,312
|
py
|
Python
|
aries_cloudagent/protocols/issue_credential/v1_0/routes.py
|
off-grid-block/aca-py-controllers
|
98269181c392460f2fddc9f8d0cfb240f4f3acec
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/protocols/issue_credential/v1_0/routes.py
|
off-grid-block/aca-py-controllers
|
98269181c392460f2fddc9f8d0cfb240f4f3acec
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/protocols/issue_credential/v1_0/routes.py
|
off-grid-block/aca-py-controllers
|
98269181c392460f2fddc9f8d0cfb240f4f3acec
|
[
"Apache-2.0"
] | null | null | null |
"""Credential exchange admin routes."""
from aiohttp import web
from aiohttp_apispec import docs, request_schema, response_schema
from marshmallow import fields, Schema
from ....connections.models.connection_record import ConnectionRecord
from ....holder.base import BaseHolder
from ....messaging.credential_definitions.util import CRED_DEF_TAGS
from ....messaging.valid import (
INDY_CRED_DEF_ID,
INDY_DID,
INDY_SCHEMA_ID,
INDY_VERSION,
UUIDFour,
)
from ....storage.error import StorageNotFoundError
from ...problem_report.message import ProblemReport
from .manager import CredentialManager
from .messages.credential_proposal import CredentialProposal
from .messages.inner.credential_preview import (
CredentialPreview,
CredentialPreviewSchema,
)
from .models.credential_exchange import (
V10CredentialExchange,
V10CredentialExchangeSchema,
)
class V10AttributeMimeTypesResultSchema(Schema):
"""Result schema for credential attribute MIME types by credential definition."""
class V10CredentialExchangeListResultSchema(Schema):
"""Result schema for Aries#0036 v1.0 credential exchange query."""
results = fields.List(
fields.Nested(V10CredentialExchangeSchema),
description="Aries#0036 v1.0 credential exchange records",
)
class V10CredentialProposalRequestSchema(Schema):
"""Request schema for sending credential proposal admin message."""
connection_id = fields.UUID(
description="Connection identifier",
required=True,
example=UUIDFour.EXAMPLE, # typically but not necessarily a UUID4
)
cred_def_id = fields.Str(
description="Credential definition identifier",
required=False,
**INDY_CRED_DEF_ID,
)
schema_id = fields.Str(
description="Schema identifier",
required=False,
**INDY_SCHEMA_ID,
)
schema_issuer_did = fields.Str(
description="Schema issuer DID",
required=False,
**INDY_DID,
)
schema_name = fields.Str(
description="Schema name",
required=False,
example="preferences",
)
schema_version = fields.Str(
description="Schema version",
required=False,
**INDY_VERSION,
)
issuer_did = fields.Str(
description="Credential issuer DID",
required=False,
**INDY_DID,
)
comment = fields.Str(description="Human-readable comment", required=False)
credential_proposal = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialOfferRequestSchema(Schema):
"""Request schema for sending credential offer admin message."""
connection_id = fields.UUID(
description="Connection identifier",
required=True,
example=UUIDFour.EXAMPLE, # typically but not necessarily a UUID4
)
cred_def_id = fields.Str(
description="Credential definition identifier",
required=True,
**INDY_CRED_DEF_ID,
)
auto_issue = fields.Bool(
description=(
"Whether to respond automatically to credential requests, creating "
"and issuing requested credentials"
),
required=False,
default=False,
)
comment = fields.Str(description="Human-readable comment", required=False)
credential_preview = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialIssueRequestSchema(Schema):
"""Request schema for sending credential issue admin message."""
comment = fields.Str(description="Human-readable comment", required=False)
credential_preview = fields.Nested(CredentialPreviewSchema, required=True)
class V10CredentialProblemReportRequestSchema(Schema):
"""Request schema for sending problem report."""
explain_ltxt = fields.Str(required=True)
@docs(tags=["issue-credential"], summary="Get attribute MIME types from wallet")
@response_schema(V10AttributeMimeTypesResultSchema(), 200)
async def attribute_mime_types_get(request: web.BaseRequest):
"""
Request handler for getting credential attribute MIME types.
Args:
request: aiohttp request object
Returns:
The MIME types response
"""
context = request.app["request_context"]
credential_id = request.match_info["credential_id"]
holder: BaseHolder = await context.inject(BaseHolder)
return web.json_response(await holder.get_mime_type(credential_id))
@docs(tags=["issue-credential"], summary="Fetch all credential exchange records")
@response_schema(V10CredentialExchangeListResultSchema(), 200)
async def credential_exchange_list(request: web.BaseRequest):
"""
Request handler for searching connection records.
Args:
request: aiohttp request object
Returns:
The connection list response
"""
context = request.app["request_context"]
tag_filter = {}
if "thread_id" in request.query and request.query["thread_id"] != "":
tag_filter["thread_id"] = request.query["thread_id"]
post_filter = {}
for param_name in ("connection_id", "role", "state"):
if param_name in request.query and request.query[param_name] != "":
post_filter[param_name] = request.query[param_name]
records = await V10CredentialExchange.query(context, tag_filter, post_filter)
return web.json_response({"results": [record.serialize() for record in records]})
@docs(tags=["issue-credential"], summary="Fetch a single credential exchange record")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_retrieve(request: web.BaseRequest):
"""
Request handler for fetching single connection record.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["cred_ex_id"]
try:
record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
return web.json_response(record.serialize())
@docs(tags=["issue-credential"], summary="Send credential, automating entire flow")
@request_schema(V10CredentialProposalRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send(request: web.BaseRequest):
"""
Request handler for sending credential from issuer to holder from attr values.
If both issuer and holder are configured for automatic responses, the operation
ultimately results in credential issue; otherwise, the result waits on the first
response not automated; the credential exchange record retains state regardless.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
comment = body.get("comment")
connection_id = body.get("connection_id")
preview_spec = body.get("credential_proposal")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_proposal must be provided.")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_proposal = CredentialProposal(
comment=comment,
credential_proposal=CredentialPreview.deserialize(preview_spec),
**{t: body.get(t) for t in CRED_DEF_TAGS if body.get(t)},
)
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.prepare_send(
connection_id, credential_proposal=credential_proposal
)
await outbound_handler(
credential_offer_message, connection_id=credential_exchange_record.connection_id
)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send issuer a credential proposal")
@request_schema(V10CredentialProposalRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_proposal(request: web.BaseRequest):
"""
Request handler for sending credential proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
comment = body.get("comment")
preview_spec = body.get("credential_proposal")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_proposal must be provided.")
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_manager = CredentialManager(context)
credential_exchange_record = await credential_manager.create_proposal(
connection_id,
comment=comment,
credential_preview=credential_preview,
**{t: body.get(t) for t in CRED_DEF_TAGS if body.get(t)},
)
await outbound_handler(
CredentialProposal.deserialize(
credential_exchange_record.credential_proposal_dict
),
connection_id=connection_id,
)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"],
summary="Send holder a credential offer, free from reference to any proposal",
)
@request_schema(V10CredentialOfferRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_free_offer(request: web.BaseRequest):
"""
Request handler for sending free credential offer.
    An issuer initiates such a credential offer, which is free of reference
    to any holder-initiated corresponding proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
connection_id = body.get("connection_id")
cred_def_id = body.get("cred_def_id")
auto_issue = body.get(
"auto_issue", context.settings.get("debug.auto_respond_credential_request")
)
comment = body.get("comment")
preview_spec = body.get("credential_preview")
if not cred_def_id:
raise web.HTTPBadRequest(reason="cred_def_id is required")
if auto_issue and not preview_spec:
raise web.HTTPBadRequest(
reason="If auto_issue is set to"
+ " true then credential_preview must also be provided."
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
if preview_spec:
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_proposal = CredentialProposal(
comment=comment,
credential_proposal=credential_preview,
cred_def_id=cred_def_id,
)
credential_proposal_dict = credential_proposal.serialize()
else:
credential_proposal_dict = None
credential_exchange_record = V10CredentialExchange(
connection_id=connection_id,
initiator=V10CredentialExchange.INITIATOR_SELF,
credential_definition_id=cred_def_id,
credential_proposal_dict=credential_proposal_dict,
auto_issue=auto_issue,
)
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.create_offer(
credential_exchange_record, comment=comment
)
await outbound_handler(credential_offer_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"],
summary="Send holder a credential offer in reference to a proposal",
)
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_bound_offer(request: web.BaseRequest):
"""
Request handler for sending bound credential offer.
A holder initiates this sequence with a credential proposal; this message
responds with an offer bound to the proposal.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_PROPOSAL_RECEIVED
)
connection_id = credential_exchange_record.connection_id
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_offer_message,
) = await credential_manager.create_offer(credential_exchange_record, comment=None)
await outbound_handler(credential_offer_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send a credential request")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_send_request(request: web.BaseRequest):
"""
Request handler for sending credential request.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_OFFER_RECEIVED
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_request_message,
) = await credential_manager.create_request(
credential_exchange_record, connection_record.my_did
)
await outbound_handler(credential_request_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(tags=["issue-credential"], summary="Send a credential")
@request_schema(V10CredentialIssueRequestSchema())
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_issue(request: web.BaseRequest):
"""
Request handler for sending credential.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
body = await request.json()
comment = body.get("comment")
preview_spec = body.get("credential_preview")
if not preview_spec:
raise web.HTTPBadRequest(reason="credential_preview must be provided.")
credential_exchange_id = request.match_info["cred_ex_id"]
cred_exch_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = cred_exch_record.connection_id
assert cred_exch_record.state == V10CredentialExchange.STATE_REQUEST_RECEIVED
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_preview = CredentialPreview.deserialize(preview_spec)
credential_manager = CredentialManager(context)
(
cred_exch_record,
credential_issue_message,
) = await credential_manager.issue_credential(
cred_exch_record,
comment=comment,
credential_values=credential_preview.attr_dict(decode=False),
)
await outbound_handler(credential_issue_message, connection_id=connection_id)
return web.json_response(cred_exch_record.serialize())
@docs(tags=["issue-credential"], summary="Store a received credential")
@response_schema(V10CredentialExchangeSchema(), 200)
async def credential_exchange_store(request: web.BaseRequest):
"""
Request handler for storing credential.
Args:
request: aiohttp request object
Returns:
The credential exchange record
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
connection_id = credential_exchange_record.connection_id
assert credential_exchange_record.state == (
V10CredentialExchange.STATE_CREDENTIAL_RECEIVED
)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context, connection_id
)
except StorageNotFoundError:
raise web.HTTPBadRequest()
if not connection_record.is_ready:
raise web.HTTPForbidden()
credential_manager = CredentialManager(context)
(
credential_exchange_record,
credential_stored_message,
) = await credential_manager.store_credential(credential_exchange_record)
await outbound_handler(credential_stored_message, connection_id=connection_id)
return web.json_response(credential_exchange_record.serialize())
@docs(
tags=["issue-credential"], summary="Send a problem report for credential exchange"
)
@request_schema(V10CredentialProblemReportRequestSchema())
async def credential_exchange_problem_report(request: web.BaseRequest):
"""
Request handler for sending problem report.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
outbound_handler = request.app["outbound_message_router"]
credential_exchange_id = request.match_info["cred_ex_id"]
body = await request.json()
try:
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
error_result = ProblemReport(explain_ltxt=body["explain_ltxt"])
error_result.assign_thread_id(credential_exchange_record.thread_id)
await outbound_handler(
error_result, connection_id=credential_exchange_record.connection_id
)
return web.json_response({})
@docs(
tags=["issue-credential"], summary="Remove an existing credential exchange record"
)
async def credential_exchange_remove(request: web.BaseRequest):
"""
Request handler for removing a credential exchange record.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
credential_exchange_id = request.match_info["cred_ex_id"]
try:
credential_exchange_record = await V10CredentialExchange.retrieve_by_id(
context, credential_exchange_id
)
except StorageNotFoundError:
raise web.HTTPNotFound()
await credential_exchange_record.delete_record(context)
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[
web.get(
"/issue-credential/mime-types/{credential_id}", attribute_mime_types_get
),
web.get("/issue-credential/records", credential_exchange_list),
web.get(
"/issue-credential/records/{cred_ex_id}", credential_exchange_retrieve
),
web.post("/issue-credential/send", credential_exchange_send),
web.post(
"/issue-credential/send-proposal", credential_exchange_send_proposal
),
web.post(
"/issue-credential/send-offer", credential_exchange_send_free_offer
),
web.post(
"/issue-credential/records/{cred_ex_id}/send-offer",
credential_exchange_send_bound_offer,
),
web.post(
"/issue-credential/records/{cred_ex_id}/send-request",
credential_exchange_send_request,
),
web.post(
"/issue-credential/records/{cred_ex_id}/issue",
credential_exchange_issue,
),
web.post(
"/issue-credential/records/{cred_ex_id}/store",
credential_exchange_store,
),
web.post(
"/issue-credential/records/{cred_ex_id}/problem-report",
credential_exchange_problem_report,
),
web.post(
"/issue-credential/records/{cred_ex_id}/remove",
credential_exchange_remove,
),
]
)
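# --- Usage sketch (not part of the original module) ---
# Illustrative only: in ACA-Py the admin server calls register() itself, and the
# handlers above expect "request_context" and "outbound_message_router" to be
# set on the application. The placeholders below are hypothetical.
async def _example_setup():
    app = web.Application()
    app["request_context"] = None            # placeholder for an injected request context
    app["outbound_message_router"] = None    # placeholder for an outbound message router
    await register(app)
    return app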
| 31.469676
| 88
| 0.708946
|
69927072fb32826b173e2359ec82758fdb16f56b
| 28
|
py
|
Python
|
monster/__init__.py
|
ConnorSMaynes/monster
|
55182a243d68c5e2392b36fe89c90a8e7c3f7048
|
[
"MIT"
] | 2
|
2019-07-19T02:28:10.000Z
|
2021-01-17T11:48:30.000Z
|
monster/__init__.py
|
ConnorSMaynes/monster
|
55182a243d68c5e2392b36fe89c90a8e7c3f7048
|
[
"MIT"
] | null | null | null |
monster/__init__.py
|
ConnorSMaynes/monster
|
55182a243d68c5e2392b36fe89c90a8e7c3f7048
|
[
"MIT"
] | 3
|
2019-07-19T02:28:13.000Z
|
2021-12-09T05:50:29.000Z
|
from .monster import Monster
| 28
| 28
| 0.857143
|
666112bbb218859cb0654924f3b4ecab8f38180a
| 11,479
|
py
|
Python
|
utils3d.py
|
nsfzyzz/boundary_thickness
|
f47701b882a52f18cfa8614dac03fc58db1b2d87
|
[
"MIT"
] | 21
|
2020-11-15T03:19:40.000Z
|
2022-02-24T10:32:39.000Z
|
utils3d.py
|
nsfzyzz/boundary_thickness
|
f47701b882a52f18cfa8614dac03fc58db1b2d87
|
[
"MIT"
] | null | null | null |
utils3d.py
|
nsfzyzz/boundary_thickness
|
f47701b882a52f18cfa8614dac03fc58db1b2d87
|
[
"MIT"
] | 1
|
2021-12-29T04:15:16.000Z
|
2021-12-29T04:15:16.000Z
|
import numpy as np
import numpy.linalg as LA
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from tqdm import tqdm, trange
from torch.utils.data import TensorDataset
import plotly.offline  # needed for the plotly.offline.plot() calls below
from plotly.subplots import make_subplots
import plotly.graph_objects as go
def visualize3D(vis_net, x, y, dir1, dir2, dir3, len1 = 1, len2 = 1, len3 = 1, show_figure = True, save_figure = False, file_path = './temp.html'):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Normalize the three directions
print('Take three orthogonal directions')
    dir1 = dir1 / torch.norm(dir1, p=2)
    dir2 = dir2 / torch.norm(dir2, p=2)
    dir3 = dir3 / torch.norm(dir3, p=2)
# Check if the three directions are orthogonal to each other
inner_product1 = torch.abs(torch.dot(dir1.view(-1), dir2.view(-1)))
inner_product2 = torch.abs(torch.dot(dir1.view(-1), dir3.view(-1)))
inner_product3 = torch.abs(torch.dot(dir2.view(-1), dir3.view(-1)))
check_inner_product1 = (inner_product1<0.01).item()
check_inner_product2 = (inner_product2<0.01).item()
check_inner_product3 = (inner_product3<0.01).item()
assert check_inner_product1, "The three directions are not orthogonal"
assert check_inner_product2, "The three directions are not orthogonal"
assert check_inner_product3, "The three directions are not orthogonal"
# Generate the visualization and data grid
#lenx, leny, lenz = 51, 51, 51
xx, yy, zz = np.mgrid[-len1:len1:50j, -len2:len2:50j, -len3:len3:50j]
t = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
vis_grid = torch.from_numpy(t).float().to(device)
dirs_mat = torch.cat([dir1.reshape(1, -1), dir2.reshape(1, -1), dir3.reshape(1, -1)]).to(device)
x_grid = torch.mm(vis_grid, dirs_mat).reshape(len(vis_grid), 3, 32, 32).to('cpu') + x
grid_output = []
grid_loader = torch.utils.data.DataLoader(TensorDataset(x_grid), batch_size=64, shuffle=False, num_workers=2)
vis_net.eval()
    softmax1 = nn.Softmax(dim=1)  # softmax over the class dimension
for grid_points in tqdm(grid_loader):
grid_points = grid_points[0].to(device)
grid_ys = vis_net(grid_points)
grid_ys = softmax1(grid_ys)
grid_ys = grid_ys[:,y].detach().cpu().numpy()
grid_output.append(grid_ys)
y_pred0 = np.concatenate(grid_output)
# and plot everything
fig = go.Figure(data=go.Volume(
x=xx.flatten(),
y=yy.flatten(),
z=zz.flatten(),
value=y_pred0.flatten(),
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
))
if show_figure:
fig.show()
if save_figure:
plotly.offline.plot(fig, filename=file_path)
return fig
def Assert_three_orthogonal(dirs):
dir1, dir2, dir3 = dirs[0], dirs[1], dirs[2]
# Check if the three directions are orthogonal to each other
inner_product1 = torch.abs(torch.dot(dir1.view(-1), dir2.view(-1)))
inner_product2 = torch.abs(torch.dot(dir1.view(-1), dir3.view(-1)))
inner_product3 = torch.abs(torch.dot(dir2.view(-1), dir3.view(-1)))
check_inner_product1 = (inner_product1<0.01).item()
check_inner_product2 = (inner_product2<0.01).item()
check_inner_product3 = (inner_product3<0.01).item()
assert check_inner_product1, "The three directions are not orthogonal"
assert check_inner_product2, "The three directions are not orthogonal"
assert check_inner_product3, "The three directions are not orthogonal"
def Compute_grid_outputs(vis_net, x, y, dirs, lens=[[-1,1],[-1,1],[-1,1]], resolution = "high"):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Generate the visualization and data grid
if resolution == "high":
xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:50j, lens[1][0]:lens[1][1]:50j, lens[2][0]:lens[2][1]:50j]
elif resolution == "medium":
xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:20j, lens[1][0]:lens[1][1]:20j, lens[2][0]:lens[2][1]:20j]
elif resolution == "low":
xx, yy, zz = np.mgrid[lens[0][0]:lens[0][1]:8j, lens[1][0]:lens[1][1]:8j, lens[2][0]:lens[2][1]:8j]
else:
raise NameError('The resolution has to be either high, medium, or low.')
t = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
vis_grid = torch.from_numpy(t).float().to(device)
dirs_mat = torch.cat([dirs[0].reshape(1, -1), dirs[1].reshape(1, -1), dirs[2].reshape(1, -1)]).to(device)
x_grid = torch.mm(vis_grid, dirs_mat).reshape(len(vis_grid), 3, 32, 32).to('cpu')
    x_grid = x_grid + x
grid_output = []
grid_loader = torch.utils.data.DataLoader(TensorDataset(x_grid), batch_size=64, shuffle=False, num_workers=2)
vis_net.eval()
    softmax1 = nn.Softmax(dim=1)  # softmax over the class dimension
for grid_points in tqdm(grid_loader):
grid_points = grid_points[0].to(device)
grid_ys = vis_net(grid_points)
grid_ys = softmax1(grid_ys)
grid_ys = grid_ys[:,y].detach().cpu().numpy()
grid_output.append(grid_ys)
y_pred0 = np.concatenate(grid_output)
return xx.flatten(), yy.flatten(), zz.flatten(), y_pred0.flatten()
def find_specific_class(specific_class, labels):
img_ind = -1
for img_ind in range(labels.shape[0]):
if labels[img_ind] == specific_class:
break
return img_ind
def run_many(PGD_attack,
data_loader,
model,
subplot_grid = [2,2],
num_adv_directions = 1,
lens = [[-1,1],[-1,1],[-1,1]],
resolution = "high",
height = 1000,
width = 1000,
show_figure = False,
save_figure = False,
file_path = './temp.html',
specific_class = -1,
title = "",
if_back_to_cpu = False):
# Create a figure grid
fig = make_subplots(
rows=subplot_grid[0], cols=subplot_grid[1],
specs = [[{'type': 'volume'} for _ in range(subplot_grid[1])] for ind2 in range(subplot_grid[0])])
num_sub_figures_plotted = 0
for i, (images, labels) in enumerate(data_loader):
if if_back_to_cpu:
images = images.cpu()
labels = labels.cpu()
num_figures_3D = subplot_grid[0]*subplot_grid[1]
if num_sub_figures_plotted < num_figures_3D:
print(f"Plotting figure {num_sub_figures_plotted+1}/{num_figures_3D}.")
if specific_class == -1:
# This means that we do not need to find a specific class
img_ind = 0
else:
img_ind = find_specific_class(specific_class, labels)
if img_ind == -1:
# This means that this batch does not contain any image of this particular class
print("No img of label {0}! Go to the next batch.".format(specific_class))
# So, go to the next batch
continue
x = images[img_ind]
y = labels[img_ind]
dirs = [0, 0, 0]
if num_adv_directions == 0:
print("The number of adversarial directions is 0")
dirs[0] = torch.rand(x.shape) - 0.5
dirs[1] = torch.rand(x.shape) - 0.5
dirs[2] = torch.rand(x.shape) - 0.5
elif num_adv_directions == 1:
print("The number of adversarial directions is 1")
labels_change = torch.randint(1, 10, (labels.shape[0],))
wrong_labels = torch.remainder(labels_change + labels, 10)
adv_images = PGD_attack.__call__(images, wrong_labels)
dirs[0] = adv_images[img_ind].cpu() - x
dirs[1] = torch.rand(x.shape) - 0.5
dirs[2] = torch.rand(x.shape) - 0.5
elif num_adv_directions == 3:
print("The number of adversarial directions is 3")
for dir_ind in range(3):
labels_change = torch.ones(labels.shape[0]) * (dir_ind+1)
labels_change = labels_change.long()
wrong_labels = torch.remainder(labels_change + labels, 10)
adv_images = PGD_attack.__call__(images, wrong_labels)
dirs[dir_ind] = adv_images[img_ind].cpu() - x
else:
raise NameError('The number of adversarial directions has to be either 0, 1, or 3.')
# Normalize the first direction
dirs[0] = dirs[0]/torch.norm(dirs[0], p=2)
# Normalize the second direction
dirs[1] = dirs[1]/torch.norm(dirs[1], p=2)
dirs[1] = dirs[1] - torch.dot(dirs[1].view(-1), dirs[0].view(-1))*dirs[0]
dirs[1] = dirs[1]/torch.norm(dirs[1], p=2)
# Normalize the third direction
dirs[2] = dirs[2]/torch.norm(dirs[2], p=2)
proj1 = torch.dot(dirs[2].view(-1), dirs[0].view(-1))
proj2 = torch.dot(dirs[2].view(-1), dirs[1].view(-1))
dirs[2] = dirs[2] - proj1*dirs[0] - proj2*dirs[1]
dirs[2] = dirs[2]/torch.norm(dirs[2], p=2)
# Check if the three directions are orthogonal
Assert_three_orthogonal(dirs)
# Compute the grid outputs
x, y, z, value = Compute_grid_outputs(model, x, y, dirs, lens = lens, resolution = resolution)
# Figure out where to put the subfigure
row_ind = int(num_sub_figures_plotted/subplot_grid[1])
col_ind = num_sub_figures_plotted - row_ind*subplot_grid[1]
row_ind += 1
col_ind += 1
# Add a subfigure
fig.add_trace(
go.Volume(
x=x,
y=y,
z=z,
value=value,
isomin=0,
isomax=1,
opacity=0.1, # needs to be small to see through all surfaces
surface_count=17, # needs to be a large number for good volume rendering
),
row=row_ind, col=col_ind
)
num_sub_figures_plotted += 1
else:
break
if num_adv_directions == 0:
title_text="All three directions are random."
elif num_adv_directions == 1:
title_text="X direction is adversarial."
elif num_adv_directions == 3:
title_text="All three directions are adversarial (with different classes)."
else:
raise NameError('The number of adversarial directions has to be either 0, 1, or 3.')
title_text += " Exp name: "
title_text += title
fig.update_layout(height=height, width=width, title_text=title_text)
if show_figure:
fig.show()
if save_figure:
plotly.offline.plot(fig, filename=file_path)
return fig
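# --- Usage sketch (not part of the original file) ---
# Building three orthonormal random directions for visualize3D via Gram-Schmidt;
# `net`, `x` (a 3x32x32 tensor) and `y` (its label) are assumed to exist.
def make_orthonormal_dirs(shape):
    d1 = torch.rand(shape) - 0.5
    d1 = d1 / torch.norm(d1, p=2)
    d2 = torch.rand(shape) - 0.5
    d2 = d2 - torch.dot(d2.view(-1), d1.view(-1)) * d1
    d2 = d2 / torch.norm(d2, p=2)
    d3 = torch.rand(shape) - 0.5
    d3 = d3 - torch.dot(d3.view(-1), d1.view(-1)) * d1 - torch.dot(d3.view(-1), d2.view(-1)) * d2
    d3 = d3 / torch.norm(d3, p=2)
    return d1, d2, d3
# dir1, dir2, dir3 = make_orthonormal_dirs(x.shape)
# fig = visualize3D(net, x, y, dir1, dir2, dir3, show_figure=False)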
| 37.513072
| 147
| 0.566426
|
5fe536d1f281b3badb3873098e1d5423ca3dc557
| 3,254
|
py
|
Python
|
sympy/integrals/tests/test_deltafunctions.py
|
shipci/sympy
|
4b59927bed992b980c9b3faac01becb36feef26b
|
[
"BSD-3-Clause"
] | 319
|
2016-09-22T15:54:48.000Z
|
2022-03-18T02:36:58.000Z
|
sympy/integrals/tests/test_deltafunctions.py
|
curzel-it/KiPyCalc
|
909c783d5e6967ea58ca93f875106d8a8e3ca5db
|
[
"MIT"
] | 9
|
2016-11-03T21:56:41.000Z
|
2020-08-09T19:27:37.000Z
|
sympy/integrals/tests/test_deltafunctions.py
|
curzel-it/KiPyCalc
|
909c783d5e6967ea58ca93f875106d8a8e3ca5db
|
[
"MIT"
] | 27
|
2016-10-06T16:05:32.000Z
|
2022-03-18T02:37:00.000Z
|
from sympy import cos, DiracDelta, Heaviside, Function, pi, S, sin, symbols
from sympy.integrals.deltafunctions import change_mul, deltaintegrate
f = Function("f")
x_1, x_2, x, y, z = symbols("x_1 x_2 x y z")
def test_change_mul():
assert change_mul(x, x) == x
assert change_mul(x*y, x) == (None, None)
assert change_mul(x*y*DiracDelta(x), x) == (DiracDelta(x), x*y)
assert change_mul(x*y*DiracDelta(x)*DiracDelta(y), x) == \
(DiracDelta(x), x*y*DiracDelta(y))
assert change_mul(DiracDelta(x)**2, x) == \
(DiracDelta(x), DiracDelta(x))
assert change_mul(y*DiracDelta(x)**2, x) == \
(DiracDelta(x), y*DiracDelta(x))
def test_deltaintegrate():
assert deltaintegrate(x, x) is None
assert deltaintegrate(x + DiracDelta(x), x) is None
assert deltaintegrate(DiracDelta(x, 0), x) == Heaviside(x)
for n in range(10):
assert deltaintegrate(DiracDelta(x, n + 1), x) == DiracDelta(x, n)
assert deltaintegrate(DiracDelta(x), x) == Heaviside(x)
assert deltaintegrate(DiracDelta(-x), x) == Heaviside(x)
assert deltaintegrate(DiracDelta(x - y), x) == Heaviside(x - y)
assert deltaintegrate(DiracDelta(y - x), x) == Heaviside(x - y)
assert deltaintegrate(x*DiracDelta(x), x) == 0
assert deltaintegrate((x - y)*DiracDelta(x - y), x) == 0
assert deltaintegrate(DiracDelta(x)**2, x) == DiracDelta(0)*Heaviside(x)
assert deltaintegrate(y*DiracDelta(x)**2, x) == \
y*DiracDelta(0)*Heaviside(x)
assert deltaintegrate(DiracDelta(x, 1)**2, x) is None
assert deltaintegrate(y*DiracDelta(x, 1)**2, x) is None
assert deltaintegrate(DiracDelta(x) * f(x), x) == f(0) * Heaviside(x)
assert deltaintegrate(DiracDelta(-x) * f(x), x) == f(0) * Heaviside(x)
assert deltaintegrate(DiracDelta(x - 1) * f(x), x) == f(1) * Heaviside(x - 1)
assert deltaintegrate(DiracDelta(1 - x) * f(x), x) == f(1) * Heaviside(x - 1)
assert deltaintegrate(DiracDelta(x**2 + x - 2), x) == \
Heaviside(x - 1)/3 + Heaviside(x + 2)/3
p = cos(x)*(DiracDelta(x) + DiracDelta(x**2 - 1))*sin(x)*(x - pi)
assert deltaintegrate(p, x) - (-pi*(cos(1)*Heaviside(-1 + x)*sin(1)/2 - \
cos(1)*Heaviside(1 + x)*sin(1)/2) + \
cos(1)*Heaviside(1 + x)*sin(1)/2 + \
cos(1)*Heaviside(-1 + x)*sin(1)/2) == 0
p = x_2*DiracDelta(x - x_2)*DiracDelta(x_2 - x_1)
assert deltaintegrate(p, x_2) == x*DiracDelta(x - x_1)*Heaviside(x_2 - x)
p = x*y**2*z*DiracDelta(y - x)*DiracDelta(y - z)*DiracDelta(x - z)
assert deltaintegrate(p, y) == x**3*z*DiracDelta(x - z)**2*Heaviside(y - x)
assert deltaintegrate((x + 1)*DiracDelta(2*x), x) == S(1)/2 * Heaviside(x)
assert deltaintegrate((x + 1)*DiracDelta(2*x/3 + 4/S(9)), x) == \
S(1)/2 * Heaviside(x + S(2)/3)
a, b, c = symbols('a b c', commutative=False)
assert deltaintegrate(DiracDelta(x - y)*f(x - b)*f(x - a), x) == \
f(y - b)*f(y - a)*Heaviside(x - y)
p = f(x - a)*DiracDelta(x - y)*f(x - c)*f(x - b)
assert deltaintegrate(p, x) == f(y - a)*f(y - c)*f(y - b)*Heaviside(x - y)
p = DiracDelta(x - z)*f(x - b)*f(x - a)*DiracDelta(x - y)
assert deltaintegrate(p, x) == DiracDelta(y - z)*f(y - b)*f(y - a) * \
Heaviside(x - y)
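# --- Usage sketch (not part of the original test module) ---
# Direct use of deltaintegrate, consistent with the assertions above: the delta
# picks out the integrand at x = y.
if __name__ == "__main__":
    print(deltaintegrate(x**2 * DiracDelta(x - y), x))  # y**2*Heaviside(x - y)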
| 45.194444
| 81
| 0.600184
|
8d413858e0a519d660419daa8f4f6c8af15bc8e9
| 2,715
|
py
|
Python
|
src/pyconcepticon/commands/link.py
|
concepticon/pyconcepticon
|
bd336df18545b493f59ed8c22b636ded447dede1
|
[
"Apache-2.0"
] | 5
|
2019-06-04T02:17:03.000Z
|
2021-12-28T01:59:16.000Z
|
src/pyconcepticon/commands/link.py
|
armendk/pyconcepticon
|
7764d4b0900a37a76a6cb6ff9bdc8348502fa51d
|
[
"Apache-2.0"
] | 36
|
2019-02-06T11:50:21.000Z
|
2021-12-28T18:43:11.000Z
|
src/pyconcepticon/commands/link.py
|
armendk/pyconcepticon
|
7764d4b0900a37a76a6cb6ff9bdc8348502fa51d
|
[
"Apache-2.0"
] | 5
|
2019-09-18T13:34:19.000Z
|
2021-12-28T02:01:44.000Z
|
"""
Link concepts to concept sets for a given concept list.
Notes
-----
If either CONCEPTICON_GLOSS or CONCEPTICON_ID is given in the list, the other is added.
"""
from pyconcepticon.util import rewrite, CS_GLOSS, CS_ID
from pyconcepticon.cli_util import add_conceptlist, get_conceptlist
def register(parser):
add_conceptlist(parser)
def run(args):
cl = get_conceptlist(args, path_only=True)
rewrite(cl, Linker(cl.stem, args.repos.conceptsets.values()))
class Linker(object):
def __init__(self, clid, conceptsets):
self.clid = clid
self.concepts = {
CS_ID: {cs.id: cs.gloss for cs in conceptsets},
# maps ID to GLOSS
CS_GLOSS: {cs.gloss: cs.id for cs in conceptsets},
# maps GLOSS to ID
}
self._cid_index = None
self._cgloss_index = None
self._link_col = (None, None)
self._number_index = None
def __call__(self, i, row):
if i == 0:
assert (CS_ID in row) or (CS_GLOSS in row)
assert "NUMBER" in row
if (CS_ID in row) and (CS_GLOSS in row):
self._cid_index = row.index(CS_ID)
self._cgloss_index = row.index(CS_GLOSS)
else:
# either CONCEPTICON_ID or CONCEPTICON_GLOSS is given, and the
# other is missing.
add = {CS_ID: CS_GLOSS, CS_GLOSS: CS_ID}
for j, col in enumerate(row):
if col in add:
row = [add[col]] + row
self._link_col = (j, col)
break
if "ID" not in row:
self._number_index = row.index("NUMBER")
row = ["ID"] + row
return row
if self._link_col[1]:
val = self.concepts[self._link_col[1]].get(row[self._link_col[0]], "")
if not val:
print("unknown %s: %s" % (self._link_col[1], row[self._link_col[0]]))
row = [val] + row
else:
cid = self.concepts[CS_GLOSS].get(row[self._cgloss_index], "")
if not cid:
print("unknown CONCEPTICON_GLOSS: {0}".format(row[self._cgloss_index]))
elif cid != row[self._cid_index]:
if not row[self._cid_index]:
row[self._cid_index] = cid
else:
print(
"unknown CONCEPTICON_ID/GLOSS mismatch: %s %s"
% (row[self._cid_index], row[self._cgloss_index])
)
if self._number_index is not None:
row = ["%s-%s" % (self.clid, row[self._number_index])] + row
return row
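# --- Usage sketch (not part of the original module) ---
# Illustrative only: _Concept is a hypothetical stand-in for the objects normally
# supplied by args.repos.conceptsets; the IDs and glosses below are toy examples.
if __name__ == "__main__":
    class _Concept:
        def __init__(self, id, gloss):
            self.id, self.gloss = id, gloss
    linker = Linker("Example-2024-2", [_Concept("1", "HAND"), _Concept("2", "FOOT")])
    print(linker(0, ["NUMBER", "GLOSS", "CONCEPTICON_GLOSS"]))
    # -> ['ID', 'CONCEPTICON_ID', 'NUMBER', 'GLOSS', 'CONCEPTICON_GLOSS']
    print(linker(1, ["1", "hand", "HAND"]))
    # -> ['Example-2024-2-1', '1', '1', 'hand', 'HAND']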
| 34.807692
| 87
| 0.535175
|
b0e54c30d504e4267b7883ac20e05a9b7a138409
| 1,095
|
py
|
Python
|
fetchai/ledger/serialisation/__init__.py
|
devjsc/ledger-api-py
|
5dba7ad97a53c52cdf1407fbce96330dcd961e0b
|
[
"Apache-2.0"
] | null | null | null |
fetchai/ledger/serialisation/__init__.py
|
devjsc/ledger-api-py
|
5dba7ad97a53c52cdf1407fbce96330dcd961e0b
|
[
"Apache-2.0"
] | null | null | null |
fetchai/ledger/serialisation/__init__.py
|
devjsc/ledger-api-py
|
5dba7ad97a53c52cdf1407fbce96330dcd961e0b
|
[
"Apache-2.0"
] | null | null | null |
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
import hashlib
from typing import Union
from .transaction import encode_transaction, decode_transaction
def sha256_hash(data: Union[str, bytes], to_hex: bool = True) -> Union[str, bytes]:
    hasher = hashlib.sha256()
    # Accept both str and bytes, as the type hint declares.
    if isinstance(data, str):
        data = data.encode()
    hasher.update(data)
    if to_hex:
        return hasher.digest().hex()
    return hasher.digest()
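# --- Usage sketch (not part of the original module) ---
if __name__ == "__main__":
    print(sha256_hash(b"hello"))                       # 64-character hex digest
    print(len(sha256_hash(b"hello", to_hex=False)))    # 32 raw digest bytes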
| 35.322581
| 80
| 0.613699
|
cf1a1287c6362e0d1272d0d5ebcf0e2ec946aed7
| 1,076
|
py
|
Python
|
lose/utils/ui/panels.py
|
brianbruggeman/lose-7drl
|
8921b464e82c8c7e6bf7cfebd4e8a3a5e290ac38
|
[
"Apache-2.0"
] | null | null | null |
lose/utils/ui/panels.py
|
brianbruggeman/lose-7drl
|
8921b464e82c8c7e6bf7cfebd4e8a3a5e290ac38
|
[
"Apache-2.0"
] | null | null | null |
lose/utils/ui/panels.py
|
brianbruggeman/lose-7drl
|
8921b464e82c8c7e6bf7cfebd4e8a3a5e290ac38
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import tcod
from ..logger import get_logger
logger = get_logger(__name__)
__all__ = ['Panel']
class Panel(object):
"""Creates a panel on the screen.
Args:
width(int): Width of panel [default: 80]
height(int): height of panel [default: 50]
name(str): Title of panel [default: Untitled]
position(tuple): (int: y, int: x) position on parent [default: (0, 0)]
parent(int): parent object of panel [default: root]
"""
def __init__(self, width=None, height=None, name=None, position=None, parent=None):
self.width = width or 80
self.height = height or 50
self.name = name or 'Untitled'
self.position = position or (0, 0)
self.parent = parent
self.panel = tcod.console_new(self.width, self.height)
def __repr__(self):
cname = self.__class__.__name__
name = self.name
size = (self.width, self.height)
position = self.position
string = f'<{cname} {name} [{position} -> ({size})]>'
return string
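# --- Usage sketch (not part of the original module) ---
# Illustrative only: constructing a Panel needs a working tcod installation,
# since __init__ allocates an off-screen console.
if __name__ == '__main__':
    sidebar = Panel(width=20, height=50, name='Sidebar', position=(0, 60))
    print(repr(sidebar))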
| 27.589744
| 87
| 0.60223
|
08d6d0eb6a83ca603b946ae37a261d62c4117b40
| 3,284
|
py
|
Python
|
airflow/contrib/operators/sagemaker_base_operator.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 4
|
2018-12-14T05:14:02.000Z
|
2022-01-23T15:48:13.000Z
|
airflow/contrib/operators/sagemaker_base_operator.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 4
|
2018-03-20T21:24:26.000Z
|
2020-05-03T04:23:02.000Z
|
airflow/contrib/operators/sagemaker_base_operator.py
|
FlyrInc/airflow-1
|
74b22337b45a1eb25585d52e35694e6b0eb81f03
|
[
"Apache-2.0"
] | 6
|
2020-06-09T02:16:58.000Z
|
2021-12-27T15:46:32.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.sagemaker_hook import SageMakerHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SageMakerBaseOperator(BaseOperator):
"""
This is the base operator for all SageMaker operators.
:param config: The configuration necessary to start a training job (templated)
:type config: dict
:param aws_conn_id: The AWS connection ID to use.
:type aws_conn_id: str
"""
template_fields = ['config']
template_ext = ()
ui_color = '#ededed'
integer_fields = []
@apply_defaults
def __init__(self,
config,
aws_conn_id='aws_default',
*args, **kwargs):
super(SageMakerBaseOperator, self).__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
self.config = config
self.hook = None
def parse_integer(self, config, field):
if len(field) == 1:
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
head = field[0]
if head in config:
config[head] = int(config[head])
return
if isinstance(config, list):
for sub_config in config:
self.parse_integer(sub_config, field)
return
head, tail = field[0], field[1:]
if head in config:
self.parse_integer(config[head], tail)
return
def parse_config_integers(self):
# Parse the integer fields of training config to integers
# in case the config is rendered by Jinja and all fields are str
for field in self.integer_fields:
self.parse_integer(self.config, field)
def expand_role(self):
pass
def preprocess_config(self):
self.log.info(
'Preprocessing the config and doing required s3_operations'
)
self.hook = SageMakerHook(aws_conn_id=self.aws_conn_id)
self.hook.configure_s3_resources(self.config)
self.parse_config_integers()
self.expand_role()
self.log.info(
'After preprocessing the config is:\n {}'.format(
json.dumps(self.config, sort_keys=True, indent=4, separators=(',', ': ')))
)
def execute(self, context):
raise NotImplementedError('Please implement execute() in sub class!')
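# --- Usage sketch (not part of Airflow) ---
# Illustrative subclass only: shows the nested-path format expected by
# integer_fields and a minimal execute(); the config keys are hypothetical.
class ExampleSageMakerOperator(SageMakerBaseOperator):
    # each entry is a path into self.config whose leaf is cast back to int
    # after Jinja templating renders it as a string
    integer_fields = [
        ['ResourceConfig', 'InstanceCount'],
        ['StoppingCondition', 'MaxRuntimeInSeconds'],
    ]
    def execute(self, context):
        self.preprocess_config()
        self.log.info('Rendered config: %s', self.config)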
| 32.514851
| 90
| 0.649513
|
58bd86ee33b168aeff1943eec548564e8725e449
| 1,770
|
py
|
Python
|
dashboard/about/changelog/changelogView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 11
|
2021-02-26T21:27:56.000Z
|
2022-01-03T06:19:09.000Z
|
dashboard/about/changelog/changelogView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 216
|
2021-02-27T12:09:59.000Z
|
2022-03-28T19:44:37.000Z
|
dashboard/about/changelog/changelogView.py
|
DeFi-Analytics/DeFi-Analytics
|
25fa0588758313c6a207848080a5f2d994316a24
|
[
"MIT"
] | 2
|
2021-05-05T21:32:18.000Z
|
2022-02-21T11:54:01.000Z
|
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_table
class changelogViewClass:
def getChangelogContent(self, data):
content = [dbc.Card(dbc.CardBody([dbc.Row(dbc.Col(self.createChangelogContent(data)))]))]
return content
@staticmethod
def createChangelogContent(data):
ChangeTableData = dash_table.DataTable(
id='table',
#columns=[{"name": i, "id": i} for i in tableData.columns],
columns=[{"name": 'Date', "id": 'Date'},
{"name": 'Version', "id": 'Version'},
{"name": 'Changes', "id": 'Changes'}],
data=data.to_dict('records'),
# style_table={
# 'padding':'20px'},
style_cell_conditional=[
{'if': {'column_id': 'Version'}, 'width': '5%'},
{'if': {'column_id': 'Changes'}, 'width': '85%'}],
style_data={
'font-size': '14px',
'font-family': 'monospace',
'width': '100px',
'maxWidth': '100px',
'minWidth': '100px',
'whiteSpace': 'pre-line',
'overflow': 'hidden',
'textOverflow': 'ellipsis',
'height': 'auto',
'backgroundColor': 'white',
'color': "#6c757d",
'textAlign': 'left'},
style_header={
'backgroundColor': '#f4f3f8',
'color': "000",
'fontWeight': 'bold',
'textAlign': 'left'},
)
contentChangelog = [html.H4("Changelog"),
ChangeTableData]
return contentChangelog
| 36.122449
| 97
| 0.468927
|
9d2d695d13aeaef7961f4d7c9a839caa3a722bca
| 966
|
py
|
Python
|
circuitPython/examples/audio-playback/code.py
|
BRTSG-FOSS/pico-bteve
|
1697b9a972ad5e9c2cecca6d560aa16cab725a61
|
[
"MIT"
] | 1
|
2022-01-29T03:16:57.000Z
|
2022-01-29T03:16:57.000Z
|
circuitPython/examples/audio-playback/code.py
|
BRTSG-FOSS/pico-brteve
|
1697b9a972ad5e9c2cecca6d560aa16cab725a61
|
[
"MIT"
] | 15
|
2021-09-22T08:36:08.000Z
|
2022-01-26T08:51:42.000Z
|
circuitPython/examples/audio-playback/code.py
|
BRTSG-FOSS/pico-bteve
|
1697b9a972ad5e9c2cecca6d560aa16cab725a61
|
[
"MIT"
] | null | null | null |
from brteve.brt_eve_bt817_8 import BrtEve
from brteve.brt_eve_rp2040 import BrtEveRP2040
from audio_playback.audio_playback import audio_playback
from audio_playback.widgets import widgets_dialog_yes_no
host = BrtEveRP2040()
eve = BrtEve(host)
eve.init(resolution="1280x800", touch="goodix")
# Store calibration setting
eve.calibrate()
#eve.wr32(eve.REG_TOUCH_TRANSFORM_A, 0xfffefefc);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_B, 0xfffffcbf);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_C, 0x506adb4);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_D, 0xfffffed1);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_E, 0xfffefc79);
#eve.wr32(eve.REG_TOUCH_TRANSFORM_F, 0x32c3211);
#audio_playback(eve)
yes = widgets_dialog_yes_no(eve, "Preparing flash",
"Write BT81X_Flash.bin from sdcard to EVE's connected flash at first?", 120, False) == True
if yes:
eve.storage.write_flash_with_progressbar('/sd/pico-brteve/circuitPython/examples/audio-playback/BT81X_Flash.bin', 0)
audio_playback(eve)
| 35.777778
| 120
| 0.805383
|
4a4986190e7752e118821d814da17723f52c42c0
| 95,977
|
py
|
Python
|
google/cloud/aiplatform_v1/services/job_service/client.py
|
dizcology/python-aiplatform
|
1a135775966c8a2303ded529eba514dcf9db7205
|
[
"Apache-2.0"
] | 2
|
2021-10-02T02:25:44.000Z
|
2021-11-17T10:35:01.000Z
|
google/cloud/aiplatform_v1/services/job_service/client.py
|
pompipo/python-aiplatform
|
3612b05c62dfb46822cd2c1798fd47349dba33bc
|
[
"Apache-2.0"
] | 1
|
2021-03-02T18:25:00.000Z
|
2021-03-02T18:25:00.000Z
|
google/cloud/aiplatform_v1/services/job_service/client.py
|
pompipo/python-aiplatform
|
3612b05c62dfb46822cd2c1798fd47349dba33bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.job_service import pagers
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1.types import completion_stats
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import job_state
from google.cloud.aiplatform_v1.types import machine_resources
from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import study
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import JobServiceGrpcTransport
from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
class JobServiceClientMeta(type):
"""Metaclass for the JobService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]]
_transport_registry["grpc"] = JobServiceGrpcTransport
_transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
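    # For example, get_transport_class("grpc_asyncio") returns
    # JobServiceGrpcAsyncIOTransport, while calling it without a label falls
    # back to the first registered transport, JobServiceGrpcTransport.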
class JobServiceClient(metaclass=JobServiceClientMeta):
"""A service for creating and managing Vertex AI's jobs."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
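    # For reference, the conversion above maps, for example,
    # "aiplatform.googleapis.com" to "aiplatform.mtls.googleapis.com" and
    # "aiplatform.sandbox.googleapis.com" to "aiplatform.mtls.sandbox.googleapis.com",
    # which is how DEFAULT_MTLS_ENDPOINT is derived from DEFAULT_ENDPOINT.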
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
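    # Illustrative sketch (not part of the generated client): constructing a
    # client from a service account key file. The path below is a hypothetical
    # placeholder; `from_service_account_json` behaves identically.
    #
    #   client = JobServiceClient.from_service_account_file(
    #       "/path/to/service-account.json"
    #   )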
@property
def transport(self) -> JobServiceTransport:
"""Returns the transport used by the client instance.
Returns:
JobServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def batch_prediction_job_path(
project: str, location: str, batch_prediction_job: str,
) -> str:
"""Returns a fully-qualified batch_prediction_job string."""
return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
project=project,
location=location,
batch_prediction_job=batch_prediction_job,
)
@staticmethod
def parse_batch_prediction_job_path(path: str) -> Dict[str, str]:
"""Parses a batch_prediction_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def custom_job_path(project: str, location: str, custom_job: str,) -> str:
"""Returns a fully-qualified custom_job string."""
return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
project=project, location=location, custom_job=custom_job,
)
@staticmethod
def parse_custom_job_path(path: str) -> Dict[str, str]:
"""Parses a custom_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$",
path,
)
return m.groupdict() if m else {}
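    # Illustrative round trip with the path helpers above; "my-project",
    # "us-central1", and "123" are hypothetical placeholders.
    #
    #   path = JobServiceClient.custom_job_path("my-project", "us-central1", "123")
    #   # -> "projects/my-project/locations/us-central1/customJobs/123"
    #   JobServiceClient.parse_custom_job_path(path)
    #   # -> {"project": "my-project", "location": "us-central1", "custom_job": "123"}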
@staticmethod
def data_labeling_job_path(
project: str, location: str, data_labeling_job: str,
) -> str:
"""Returns a fully-qualified data_labeling_job string."""
return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
project=project, location=location, data_labeling_job=data_labeling_job,
)
@staticmethod
def parse_data_labeling_job_path(path: str) -> Dict[str, str]:
"""Parses a data_labeling_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def hyperparameter_tuning_job_path(
project: str, location: str, hyperparameter_tuning_job: str,
) -> str:
"""Returns a fully-qualified hyperparameter_tuning_job string."""
return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
project=project,
location=location,
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
@staticmethod
def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]:
"""Parses a hyperparameter_tuning_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def network_path(project: str, network: str,) -> str:
"""Returns a fully-qualified network string."""
return "projects/{project}/global/networks/{network}".format(
project=project, network=network,
)
@staticmethod
def parse_network_path(path: str) -> Dict[str, str]:
"""Parses a network path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
)
return m.groupdict() if m else {}
@staticmethod
def trial_path(project: str, location: str, study: str, trial: str,) -> str:
"""Returns a fully-qualified trial string."""
return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
project=project, location=location, study=study, trial=trial,
)
@staticmethod
def parse_trial_path(path: str) -> Dict[str, str]:
"""Parses a trial path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, JobServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the job service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, JobServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if a client certificate is present; this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
                to provide a client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, JobServiceTransport):
# transport is a JobServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
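    # Illustrative sketch (not part of the generated client): pointing the
    # client at a regional endpoint via client options. The endpoint below is a
    # hypothetical placeholder; when no endpoint is given, resolution follows
    # the GOOGLE_API_USE_MTLS_ENDPOINT logic described above.
    #
    #   options = client_options_lib.ClientOptions(
    #       api_endpoint="us-central1-aiplatform.googleapis.com"
    #   )
    #   client = JobServiceClient(client_options=options)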
def create_custom_job(
self,
request: job_service.CreateCustomJobRequest = None,
*,
parent: str = None,
custom_job: gca_custom_job.CustomJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_custom_job.CustomJob:
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
Args:
request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest):
The request object. Request message for
[JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob].
parent (str):
Required. The resource name of the Location to create
the CustomJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
custom_job (google.cloud.aiplatform_v1.types.CustomJob):
Required. The CustomJob to create.
This corresponds to the ``custom_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.CustomJob:
Represents a job that runs custom
workloads such as a Docker container or
a Python package. A CustomJob can have
multiple worker pools and each worker
pool can have its own machine and input
spec. A CustomJob will be cleaned up
once the job enters terminal state
(failed or succeeded).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, custom_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateCustomJobRequest):
request = job_service.CreateCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if custom_job is not None:
request.custom_job = custom_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
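    # Illustrative usage sketch (not part of the generated client). The parent,
    # display name, and `my_worker_pool_specs` below are hypothetical
    # placeholders assumed to be defined by the caller.
    #
    #   job = gca_custom_job.CustomJob(
    #       display_name="example-custom-job",
    #       job_spec=gca_custom_job.CustomJobSpec(
    #           worker_pool_specs=my_worker_pool_specs,
    #       ),
    #   )
    #   created = client.create_custom_job(
    #       parent="projects/my-project/locations/us-central1",
    #       custom_job=job,
    #   )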
def get_custom_job(
self,
request: job_service.GetCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
Args:
request (google.cloud.aiplatform_v1.types.GetCustomJobRequest):
The request object. Request message for
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob].
name (str):
Required. The name of the CustomJob resource. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.CustomJob:
Represents a job that runs custom
workloads such as a Docker container or
a Python package. A CustomJob can have
multiple worker pools and each worker
pool can have its own machine and input
spec. A CustomJob will be cleaned up
once the job enters terminal state
(failed or succeeded).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetCustomJobRequest):
request = job_service.GetCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_custom_jobs(
self,
request: job_service.ListCustomJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCustomJobsPager:
r"""Lists CustomJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
The request object. Request message for
[JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs].
parent (str):
Required. The resource name of the Location to list the
CustomJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager:
Response message for
[JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListCustomJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListCustomJobsRequest):
request = job_service.ListCustomJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCustomJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
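    # Illustrative sketch: the returned pager can be iterated directly and
    # fetches further pages on demand ("my-project" is a hypothetical
    # placeholder).
    #
    #   for job in client.list_custom_jobs(
    #       parent="projects/my-project/locations/us-central1"
    #   ):
    #       print(job.name, job.state)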
def delete_custom_job(
self,
request: job_service.DeleteCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a CustomJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest):
The request object. Request message for
[JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob].
name (str):
Required. The name of the CustomJob resource to be
deleted. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteCustomJobRequest):
request = job_service.DeleteCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
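    # Illustrative sketch: deletion returns a long-running operation, and
    # calling .result() blocks until it completes (the resource name is a
    # hypothetical placeholder).
    #
    #   operation = client.delete_custom_job(
    #       name="projects/my-project/locations/us-central1/customJobs/123"
    #   )
    #   operation.result()  # resolves to empty_pb2.Empty once deletion finishes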
def cancel_custom_job(
self,
request: job_service.CancelCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a CustomJob. Starts asynchronous cancellation on the
CustomJob. The server makes a best effort to cancel the job, but
success is not guaranteed. Clients can use
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the CustomJob is not deleted; instead it becomes a
job with a
[CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is
set to ``CANCELLED``.
Args:
request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest):
The request object. Request message for
[JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob].
name (str):
Required. The name of the CustomJob to cancel. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelCustomJobRequest):
request = job_service.CancelCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
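    # Illustrative sketch: cancellation is asynchronous, so callers typically
    # poll the job afterwards (the resource name is a hypothetical placeholder).
    #
    #   name = "projects/my-project/locations/us-central1/customJobs/123"
    #   client.cancel_custom_job(name=name)
    #   job = client.get_custom_job(name=name)
    #   print(job.state)  # expected to reach JOB_STATE_CANCELLED if it succeeds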
def create_data_labeling_job(
self,
request: job_service.CreateDataLabelingJobRequest = None,
*,
parent: str = None,
data_labeling_job: gca_data_labeling_job.DataLabelingJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest):
The request object. Request message for
[JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob].
parent (str):
Required. The parent of the DataLabelingJob. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob):
Required. The DataLabelingJob to
create.
This corresponds to the ``data_labeling_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.DataLabelingJob:
DataLabelingJob is used to trigger a
human labeling job on unlabeled data
from the following Dataset:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, data_labeling_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateDataLabelingJobRequest):
request = job_service.CreateDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if data_labeling_job is not None:
request.data_labeling_job = data_labeling_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_data_labeling_job(
self,
request: job_service.GetDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest):
The request object. Request message for
[JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob. Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.DataLabelingJob:
DataLabelingJob is used to trigger a
human labeling job on unlabeled data
from the following Dataset:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetDataLabelingJobRequest):
request = job_service.GetDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_data_labeling_jobs(
self,
request: job_service.ListDataLabelingJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataLabelingJobsPager:
r"""Lists DataLabelingJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
The request object. Request message for
[JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs].
parent (str):
Required. The parent of the DataLabelingJob. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager:
Response message for
[JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListDataLabelingJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListDataLabelingJobsRequest):
request = job_service.ListDataLabelingJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDataLabelingJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_data_labeling_job(
self,
request: job_service.DeleteDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest):
The request object. Request message for
[JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob to be deleted.
Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteDataLabelingJobRequest):
request = job_service.DeleteDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_data_labeling_job(
self,
request: job_service.CancelDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
Args:
request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest):
The request object. Request message for
[JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob. Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelDataLabelingJobRequest):
request = job_service.CancelDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_hyperparameter_tuning_job(
self,
request: job_service.CreateHyperparameterTuningJobRequest = None,
*,
parent: str = None,
hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
Args:
request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob].
parent (str):
Required. The resource name of the Location to create
the HyperparameterTuningJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob):
Required. The HyperparameterTuningJob
to create.
This corresponds to the ``hyperparameter_tuning_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
Represents a HyperparameterTuningJob.
A HyperparameterTuningJob has a Study
specification and multiple CustomJobs
with identical CustomJob specification.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, hyperparameter_tuning_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest):
request = job_service.CreateHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if hyperparameter_tuning_job is not None:
request.hyperparameter_tuning_job = hyperparameter_tuning_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
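    # Illustrative usage sketch (not part of the generated client). The parent,
    # `my_study_spec`, and `my_trial_job_spec` are hypothetical placeholders
    # assumed to be built by the caller (the metrics/parameters to tune and the
    # worker pools for each trial, respectively).
    #
    #   hpt_job = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
    #       display_name="example-hpt-job",
    #       study_spec=my_study_spec,
    #       max_trial_count=10,
    #       parallel_trial_count=2,
    #       trial_job_spec=my_trial_job_spec,
    #   )
    #   created = client.create_hyperparameter_tuning_job(
    #       parent="projects/my-project/locations/us-central1",
    #       hyperparameter_tuning_job=hpt_job,
    #   )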
def get_hyperparameter_tuning_job(
self,
request: job_service.GetHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
Args:
request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob
resource. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
Represents a HyperparameterTuningJob.
A HyperparameterTuningJob has a Study
specification and multiple CustomJobs
with identical CustomJob specification.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetHyperparameterTuningJobRequest):
request = job_service.GetHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_hyperparameter_tuning_jobs(
self,
request: job_service.ListHyperparameterTuningJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListHyperparameterTuningJobsPager:
r"""Lists HyperparameterTuningJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
The request object. Request message for
[JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs].
parent (str):
Required. The resource name of the Location to list the
HyperparameterTuningJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager:
Response message for
[JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListHyperparameterTuningJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest):
request = job_service.ListHyperparameterTuningJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_hyperparameter_tuning_jobs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListHyperparameterTuningJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_hyperparameter_tuning_job(
self,
request: job_service.DeleteHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a HyperparameterTuningJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob
resource to be deleted. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`. A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest):
request = job_service.DeleteHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_hyperparameter_tuning_job(
self,
request: job_service.CancelHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a HyperparameterTuningJob. Starts asynchronous
cancellation on the HyperparameterTuningJob. The server makes a
best effort to cancel the job, but success is not guaranteed.
Clients can use
[JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the HyperparameterTuningJob is not deleted;
instead it becomes a job with a
[HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
Args:
request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob to
cancel. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
request = job_service.CancelHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.cancel_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_batch_prediction_job(
self,
request: job_service.CreateBatchPredictionJobRequest = None,
*,
parent: str = None,
batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_batch_prediction_job.BatchPredictionJob:
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
Args:
request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest):
The request object. Request message for
[JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob].
parent (str):
Required. The resource name of the Location to create
the BatchPredictionJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
Required. The BatchPredictionJob to
create.
This corresponds to the ``batch_prediction_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchPredictionJob:
A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
on multiple [input
instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
                    If predictions for a significant portion of the
instances fail, the job may finish without attempting
predictions for all remaining instances.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, batch_prediction_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
request = job_service.CreateBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if batch_prediction_job is not None:
request.batch_prediction_job = batch_prediction_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_batch_prediction_job(
self,
request: job_service.GetBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
Args:
request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest):
The request object. Request message for
[JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob resource.
Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchPredictionJob:
A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
on multiple [input
instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
                    If predictions for a significant portion of the
instances fail, the job may finish without attempting
predictions for all remaining instances.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetBatchPredictionJobRequest):
request = job_service.GetBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_batch_prediction_jobs(
self,
request: job_service.ListBatchPredictionJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBatchPredictionJobsPager:
r"""Lists BatchPredictionJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
The request object. Request message for
[JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs].
parent (str):
Required. The resource name of the Location to list the
BatchPredictionJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager:
Response message for
[JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListBatchPredictionJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListBatchPredictionJobsRequest):
request = job_service.ListBatchPredictionJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_batch_prediction_jobs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBatchPredictionJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_batch_prediction_job(
self,
request: job_service.DeleteBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
Args:
request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest):
The request object. Request message for
[JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob resource to
be deleted. Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteBatchPredictionJobRequest):
request = job_service.DeleteBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_batch_prediction_job(
self,
request: job_service.CancelBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a BatchPredictionJob.
Starts asynchronous cancellation on the BatchPredictionJob. The
server makes the best effort to cancel the job, but success is
not guaranteed. Clients can use
[JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On a successful
        cancellation, the BatchPredictionJob is not deleted; instead its
[BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
Args:
request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest):
The request object. Request message for
[JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob to cancel.
Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
request = job_service.CancelBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.cancel_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("JobServiceClient",)
| 44.474977
| 171
| 0.630745
|
407d6d2b67b491bcc1a8273e814b22887cbb0276
| 3,089
|
py
|
Python
|
tests/examples.py
|
dkedar7/fast_dash
|
4e4c52801ed430cd84371451c12b4028704993a4
|
[
"MIT"
] | 7
|
2022-01-31T12:47:48.000Z
|
2022-03-29T23:33:22.000Z
|
tests/examples.py
|
dkedar7/fast_dash
|
4e4c52801ed430cd84371451c12b4028704993a4
|
[
"MIT"
] | 1
|
2022-03-22T20:38:38.000Z
|
2022-03-22T20:38:38.000Z
|
tests/examples.py
|
dkedar7/fast_dash
|
4e4c52801ed430cd84371451c12b4028704993a4
|
[
"MIT"
] | null | null | null |
from fast_dash import FastDash, Fastify
from fast_dash.Components import Text, Image, Upload, UploadImage, Slider, html, dcc
from fast_dash.utils import pil_to_b64
def example_1_simple_text_to_text():
"Fast Dash example 1. Simple text to text Fast Dash app"
## Define callback functions
def simple_text_to_text_function(input_text):
return input_text
app = FastDash(
callback_fn=simple_text_to_text_function,
inputs=Text,
outputs=Text,
title="Fast Dash example 1",
)
return app
def example_2_text_with_slider():
"Fast Dash example 2"
# Step 1: Define your model inference
def text_to_text_function(input_text, slider_value):
processed_text = f'{input_text}. Slider value is {slider_value}.'
return processed_text
# Step 2: Specify the input and output components
app = FastDash(callback_fn=text_to_text_function,
inputs=[Text, Slider],
outputs=Text,
title='Fast Dash example 2')
return app
def example_3_image_to_image():
"Fast Dash example 3"
# Step 1: Define your model inference
def callback_fn(image):
return image
# Step 2: Specify the input and output components
app = FastDash(callback_fn=callback_fn,
inputs=Upload,
outputs=Image,
title='Fast Dash example 3')
return app
def example_4_image_slider_to_image_text():
"Fast Dash example 4. Input is Upload (with ack) and slider. Output is Image and Text."
def callback_fn(input_text, slider_value):
return input_text, f"Slider value is {slider_value}"
ack_image = Fastify(html.Img(width='100%'), 'src')
fast_upload = Fastify(dcc.Upload(children=["Click to upload"], style={'borderStyle': 'dashed', 'padding-bottom':'20px'}), 'contents', ack=ack_image)
app = FastDash(callback_fn=callback_fn,
inputs=[fast_upload, Slider],
outputs=[Image, Text],
title='Fast Dash example 4',
theme='SKETCHY')
return app
def example_5_uploadimage_to_image():
"Fast Dash example 5. Input is UploadImage. Output is Image."
def image_to_image(image):
from PIL import Image
import io
import base64
_, image_contents = image.split(',')
processed_image = Image.open(io.BytesIO(base64.b64decode(image_contents.encode())))
return pil_to_b64(processed_image)
app = FastDash(callback_fn=image_to_image,
inputs=UploadImage,
outputs=Image,
title='Fast Dash example 5',
title_image_path='https://tinyurl.com/mr44nn5y',
subheader='Build ML prototypes lightning fast!',
github_url='https://github.com/dkedar7/fast_dash/',
linkedin_url='https://linkedin.com/in/dkedar7/',
twitter_url='https://twitter.com/dkedar7/',
theme='FLATLY')
return app
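# --- Illustrative usage sketch (added; not part of the original examples) ---
# Assuming a FastDash app exposes a Dash-style ``run()`` entry point (hedged:
# check the fast_dash docs for the exact method name in your version), one of
# the examples above could be served locally like this.
def example_run_locally():  # pragma: no cover - illustration only
    app = example_1_simple_text_to_text()
    app.run()  # hypothetical entry point; the exact call may differ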
| 31.845361
| 152
| 0.62933
|
dd8ce911754937283f6257f3b09f87f5b6db5793
| 1,118
|
py
|
Python
|
bootstrap_scripts/create_s3_buckets.py
|
heitorlessa/serverless-encryption-workshop
|
b2a42034aef02dc27463a857eccec9307a50ee3f
|
[
"Apache-2.0"
] | 4
|
2016-08-02T12:46:15.000Z
|
2017-08-09T18:02:03.000Z
|
bootstrap_scripts/create_s3_buckets.py
|
heitorlessa/serverless-encryption-workshop
|
b2a42034aef02dc27463a857eccec9307a50ee3f
|
[
"Apache-2.0"
] | null | null | null |
bootstrap_scripts/create_s3_buckets.py
|
heitorlessa/serverless-encryption-workshop
|
b2a42034aef02dc27463a857eccec9307a50ee3f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Creates/removes S3 buckets and adds sample prefixes
from __future__ import print_function
import boto3
import argparse
s3 = boto3.resource('s3')
lambda_environments = ['DEV/', 'PROD/', 'STAGE/']
def create_s3(bucket_name):
"""Creates given S3 Bucket name and prefix sample for Lambda function"""
bucket = s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={
'LocationConstraint': 'eu-west-1'})
bucket.wait_until_exists()
for prefix in lambda_environments:
obj = bucket.Object(prefix)
obj.put(
ACL='private', Body='')
return bucket
def main():
# Quick arg parsing
parser = argparse.ArgumentParser(
description='Quick and dirty s3 bucket creation for demo.')
parser.add_argument(
'-c', '--create', help='Bucket name to be created', required=True)
args = parser.parse_args()
if args.create:
try:
ret = create_s3(args.create)
        except Exception as e:
raise Exception("Operation failed due to {0}: ".format(e))
if __name__ == '__main__':
main()
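# --- Illustrative usage note (added; not part of the original script) -------
# The script is meant to be driven from the command line, e.g.:
#   python create_s3_buckets.py --create my-demo-bucket
# The helper can also be called directly; the bucket name below is a
# placeholder and must be globally unique in practice.
def _example_direct_call():  # pragma: no cover - illustration only
    bucket = create_s3('my-demo-bucket-example')  # hypothetical bucket name
    print('created', bucket.name)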
| 24.304348
| 77
| 0.653846
|
126900a675d6f5c209712e6fb37ada397fbd631b
| 13,040
|
py
|
Python
|
mesonbuild/compilers/cpp.py
|
juergenhoetzel/meson
|
d784b5772a55eadb88034bd1992adeaddfba28d0
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/compilers/cpp.py
|
juergenhoetzel/meson
|
d784b5772a55eadb88034bd1992adeaddfba28d0
|
[
"Apache-2.0"
] | null | null | null |
mesonbuild/compilers/cpp.py
|
juergenhoetzel/meson
|
d784b5772a55eadb88034bd1992adeaddfba28d0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from .. import coredata
from ..mesonlib import version_compare
from .c import CCompiler, VisualStudioCCompiler
from .compilers import (
GCC_MINGW,
gnu_winlibs,
msvc_winlibs,
ClangCompiler,
GnuCompiler,
ElbrusCompiler,
IntelCompiler,
ArmCompiler,
ArmclangCompiler,
)
class CPPCompiler(CCompiler):
def __init__(self, exelist, version, is_cross, exe_wrap, **kwargs):
# If a child ObjCPP class has already set it, don't set it ourselves
if not hasattr(self, 'language'):
self.language = 'cpp'
CCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
def get_display_language(self):
return 'C++'
def get_no_stdinc_args(self):
return ['-nostdinc++']
def sanity_check(self, work_dir, environment):
code = 'class breakCCompiler;int main(int argc, char **argv) { return 0; }\n'
return self.sanity_check_impl(work_dir, environment, 'sanitycheckcpp.cc', code)
def get_compiler_check_args(self):
# -fpermissive allows non-conforming code to compile which is necessary
# for many C++ checks. Particularly, the has_header_symbol check is
# too strict without this and always fails.
return super().get_compiler_check_args() + ['-fpermissive']
def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):
# Check if it's a C-like symbol
if super().has_header_symbol(hname, symbol, prefix, env, extra_args, dependencies):
return True
# Check if it's a class or a template
if extra_args is None:
extra_args = []
fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}
t = '''{prefix}
#include <{header}>
using {symbol};
int main () {{ return 0; }}'''
return self.compiles(t.format(**fargs), env, extra_args, dependencies)
class ClangCPPCompiler(ClangCompiler, CPPCompiler):
def __init__(self, exelist, version, cltype, is_cross, exe_wrapper=None, **kwargs):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
ClangCompiler.__init__(self, cltype)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CPPCompiler.get_options(self)
opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',
'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_option_link_args(self, options):
return []
def language_stdlib_only_link_flags(self):
return ['-lstdc++']
class ArmclangCPPCompiler(ArmclangCompiler, CPPCompiler):
def __init__(self, exelist, version, is_cross, exe_wrapper=None, **kwargs):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrapper, **kwargs)
ArmclangCompiler.__init__(self)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CPPCompiler.get_options(self)
opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
                                                         ['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17',
'gnu++98', 'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_std']
if std.value != 'none':
args.append('-std=' + std.value)
return args
def get_option_link_args(self, options):
return []
class GnuCPPCompiler(GnuCompiler, CPPCompiler):
def __init__(self, exelist, version, gcc_type, is_cross, exe_wrap, defines, **kwargs):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
GnuCompiler.__init__(self, gcc_type, defines)
default_warn_args = ['-Wall', '-Winvalid-pch', '-Wnon-virtual-dtor']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CPPCompiler.get_options(self)
opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
['none', 'c++98', 'c++03', 'c++11', 'c++14', 'c++17', 'c++1z',
'gnu++03', 'gnu++11', 'gnu++14', 'gnu++17', 'gnu++1z'],
'none'),
'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',
'STL debug mode',
False)})
if self.gcc_type == GCC_MINGW:
opts.update({
'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs', 'Standard Win libraries to link against',
gnu_winlibs), })
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_std']
if std.value != 'none':
args.append('-std=' + std.value)
if options['cpp_debugstl'].value:
args.append('-D_GLIBCXX_DEBUG=1')
return args
def get_option_link_args(self, options):
if self.gcc_type == GCC_MINGW:
return options['cpp_winlibs'].value[:]
return []
def get_pch_use_args(self, pch_dir, header):
return ['-fpch-preprocess', '-include', os.path.basename(header)]
def language_stdlib_only_link_flags(self):
return ['-lstdc++']
class ElbrusCPPCompiler(GnuCPPCompiler, ElbrusCompiler):
def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None, **kwargs):
GnuCPPCompiler.__init__(self, exelist, version, gcc_type, is_cross, exe_wrapper, defines, **kwargs)
ElbrusCompiler.__init__(self, gcc_type, defines)
# It does not support c++/gnu++ 17 and 1z, but still does support 0x, 1y, and gnu++98.
def get_options(self):
opts = CPPCompiler.get_options(self)
opts['cpp_std'] = coredata.UserComboOption('cpp_std', 'C++ language standard to use',
['none', 'c++98', 'c++03', 'c++0x', 'c++11', 'c++14', 'c++1y',
'gnu++98', 'gnu++03', 'gnu++0x', 'gnu++11', 'gnu++14', 'gnu++1y'],
'none')
return opts
    # The Elbrus C++ compiler does not have lchmod, but that only produces a linker
    # warning, not a compiler error, so we should explicitly fail in this case.
def has_function(self, funcname, prefix, env, extra_args=None, dependencies=None):
if funcname == 'lchmod':
return False
else:
return super().has_function(funcname, prefix, env, extra_args, dependencies)
class IntelCPPCompiler(IntelCompiler, CPPCompiler):
def __init__(self, exelist, version, icc_type, is_cross, exe_wrap, **kwargs):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
IntelCompiler.__init__(self, icc_type)
self.lang_header = 'c++-header'
default_warn_args = ['-Wall', '-w3', '-diag-disable:remark',
'-Wpch-messages', '-Wnon-virtual-dtor']
self.warn_args = {'1': default_warn_args,
'2': default_warn_args + ['-Wextra'],
'3': default_warn_args + ['-Wextra', '-Wpedantic']}
def get_options(self):
opts = CPPCompiler.get_options(self)
c_stds = []
g_stds = ['gnu++98']
if version_compare(self.version, '>=15.0.0'):
c_stds += ['c++11', 'c++14']
g_stds += ['gnu++11']
if version_compare(self.version, '>=16.0.0'):
c_stds += ['c++17']
if version_compare(self.version, '>=17.0.0'):
g_stds += ['gnu++14']
opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
['none'] + c_stds + g_stds,
'none'),
'cpp_debugstl': coredata.UserBooleanOption('cpp_debugstl',
'STL debug mode',
False)})
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_std']
if std.value != 'none':
args.append('-std=' + std.value)
if options['cpp_debugstl'].value:
args.append('-D_GLIBCXX_DEBUG=1')
return args
def get_option_link_args(self, options):
return []
class VisualStudioCPPCompiler(VisualStudioCCompiler, CPPCompiler):
def __init__(self, exelist, version, is_cross, exe_wrap, is_64):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
VisualStudioCCompiler.__init__(self, exelist, version, is_cross, exe_wrap, is_64)
self.base_options = ['b_pch'] # FIXME add lto, pgo and the like
def get_options(self):
opts = CPPCompiler.get_options(self)
opts.update({'cpp_eh': coredata.UserComboOption('cpp_eh',
'C++ exception handling type.',
['none', 'a', 's', 'sc'],
'sc'),
'cpp_winlibs': coredata.UserArrayOption('cpp_winlibs',
'Windows libs to link against.',
msvc_winlibs)})
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_eh']
if std.value != 'none':
args.append('/EH' + std.value)
return args
def get_option_link_args(self, options):
return options['cpp_winlibs'].value[:]
def get_compiler_check_args(self):
# Visual Studio C++ compiler doesn't support -fpermissive,
# so just use the plain C args.
return VisualStudioCCompiler.get_compiler_check_args(self)
class ArmCPPCompiler(ArmCompiler, CPPCompiler):
def __init__(self, exelist, version, is_cross, exe_wrap=None, **kwargs):
CPPCompiler.__init__(self, exelist, version, is_cross, exe_wrap, **kwargs)
ArmCompiler.__init__(self)
def get_options(self):
opts = CPPCompiler.get_options(self)
opts.update({'cpp_std': coredata.UserComboOption('cpp_std', 'C++ language standard to use',
['none', 'c++03', 'c++11'],
'none')})
return opts
def get_option_compile_args(self, options):
args = []
std = options['cpp_std']
if std.value == 'c++11':
args.append('--cpp11')
elif std.value == 'c++03':
args.append('--cpp')
return args
def get_option_link_args(self, options):
return []
def get_compiler_check_args(self):
return []
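# --- Standalone illustration (added; not part of meson) ---------------------
# A simplified sketch of the flag-mapping pattern the compiler classes above
# share: a chosen C++ standard becomes a single ``-std=`` argument and 'none'
# means "emit nothing". It deliberately avoids meson's coredata option
# objects, so treat it as an illustration only.
def _example_std_to_args(cpp_std, cpp_debugstl=False):
    args = []
    if cpp_std != 'none':
        args.append('-std=' + cpp_std)     # e.g. 'c++14' -> '-std=c++14'
    if cpp_debugstl:
        args.append('-D_GLIBCXX_DEBUG=1')  # mirrors the GNU/Intel classes
    return args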
| 43.466667
| 119
| 0.550844
|
414b3ef829e8576d38f93ab890c8925b8db9f0eb
| 964
|
py
|
Python
|
medicare_appeals/tests/appeals_tests.py
|
18F/medicare-appeals-prototyping
|
51f7a4a2f9dee4a78ef8197d8a80ed255e3dcc56
|
[
"CC0-1.0"
] | 1
|
2019-07-01T19:18:10.000Z
|
2019-07-01T19:18:10.000Z
|
medicare_appeals/tests/appeals_tests.py
|
18F/medicare-appeals-prototyping
|
51f7a4a2f9dee4a78ef8197d8a80ed255e3dcc56
|
[
"CC0-1.0"
] | 1
|
2019-03-07T00:36:56.000Z
|
2019-03-07T00:36:56.000Z
|
medicare_appeals/tests/appeals_tests.py
|
18F/medicare-appeals-prototyping
|
51f7a4a2f9dee4a78ef8197d8a80ed255e3dcc56
|
[
"CC0-1.0"
] | 1
|
2021-02-14T09:47:01.000Z
|
2021-02-14T09:47:01.000Z
|
import pytest
from medicare_appeals.appeals import models
from medicare_appeals.tests import factories
@pytest.fixture(scope='function')
def build_an_appeal():
"""
Build a single appeal
"""
appeal = factories.AppealFactory()
@pytest.fixture(scope='function')
def build_two_appeals():
"""
Build two appeals with the description 'test{n}'
"""
appeal1 = factories.AppealFactory(description='test0')
appeal2 = factories.AppealFactory(description='test1')
@pytest.mark.django_db
def test_appeal(build_an_appeal):
"""
An appeal should be created
"""
assert models.Appeal.objects.count() == 1
@pytest.mark.django_db
def test_two_appeals(build_two_appeals):
"""
Two appeals should be created with description 'test{n}'
"""
appeals = models.Appeal.objects.all()
assert appeals.count() == 2
for idx, appeal in enumerate(appeals):
assert appeal.description == 'test{0}'.format(idx)
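# --- Illustrative sketch (added; not part of the original test module) ------
# The ``factories.AppealFactory`` used above is not shown in this file. A
# minimal factory_boy definition compatible with these tests might look like
# the following; any Appeal model fields other than ``description`` are
# assumptions.
import factory

class ExampleAppealFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = models.Appeal

    description = factory.Sequence(lambda n: 'test{0}'.format(n))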
| 23.512195
| 60
| 0.697095
|
98c141d070aae66f1073ca047bf1d5b224248ccd
| 1,970
|
py
|
Python
|
lifestream-utm/lifestream_utm/send_email.py
|
ds-vologdin/lifestream-utm
|
c353f2f8fe85b434e4444d834565baf5972270a7
|
[
"MIT"
] | null | null | null |
lifestream-utm/lifestream_utm/send_email.py
|
ds-vologdin/lifestream-utm
|
c353f2f8fe85b434e4444d834565baf5972270a7
|
[
"MIT"
] | null | null | null |
lifestream-utm/lifestream_utm/send_email.py
|
ds-vologdin/lifestream-utm
|
c353f2f8fe85b434e4444d834565baf5972270a7
|
[
"MIT"
] | null | null | null |
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from email.utils import formatdate
import logging
from settings.private_settings import SMTP_CONFIG
logger = logging.getLogger(__name__)
def send_email(receivers, text, subject, filename=None, use_tls=False):
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = SMTP_CONFIG['sender']
msg['To'] = ', '.join(receivers)
msg["Date"] = formatdate(localtime=True)
msg.attach(MIMEText(text))
if filename:
attachment = MIMEBase('application', "octet-stream")
try:
with open(filename, "rb") as fh:
data = fh.read()
attachment.set_payload(data)
encoders.encode_base64(attachment)
attachment.add_header(
'Content-Disposition', 'attachment',
filename=os.path.basename(filename)
)
msg.attach(attachment)
except IOError:
error = "Error opening attachment file {}".format(filename)
logger.error(error)
return -1
with smtplib.SMTP(host=SMTP_CONFIG['host'], port=SMTP_CONFIG['port']) as s:
s.ehlo()
if use_tls:
s.starttls()
s.login(SMTP_CONFIG['user'], SMTP_CONFIG['passwd'])
s.sendmail(SMTP_CONFIG['sender'], receivers, msg.as_string())
def send_report_to_email(receivers, status_change_user):
if not receivers:
return
receivers = receivers.replace(',', ' ').split()
subject = 'lifestream report'
message = ''
for user in status_change_user:
message += '{0[user].login} {0[user].full_name}: {0[status_lifestream]} -> {0[status_utm]}\n'.format(
user
)
if message:
send_email(receivers, message, subject)
logger.info('send email to {}'.format(','.join(receivers)))
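# --- Illustrative usage sketch (added; not part of the original module) -----
# How the helpers above might be exercised; the address and subject below are
# placeholders, and SMTP_CONFIG must be filled in via private_settings.
def _example_send():  # pragma: no cover - illustration only
    send_email(
        receivers=['ops@example.com'],   # hypothetical recipient
        text='Nightly sync finished.',
        subject='lifestream report',
        filename=None,                   # or a path to attach
        use_tls=True,
    )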
| 31.269841
| 109
| 0.629949
|
0456210c07609c8174b4365207829ad12bc489cd
| 359
|
py
|
Python
|
1-lab-basicDispatchConsumeEvent/work/lambda-functions/consume-function/app.py
|
rizasaputra/workshop-eventDrivenMicroservices
|
46b4c9385eaa9d147ee294c57fb2ac1058126a18
|
[
"Apache-2.0"
] | 36
|
2020-11-11T04:26:44.000Z
|
2022-03-27T11:04:44.000Z
|
1-lab-basicDispatchConsumeEvent/work/lambda-functions/consume-function/app.py
|
rizasaputra/workshop-eventDrivenMicroservices
|
46b4c9385eaa9d147ee294c57fb2ac1058126a18
|
[
"Apache-2.0"
] | 3
|
2021-04-28T11:31:12.000Z
|
2021-06-22T08:24:14.000Z
|
1-lab-basicDispatchConsumeEvent/work/lambda-functions/consume-function/app.py
|
donnieprakoso/workshop-eventDrivenMicroservices
|
eb3ca81b11043823288fa19525e880d7257b3ec9
|
[
"Apache-2.0"
] | 18
|
2020-11-11T06:03:48.000Z
|
2021-12-14T17:15:44.000Z
|
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
try:
'''
[TASK] Logs event variable to Amazon CloudWatch Logs. This way we know that Producer emitted a message and this function will consume that event.
'''
except Exception as e:
logger.error(e)
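# --- Illustrative completion (added; not part of the workshop starter) ------
# One straightforward way to complete the [TASK] above: log the incoming
# event so it appears in the function's CloudWatch Logs log group.
def example_completed_handler(event, context):
    try:
        logger.info(event)
    except Exception as e:
        logger.error(e)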
| 27.615385
| 153
| 0.649025
|
75069fc8c38fbf3e940430384d195bb6de5761bc
| 824
|
py
|
Python
|
models/others/linear_regression.py
|
wiseodd/bayesian-models
|
cb5d5d83318878d3e5e90d03957a5070ffa91690
|
[
"BSD-3-Clause"
] | 232
|
2017-09-27T02:58:58.000Z
|
2022-02-14T12:07:23.000Z
|
models/others/linear_regression.py
|
wiseodd/bayesian-models
|
cb5d5d83318878d3e5e90d03957a5070ffa91690
|
[
"BSD-3-Clause"
] | 1
|
2019-03-07T05:13:03.000Z
|
2019-03-07T05:13:03.000Z
|
models/others/linear_regression.py
|
wiseodd/bayesian-models
|
cb5d5d83318878d3e5e90d03957a5070ffa91690
|
[
"BSD-3-Clause"
] | 75
|
2017-10-21T06:04:16.000Z
|
2021-12-14T08:40:45.000Z
|
"""
Linear Regression
-----------------
Probabilistic Linear Regression with toy data in 1D.
"""
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
# Generate data
M = 20
X = np.linspace(0, 2, num=M)
# X = np.array([5, 14, 19], dtype=np.float)
t_real = np.sin(X)
t = t_real + np.random.randn(M) * 0.25
plt.scatter(X, t, label='Data points')
# Infer p(t|W,X,alpha) = N(t|XW+b,alpha); the predictive distribution
# MLE for W, b, and the noise variance alpha
# Centre X and t so the intercept is handled correctly (normal equations)
Xc, tc = X - X.mean(), t - t.mean()
W_ml = Xc @ tc / (Xc @ Xc)
b_ml = np.mean(t) - W_ml * np.mean(X)
y = X * W_ml + b_ml
alpha_ml = np.mean((t - y)**2)
plt.plot(X, y, color='red', alpha=0.75, label='Regression line')
# Sample from predictive dist.
ys = np.random.normal(y, np.sqrt(alpha_ml))  # scale takes the std dev, not the variance
plt.scatter(X, ys, alpha=0.15, label='Posterior samples')
plt.legend(loc='best')
plt.show()
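# --- Optional sanity check (added illustration; not in the original) --------
# The closed-form slope/intercept should agree with a degree-1 least-squares
# fit; np.polyfit returns the coefficients highest degree first.
W_check, b_check = np.polyfit(X, t, 1)
print('polyfit agrees:', np.allclose([W_check, b_check], [W_ml, b_ml]))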
| 21.684211
| 69
| 0.644417
|
9cf9c4737532bfd36da82c406b52acf255217ffa
| 106
|
py
|
Python
|
build/lib/annotation_utils/old/util/labelme/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 13
|
2020-01-28T04:45:22.000Z
|
2022-03-10T03:35:49.000Z
|
build/lib/annotation_utils/old/util/labelme/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 4
|
2020-02-14T08:56:03.000Z
|
2021-05-21T10:38:30.000Z
|
build/lib/annotation_utils/old/util/labelme/__init__.py
|
HienDT27/annotation_utils
|
1f4e95f4cfa08de5bbab20f90a6a75fba66a69b9
|
[
"MIT"
] | 7
|
2020-04-10T07:56:25.000Z
|
2021-12-17T11:19:23.000Z
|
from .labelme_utils import write_resized_image, write_resized_json, \
copy_annotation, move_annotation
| 53
| 69
| 0.849057
|
e1a691f7725561450aab1f4551cbc26f1447d2b4
| 346
|
py
|
Python
|
practice/lis.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
practice/lis.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
practice/lis.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
def solve(arr):
    """Return the length of the longest strictly increasing subsequence (O(n^2) DP)."""
    n = len(arr)
    # lis[i] = length of the longest increasing subsequence ending at arr[i]
    lis = [1 for _ in range(n)]
    for i in range(1, n):
        for j in range(0, i):
            if arr[j] < arr[i] and lis[i] < 1 + lis[j]:
                lis[i] = 1 + lis[j]
    return max(lis)
arr = [3, 10, 2, 1, 20]
assert 3 == solve(arr)
arr = [3, 2]
assert 1 == solve(arr)
arr = [50, 3, 10, 7, 40, 80]
assert 4 == solve(arr)
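# --- Alternative sketch (added illustration; not in the original) -----------
# The quadratic DP above can be replaced by the standard O(n log n) patience
# approach using bisect; tails[k] holds the smallest possible tail value of an
# increasing subsequence of length k + 1.
import bisect

def solve_nlogn(arr):
    tails = []
    for x in arr:
        i = bisect.bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)
        else:
            tails[i] = x
    return len(tails)

assert 3 == solve_nlogn([3, 10, 2, 1, 20])
assert 1 == solve_nlogn([3, 2])
assert 4 == solve_nlogn([50, 3, 10, 7, 40, 80])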
| 18.210526
| 49
| 0.508671
|
055bd14358b43bf9f154acb2939e153e036c927b
| 48,493
|
py
|
Python
|
ProjectFiles/bin/Release/2.80/scripts/addons/sequencer_kinoraw_tools/operators_extra_actions.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2019-07-08T15:51:14.000Z
|
2019-07-08T15:51:14.000Z
|
ProjectFiles/bin/Release/2.80/scripts/addons/sequencer_kinoraw_tools/operators_extra_actions.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
ProjectFiles/bin/Release/2.80/scripts/addons/sequencer_kinoraw_tools/operators_extra_actions.py
|
BlazesRus/Bforartists
|
126bdd9e47cc984fd97ba5299bfb92ec5278e754
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import os
from bpy.types import Operator
from bpy.props import (
IntProperty,
FloatProperty,
EnumProperty,
BoolProperty,
)
from . import functions
# Skip one second
class Sequencer_Extra_FrameSkip(Operator):
bl_label = "Skip One Second"
bl_idname = "screenextra.frame_skip"
bl_description = "Skip through the Timeline by one-second increments"
bl_options = {'REGISTER', 'UNDO'}
back = BoolProperty(
name="Back",
default=False
)
def execute(self, context):
one_second = bpy.context.scene.render.fps
if self.back is True:
one_second *= -1
bpy.ops.screen.frame_offset(delta=one_second)
return {'FINISHED'}
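# --- Illustrative note (added; not part of this module) ---------------------
# Operator classes like the one above are normally registered by the add-on's
# __init__ module, which is not shown here. A minimal, hypothetical
# registration sketch (it assumes nothing about the real register() function):
def _example_register_frame_skip():  # pragma: no cover - illustration only
    bpy.utils.register_class(Sequencer_Extra_FrameSkip)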
# Trim timeline
class Sequencer_Extra_TrimTimeline(Operator):
bl_label = "Trim to Timeline Content"
bl_idname = "timeextra.trimtimeline"
bl_description = "Automatically set start and end frames"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return scn.sequence_editor.sequences
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
frame_start = 300000
frame_end = -300000
for i in seq.sequences:
try:
if i.frame_final_start < frame_start:
frame_start = i.frame_final_start
if i.frame_final_end > frame_end:
frame_end = i.frame_final_end - 1
except AttributeError:
pass
if frame_start != 300000:
scn.frame_start = frame_start
if frame_end != -300000:
scn.frame_end = frame_end
bpy.ops.sequencer.view_all()
return {'FINISHED'}
# Trim timeline to selection
class Sequencer_Extra_TrimTimelineToSelection(Operator):
bl_label = "Trim to Selection"
bl_idname = "timeextra.trimtimelinetoselection"
bl_description = "Set start and end frames to selection"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return scn.sequence_editor.sequences
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
frame_start = 300000
frame_end = -300000
for i in seq.sequences:
try:
if i.frame_final_start < frame_start and i.select is True:
frame_start = i.frame_final_start
if i.frame_final_end > frame_end and i.select is True:
frame_end = i.frame_final_end - 1
except AttributeError:
pass
if frame_start != 300000:
scn.frame_start = frame_start
if frame_end != -300000:
scn.frame_end = frame_end
bpy.ops.sequencer.view_selected()
return {'FINISHED'}
# Open image with editor and create movie clip strip
"""
When a movie or image strip is selected, this operator creates a movieclip
or finds the corresponding movieclip that already exists for this footage,
and adds a VSE clip strip with the same cuts as the original strip.
It can convert movie strips and image sequences, both with hard cuts or
soft cuts.
"""
class Sequencer_Extra_CreateMovieclip(Operator):
bl_label = "Create a Movieclip from selected strip"
bl_idname = "sequencerextra.createmovieclip"
bl_description = "Create a Movieclip strip from a MOVIE or IMAGE strip"
@classmethod
def poll(self, context):
strip = functions.act_strip(context)
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return strip.type in ('MOVIE', 'IMAGE')
else:
return False
def execute(self, context):
strip = functions.act_strip(context)
scn = context.scene
if strip.type == 'MOVIE':
path = strip.filepath
data_exists = False
for i in bpy.data.movieclips:
if i.filepath == path:
data_exists = True
data = i
newstrip = None
if data_exists is False:
try:
data = bpy.data.movieclips.load(filepath=path)
newstrip = bpy.ops.sequencer.movieclip_strip_add(
replace_sel=True, overlap=False,
clip=data.name
)
newstrip = functions.act_strip(context)
newstrip.frame_start = strip.frame_start\
- strip.animation_offset_start
tin = strip.frame_offset_start + strip.frame_start
tout = tin + strip.frame_final_duration
# print(newstrip.frame_start, strip.frame_start, tin, tout)
functions.triminout(newstrip, tin, tout)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
else:
try:
newstrip = bpy.ops.sequencer.movieclip_strip_add(
replace_sel=True, overlap=False,
clip=data.name
)
newstrip = functions.act_strip(context)
newstrip.frame_start = strip.frame_start\
- strip.animation_offset_start
# i need to declare the strip this way in order
# to get triminout() working
clip = bpy.context.scene.sequence_editor.sequences[
newstrip.name
]
# i cannot change these movie clip attributes via scripts
# but it works in the python console...
# clip.animation_offset_start = strip.animation.offset_start
# clip.animation_offset_end = strip.animation.offset_end
# clip.frame_final_duration = strip.frame_final_duration
tin = strip.frame_offset_start + strip.frame_start
tout = tin + strip.frame_final_duration
# print(newstrip.frame_start, strip.frame_start, tin, tout)
functions.triminout(clip, tin, tout)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
elif strip.type == 'IMAGE':
# print("image")
base_dir = bpy.path.abspath(strip.directory)
scn.frame_current = strip.frame_start - strip.animation_offset_start
            # Search for the first frame of the sequence. This is mandatory
            # for hard-cut sequence strips to be converted correctly,
            # avoiding the creation of a new movie clip when it is not needed
filename = sorted(os.listdir(base_dir))[0]
path = os.path.join(base_dir, filename)
# print(path)
data_exists = False
for i in bpy.data.movieclips:
# print(i.filepath, path)
if i.filepath == path:
data_exists = True
data = i
# print(data_exists)
if data_exists is False:
try:
data = bpy.data.movieclips.load(filepath=path)
newstrip = bpy.ops.sequencer.movieclip_strip_add(
replace_sel=True, overlap=False,
clip=data.name
)
newstrip = functions.act_strip(context)
newstrip.frame_start = strip.frame_start\
- strip.animation_offset_start
clip = bpy.context.scene.sequence_editor.sequences[
newstrip.name
]
tin = strip.frame_offset_start + strip.frame_start
tout = tin + strip.frame_final_duration
# print(newstrip.frame_start, strip.frame_start, tin, tout)
functions.triminout(clip, tin, tout)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
else:
try:
newstrip = bpy.ops.sequencer.movieclip_strip_add(
replace_sel=True, overlap=False,
clip=data.name
)
newstrip = functions.act_strip(context)
newstrip.frame_start = strip.frame_start\
- strip.animation_offset_start
# need to declare the strip this way in order
# to get triminout() working
clip = bpy.context.scene.sequence_editor.sequences[
newstrip.name
]
# cannot change this attributes via scripts...
# but it works in the python console...
# clip.animation_offset_start = strip.animation.offset_start
# clip.animation_offset_end = strip.animation.offset_end
# clip.frame_final_duration = strip.frame_final_duration
tin = strip.frame_offset_start + strip.frame_start
tout = tin + strip.frame_final_duration
# print(newstrip.frame_start, strip.frame_start, tin, tout)
functions.triminout(clip, tin, tout)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
# show the new clip in a movie clip editor, if available.
        if strip.type in ('MOVIE', 'IMAGE'):
for a in context.window.screen.areas:
if a.type == 'CLIP_EDITOR':
a.spaces[0].clip = data
return {'FINISHED'}
# Open image with editor
class Sequencer_Extra_Edit(Operator):
bl_label = "Open with Editor"
bl_idname = "sequencerextra.edit"
bl_description = "Open with Movie Clip or Image Editor"
@classmethod
def poll(self, context):
strip = functions.act_strip(context)
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return strip.type in ('MOVIE', 'IMAGE')
else:
return False
def execute(self, context):
strip = functions.act_strip(context)
scn = context.scene
data_exists = False
if strip.type == 'MOVIE':
path = strip.filepath
for i in bpy.data.movieclips:
if i.filepath == path:
data_exists = True
data = i
if data_exists is False:
try:
data = bpy.data.movieclips.load(filepath=path)
except:
self.report({'ERROR_INVALID_INPUT'}, "Error loading file")
return {'CANCELLED'}
elif strip.type == 'IMAGE':
base_dir = bpy.path.abspath(strip.directory)
strip_elem = strip.strip_elem_from_frame(scn.frame_current)
elem_name = strip_elem.filename
path = base_dir + elem_name
for i in bpy.data.images:
if i.filepath == path:
data_exists = True
data = i
if data_exists is False:
try:
data = bpy.data.images.load(filepath=path)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
if strip.type == 'MOVIE':
for a in context.window.screen.areas:
if a.type == 'CLIP_EDITOR':
a.spaces[0].clip = data
elif strip.type == 'IMAGE':
for a in context.window.screen.areas:
if a.type == 'IMAGE_EDITOR':
a.spaces[0].image = data
return {'FINISHED'}
# Open image with external editor
class Sequencer_Extra_EditExternally(Operator):
bl_label = "Open with External Editor"
bl_idname = "sequencerextra.editexternally"
bl_description = "Open with the default external image editor"
@classmethod
def poll(self, context):
strip = functions.act_strip(context)
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return strip.type == 'IMAGE'
else:
return False
def execute(self, context):
strip = functions.act_strip(context)
scn = context.scene
base_dir = bpy.path.abspath(strip.directory)
strip_elem = strip.strip_elem_from_frame(scn.frame_current)
path = base_dir + strip_elem.filename
try:
bpy.ops.image.external_edit(filepath=path)
except:
self.report({'ERROR_INVALID_INPUT'},
"Please specify an Image Editor in Preferences > File")
return {'CANCELLED'}
return {'FINISHED'}
# File name to strip name
class Sequencer_Extra_FileNameToStripName(Operator):
bl_label = "File Name to Selected Strips Name"
bl_idname = "sequencerextra.striprename"
bl_description = "Set strip name to input file name"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return scn.sequence_editor.sequences
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
selection = False
for i in seq.sequences:
if i.select is True:
if i.type == 'IMAGE' and not i.mute:
selection = True
i.name = i.elements[0].filename
if (i.type == 'SOUND' or i.type == 'MOVIE') and not i.mute:
selection = True
i.name = bpy.path.display_name_from_filepath(i.filepath)
if selection is False:
self.report({'ERROR_INVALID_INPUT'},
"No image or movie strip selected")
return {'CANCELLED'}
return {'FINISHED'}
# Navigate up
class Sequencer_Extra_NavigateUp(Operator):
bl_label = "Navigate Up"
bl_idname = "sequencerextra.navigateup"
bl_description = "Move to Parent Timeline"
@classmethod
def poll(self, context):
try:
if context.scene.sequence_editor.meta_stack:
return True
return False
except:
return False
def execute(self, context):
if (functions.act_strip(context)):
strip = functions.act_strip(context)
seq_type = strip.type
if seq_type == 'META':
context.scene.sequence_editor.active_strip = None
bpy.ops.sequencer.meta_toggle()
return {'FINISHED'}
# Ripple delete
class Sequencer_Extra_RippleDelete(Operator):
bl_label = "Ripple Delete"
bl_idname = "sequencerextra.rippledelete"
bl_description = "Delete a strip and shift back following ones"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
# strip = functions.act_strip(context)
for strip in context.selected_editable_sequences:
cut_frame = strip.frame_final_start
next_edit = 300000
bpy.ops.sequencer.select_all(action='DESELECT')
strip.select = True
bpy.ops.sequencer.delete()
striplist = []
for i in seq.sequences:
try:
if (i.frame_final_start > cut_frame and
not i.mute):
if i.frame_final_start < next_edit:
next_edit = i.frame_final_start
if not i.mute:
striplist.append(i)
except AttributeError:
pass
if next_edit == 300000:
return {'FINISHED'}
ripple_length = next_edit - cut_frame
            for s in striplist:
                try:
                    if s.frame_final_start > cut_frame:
                        s.frame_start = s.frame_start - ripple_length
                except AttributeError:
                    pass
bpy.ops.sequencer.reload()
return {'FINISHED'}
# Ripple cut
class Sequencer_Extra_RippleCut(Operator):
bl_label = "Ripple Cut"
bl_idname = "sequencerextra.ripplecut"
bl_description = "Move a strip to buffer and shift back following ones"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
strip = functions.act_strip(context)
bpy.ops.sequencer.select_all(action='DESELECT')
strip.select = True
temp_cf = scn.frame_current
scn.frame_current = strip.frame_final_start
bpy.ops.sequencer.copy()
scn.frame_current = temp_cf
bpy.ops.sequencerextra.rippledelete()
return {'FINISHED'}
# Insert
class Sequencer_Extra_Insert(Operator):
bl_label = "Insert"
bl_idname = "sequencerextra.insert"
bl_description = ("Move active strip to current frame and shift "
"forward following ones")
bl_options = {'REGISTER', 'UNDO'}
singlechannel = BoolProperty(
name="Single Channel",
default=False
)
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
strip = functions.act_strip(context)
gap = strip.frame_final_duration
bpy.ops.sequencer.select_all(action='DESELECT')
current_frame = scn.frame_current
striplist = []
for i in seq.sequences:
try:
if (i.frame_final_start >= current_frame and
not i.mute):
if self.singlechannel is True:
if i.channel == strip.channel:
striplist.append(i)
else:
striplist.append(i)
except AttributeError:
pass
try:
bpy.ops.sequencerextra.selectcurrentframe('EXEC_DEFAULT',
mode='AFTER')
except:
self.report({'ERROR_INVALID_INPUT'}, "Execution Error, "
"check your Blender version")
return {'CANCELLED'}
        for s in striplist:
            try:
                if s.select is True:
                    s.frame_start += gap
            except AttributeError:
                pass
try:
diff = current_frame - strip.frame_final_start
strip.frame_start += diff
except AttributeError:
pass
strip = functions.act_strip(context)
scn.frame_current += strip.frame_final_duration
bpy.ops.sequencer.reload()
return {'FINISHED'}
# Copy strip properties
class Sequencer_Extra_CopyProperties(Operator):
bl_label = "Copy Properties"
bl_idname = "sequencerextra.copyproperties"
bl_description = "Copy properties of active strip to selected strips"
bl_options = {'REGISTER', 'UNDO'}
prop = EnumProperty(
name="Property",
items=[
# common
('name', 'Name', ''),
('blend_alpha', 'Opacity', ''),
('blend_type', 'Blend Mode', ''),
('animation_offset', 'Input - Trim Duration', ''),
# non-sound
('use_translation', 'Input - Image Offset', ''),
('crop', 'Input - Image Crop', ''),
('proxy', 'Proxy / Timecode', ''),
('strobe', 'Filter - Strobe', ''),
('color_multiply', 'Filter - Multiply', ''),
('color_saturation', 'Filter - Saturation', ''),
('deinterlace', 'Filter - De-Interlace', ''),
('flip', 'Filter - Flip', ''),
('float', 'Filter - Convert Float', ''),
('alpha_mode', 'Filter - Alpha Mode', ''),
('reverse', 'Filter - Backwards', ''),
# sound
('pan', 'Sound - Pan', ''),
('pitch', 'Sound - Pitch', ''),
('volume', 'Sound - Volume', ''),
('cache', 'Sound - Caching', ''),
# image
('directory', 'Image - Directory', ''),
# movie
('mpeg_preseek', 'Movie - MPEG Preseek', ''),
('stream_index', 'Movie - Stream Index', ''),
# wipe
('wipe', 'Effect - Wipe', ''),
# transform
('transform', 'Effect - Transform', ''),
# color
('color', 'Effect - Color', ''),
# speed
('speed', 'Effect - Speed', ''),
# multicam
('multicam_source', 'Effect - Multicam Source', ''),
# effect
('effect_fader', 'Effect - Effect Fader', ''),
],
default='blend_alpha'
)
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
strip = functions.act_strip(context)
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
for i in seq.sequences:
if (i.select is True and not i.mute):
try:
if self.prop == 'name':
i.name = strip.name
elif self.prop == 'blend_alpha':
i.blend_alpha = strip.blend_alpha
elif self.prop == 'blend_type':
i.blend_type = strip.blend_type
elif self.prop == 'animation_offset':
i.animation_offset_start = strip.animation_offset_start
i.animation_offset_end = strip.animation_offset_end
elif self.prop == 'use_translation':
i.use_translation = strip.use_translation
i.transform.offset_x = strip.transform.offset_x
i.transform.offset_y = strip.transform.offset_y
elif self.prop == 'crop':
i.use_crop = strip.use_crop
i.crop.min_x = strip.crop.min_x
i.crop.min_y = strip.crop.min_y
i.crop.max_x = strip.crop.max_x
i.crop.max_y = strip.crop.max_y
elif self.prop == 'proxy':
i.use_proxy = strip.use_proxy
                        p = strip.proxy.use_proxy_custom_directory  # pep8
i.proxy.use_proxy_custom_directory = p
i.proxy.use_proxy_custom_file = strip.proxy.use_proxy_custom_file
i.proxy.build_100 = strip.proxy.build_100
i.proxy.build_25 = strip.proxy.build_25
i.proxy.build_50 = strip.proxy.build_50
i.proxy.build_75 = strip.proxy.build_75
i.proxy.directory = strip.proxy.directory
i.proxy.filepath = strip.proxy.filepath
i.proxy.quality = strip.proxy.quality
i.proxy.timecode = strip.proxy.timecode
i.proxy.use_overwrite = strip.proxy.use_overwrite
elif self.prop == 'strobe':
i.strobe = strip.strobe
elif self.prop == 'color_multiply':
i.color_multiply = strip.color_multiply
elif self.prop == 'color_saturation':
i.color_saturation = strip.color_saturation
elif self.prop == 'deinterlace':
i.use_deinterlace = strip.use_deinterlace
elif self.prop == 'flip':
i.use_flip_x = strip.use_flip_x
i.use_flip_y = strip.use_flip_y
elif self.prop == 'float':
i.use_float = strip.use_float
elif self.prop == 'alpha_mode':
i.alpha_mode = strip.alpha_mode
elif self.prop == 'reverse':
i.use_reverse_frames = strip.use_reverse_frames
elif self.prop == 'pan':
i.pan = strip.pan
elif self.prop == 'pitch':
i.pitch = strip.pitch
elif self.prop == 'volume':
i.volume = strip.volume
elif self.prop == 'cache':
i.use_memory_cache = strip.use_memory_cache
elif self.prop == 'directory':
i.directory = strip.directory
elif self.prop == 'mpeg_preseek':
i.mpeg_preseek = strip.mpeg_preseek
elif self.prop == 'stream_index':
i.stream_index = strip.stream_index
elif self.prop == 'wipe':
i.angle = strip.angle
i.blur_width = strip.blur_width
i.direction = strip.direction
i.transition_type = strip.transition_type
elif self.prop == 'transform':
i.interpolation = strip.interpolation
i.rotation_start = strip.rotation_start
i.use_uniform_scale = strip.use_uniform_scale
i.scale_start_x = strip.scale_start_x
i.scale_start_y = strip.scale_start_y
i.translation_unit = strip.translation_unit
i.translate_start_x = strip.translate_start_x
i.translate_start_y = strip.translate_start_y
elif self.prop == 'color':
i.color = strip.color
elif self.prop == 'speed':
i.use_default_fade = strip.use_default_fade
i.speed_factor = strip.speed_factor
i.use_as_speed = strip.use_as_speed
i.scale_to_length = strip.scale_to_length
i.multiply_speed = strip.multiply_speed
i.use_frame_blend = strip.use_frame_blend
elif self.prop == 'multicam_source':
i.multicam_source = strip.multicam_source
elif self.prop == 'effect_fader':
i.use_default_fade = strip.use_default_fade
i.effect_fader = strip.effect_fader
except:
pass
bpy.ops.sequencer.reload()
return {'FINISHED'}
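def _example_copy_opacity_to_selection():
    # Illustrative usage sketch (added, not in the original add-on): copy the
    # active strip's opacity to every selected, unmuted strip via the
    # operator above.
    bpy.ops.sequencerextra.copyproperties(prop='blend_alpha')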
# Fade in and out
class Sequencer_Extra_FadeInOut(Operator):
bl_idname = "sequencerextra.fadeinout"
bl_label = "Fade..."
bl_description = "Fade volume or opacity of active strip"
bl_options = {'REGISTER', 'UNDO'}
mode = EnumProperty(
name='Direction',
items=(
('IN', "Fade In...", ""),
('OUT', "Fade Out...", ""),
('INOUT', "Fade In and Out...", "")),
default='IN',
)
fade_duration = IntProperty(
name='Duration',
description='Number of frames to fade',
min=1, max=250,
default=25)
fade_amount = FloatProperty(
name='Amount',
description='Maximum value of fade',
min=0.0,
max=100.0,
default=1.0)
@classmethod
def poll(cls, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
seq = context.scene.sequence_editor
scn = context.scene
strip = seq.active_strip
tmp_current_frame = context.scene.frame_current
if strip.type == 'SOUND':
            if self.mode == 'OUT':
scn.frame_current = strip.frame_final_end - self.fade_duration
strip.volume = self.fade_amount
strip.keyframe_insert('volume')
scn.frame_current = strip.frame_final_end
strip.volume = 0
strip.keyframe_insert('volume')
            elif self.mode == 'INOUT':
scn.frame_current = strip.frame_final_start
strip.volume = 0
strip.keyframe_insert('volume')
scn.frame_current += self.fade_duration
strip.volume = self.fade_amount
strip.keyframe_insert('volume')
scn.frame_current = strip.frame_final_end - self.fade_duration
strip.volume = self.fade_amount
strip.keyframe_insert('volume')
scn.frame_current = strip.frame_final_end
strip.volume = 0
strip.keyframe_insert('volume')
else:
scn.frame_current = strip.frame_final_start
strip.volume = 0
strip.keyframe_insert('volume')
scn.frame_current += self.fade_duration
strip.volume = self.fade_amount
strip.keyframe_insert('volume')
else:
            if self.mode == 'OUT':
scn.frame_current = strip.frame_final_end - self.fade_duration
strip.blend_alpha = self.fade_amount
strip.keyframe_insert('blend_alpha')
scn.frame_current = strip.frame_final_end
strip.blend_alpha = 0
strip.keyframe_insert('blend_alpha')
            elif self.mode == 'INOUT':
scn.frame_current = strip.frame_final_start
strip.blend_alpha = 0
strip.keyframe_insert('blend_alpha')
scn.frame_current += self.fade_duration
strip.blend_alpha = self.fade_amount
strip.keyframe_insert('blend_alpha')
scn.frame_current = strip.frame_final_end - self.fade_duration
strip.blend_alpha = self.fade_amount
strip.keyframe_insert('blend_alpha')
scn.frame_current = strip.frame_final_end
strip.blend_alpha = 0
strip.keyframe_insert('blend_alpha')
else:
scn.frame_current = strip.frame_final_start
strip.blend_alpha = 0
strip.keyframe_insert('blend_alpha')
scn.frame_current += self.fade_duration
strip.blend_alpha = self.fade_amount
strip.keyframe_insert('blend_alpha')
scn.frame_current = tmp_current_frame
scn.kr_default_fade_duration = self.fade_duration
scn.kr_default_fade_amount = self.fade_amount
return{'FINISHED'}
def invoke(self, context, event):
scn = context.scene
functions.initSceneProperties(context)
self.fade_duration = scn.kr_default_fade_duration
self.fade_amount = scn.kr_default_fade_amount
return context.window_manager.invoke_props_dialog(self)
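def _example_fade_in_and_out():
    # Illustrative usage sketch (added, not in the original add-on): keyframe
    # a one-second fade at both ends of the active strip, assuming a 25 fps
    # scene; adjust fade_duration for other frame rates.
    bpy.ops.sequencerextra.fadeinout(mode='INOUT', fade_duration=25,
                                     fade_amount=1.0)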
# Extend to fill
class Sequencer_Extra_ExtendToFill(Operator):
bl_idname = "sequencerextra.extendtofill"
bl_label = "Extend to Fill"
bl_description = "Extend active strip forward to fill adjacent space"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
strip = functions.act_strip(context)
chn = strip.channel
stf = strip.frame_final_end
enf = 300000
for i in seq.sequences:
ffs = i.frame_final_start
if (i.channel == chn and ffs > stf):
if ffs < enf:
enf = ffs
if enf == 300000 and stf < scn.frame_end:
enf = scn.frame_end
if enf == 300000 or enf == stf:
self.report({'ERROR_INVALID_INPUT'}, 'Unable to extend')
return {'CANCELLED'}
else:
strip.frame_final_end = enf
bpy.ops.sequencer.reload()
return {'FINISHED'}
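def _example_extend_to_fill():
    # Illustrative usage sketch (added, not in the original add-on): stretch
    # the active strip forward until the next strip on its channel, or the
    # scene end frame when the channel is empty ahead of it.
    bpy.ops.sequencerextra.extendtofill()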
# Place from file browser
class Sequencer_Extra_PlaceFromFileBrowser(Operator):
bl_label = "Place"
bl_idname = "sequencerextra.placefromfilebrowser"
bl_description = "Place or insert active file from File Browser"
bl_options = {'REGISTER', 'UNDO'}
insert = BoolProperty(
name="Insert",
default=False
)
def execute(self, context):
scn = context.scene
for a in context.window.screen.areas:
if a.type == 'FILE_BROWSER':
params = a.spaces[0].params
break
try:
params
except UnboundLocalError:
self.report({'ERROR_INVALID_INPUT'}, 'No visible File Browser')
return {'CANCELLED'}
if params.filename == '':
self.report({'ERROR_INVALID_INPUT'}, 'No file selected')
return {'CANCELLED'}
path = os.path.join(params.directory, params.filename)
frame = context.scene.frame_current
strip_type = functions.detect_strip_type(params.filename)
try:
if strip_type == 'IMAGE':
image_file = []
filename = {"name": params.filename}
image_file.append(filename)
f_in = scn.frame_current
f_out = f_in + scn.render.fps - 1
bpy.ops.sequencer.image_strip_add(files=image_file,
directory=params.directory, frame_start=f_in,
frame_end=f_out, relative_path=False)
elif strip_type == 'MOVIE':
bpy.ops.sequencer.movie_strip_add(filepath=path,
frame_start=frame, relative_path=False)
elif strip_type == 'SOUND':
bpy.ops.sequencer.sound_strip_add(filepath=path,
frame_start=frame, relative_path=False)
else:
self.report({'ERROR_INVALID_INPUT'}, 'Invalid file format')
return {'CANCELLED'}
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
if self.insert is True:
try:
striplist = []
for i in bpy.context.selected_editable_sequences:
if (i.select is True and i.type == "SOUND"):
striplist.append(i)
bpy.ops.sequencerextra.insert()
if striplist[0]:
striplist[0].frame_start = frame
except:
self.report({'ERROR_INVALID_INPUT'}, "Execution Error, "
"check your Blender version")
return {'CANCELLED'}
else:
strip = functions.act_strip(context)
scn.frame_current += strip.frame_final_duration
bpy.ops.sequencer.reload()
return {'FINISHED'}
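def _example_place_and_insert_from_browser():
    # Illustrative usage sketch (added, not in the original add-on): place the
    # file currently selected in a visible File Browser at the current frame
    # and ripple-shift the following strips forward.
    bpy.ops.sequencerextra.placefromfilebrowser(insert=True)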
# Select strips on same channel
class Sequencer_Extra_SelectSameChannel(Operator):
bl_label = "Select Strips on the Same Channel"
bl_idname = "sequencerextra.selectsamechannel"
bl_description = "Select strips on the same channel as active one"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return True
else:
return False
def execute(self, context):
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
bpy.ops.sequencer.select_active_side(side="LEFT")
bpy.ops.sequencer.select_active_side(side="RIGHT")
return {'FINISHED'}
# Current-frame-aware select
class Sequencer_Extra_SelectCurrentFrame(Operator):
bl_label = "Current-Frame-Aware Select"
bl_idname = "sequencerextra.selectcurrentframe"
bl_description = "Select strips according to current frame"
bl_options = {'REGISTER', 'UNDO'}
mode = EnumProperty(
name='Mode',
items=(
('BEFORE', 'Before Current Frame', ''),
('AFTER', 'After Current Frame', ''),
('ON', 'On Current Frame', '')),
default='BEFORE',
)
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return scn.sequence_editor.sequences
else:
return False
def execute(self, context):
mode = self.mode
scn = context.scene
seq = scn.sequence_editor
cf = scn.frame_current
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
if mode == 'AFTER':
for i in seq.sequences:
try:
if (i.frame_final_start >= cf and not i.mute):
i.select = True
except AttributeError:
pass
elif mode == 'ON':
for i in seq.sequences:
try:
if (i.frame_final_start <= cf and
i.frame_final_end > cf and
not i.mute):
i.select = True
except AttributeError:
pass
else:
for i in seq.sequences:
try:
if (i.frame_final_end < cf and not i.mute):
i.select = True
except AttributeError:
pass
return {'FINISHED'}
# Select by type
class Sequencer_Extra_SelectAllByType(Operator):
bl_label = "All by Type"
bl_idname = "sequencerextra.select_all_by_type"
bl_description = "Select all the strips of the same type"
bl_options = {'REGISTER', 'UNDO'}
type = EnumProperty(
name="Strip Type",
items=(
('ACTIVE', 'Same as Active Strip', ''),
('IMAGE', 'Image', ''),
('META', 'Meta', ''),
('SCENE', 'Scene', ''),
('MOVIE', 'Movie', ''),
('SOUND', 'Sound', ''),
('TRANSFORM', 'Transform', ''),
('COLOR', 'Color', '')),
default='ACTIVE',
)
@classmethod
def poll(self, context):
scn = context.scene
if scn and scn.sequence_editor:
return scn.sequence_editor.sequences
else:
return False
def execute(self, context):
strip_type = self.type
scn = context.scene
seq = scn.sequence_editor
meta_level = len(seq.meta_stack)
if meta_level > 0:
seq = seq.meta_stack[meta_level - 1]
active_strip = functions.act_strip(context)
if strip_type == 'ACTIVE':
if active_strip is None:
self.report({'ERROR_INVALID_INPUT'},
'No active strip')
return {'CANCELLED'}
strip_type = active_strip.type
striplist = []
for i in seq.sequences:
try:
if (i.type == strip_type and not i.mute):
striplist.append(i)
except AttributeError:
pass
        for strp in striplist:
            try:
                strp.select = True
            except AttributeError:
                pass
return {'FINISHED'}
# Open in movie clip editor from file browser
class Clip_Extra_OpenFromFileBrowser(Operator):
bl_label = "Open from File Browser"
bl_idname = "clipextra.openfromfilebrowser"
bl_description = "Load a Movie or Image Sequence from File Browser"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
for a in context.window.screen.areas:
if a.type == 'FILE_BROWSER':
params = a.spaces[0].params
break
try:
params
except:
self.report({'ERROR_INVALID_INPUT'}, 'No visible File Browser')
return {'CANCELLED'}
if params.filename == '':
self.report({'ERROR_INVALID_INPUT'}, 'No file selected')
return {'CANCELLED'}
        path = os.path.join(params.directory, params.filename)
strip_type = functions.detect_strip_type(params.filename)
data_exists = False
if strip_type in ('MOVIE', 'IMAGE'):
for i in bpy.data.movieclips:
if i.filepath == path:
data_exists = True
data = i
if data_exists is False:
try:
data = bpy.data.movieclips.load(filepath=path)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
else:
self.report({'ERROR_INVALID_INPUT'}, 'Invalid file format')
return {'CANCELLED'}
for a in context.window.screen.areas:
if a.type == 'CLIP_EDITOR':
a.spaces[0].clip = data
return {'FINISHED'}
# Open in movie clip editor from sequencer
class Clip_Extra_OpenActiveStrip(Operator):
bl_label = "Open Active Strip"
bl_idname = "clipextra.openactivestrip"
bl_description = "Load a Movie or Image Sequence from Sequence Editor"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
scn = context.scene
strip = functions.act_strip(context)
if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
return strip.type in ('MOVIE', 'IMAGE')
else:
return False
def execute(self, context):
strip = functions.act_strip(context)
data_exists = False
if strip.type == 'MOVIE':
path = strip.filepath
elif strip.type == 'IMAGE':
base_dir = bpy.path.relpath(strip.directory)
filename = strip.elements[0].filename
path = base_dir + '/' + filename
else:
self.report({'ERROR_INVALID_INPUT'}, 'Invalid file format')
return {'CANCELLED'}
for i in bpy.data.movieclips:
if i.filepath == path:
data_exists = True
data = i
if data_exists is False:
try:
data = bpy.data.movieclips.load(filepath=path)
except:
self.report({'ERROR_INVALID_INPUT'}, 'Error loading file')
return {'CANCELLED'}
for a in context.window.screen.areas:
if a.type == 'CLIP_EDITOR':
a.spaces[0].clip = data
return {'FINISHED'}
# Jog / Shuttle
class Sequencer_Extra_JogShuttle(Operator):
bl_label = "Jog/Shuttle"
bl_idname = "sequencerextra.jogshuttle"
bl_description = ("Jog through the current sequence\n"
"Left Mouse button to confirm, Right mouse\Esc to cancel")
def execute(self, context):
scn = context.scene
start_frame = scn.frame_start
end_frame = scn.frame_end
duration = end_frame - start_frame
diff = self.x - self.init_x
diff /= 5
diff = int(diff)
extended_frame = diff + (self.init_current_frame - start_frame)
looped_frame = extended_frame % (duration + 1)
target_frame = start_frame + looped_frame
context.scene.frame_current = target_frame
def modal(self, context, event):
if event.type == 'MOUSEMOVE':
self.x = event.mouse_x
self.execute(context)
elif event.type == 'LEFTMOUSE':
return {'FINISHED'}
elif event.type in ('RIGHTMOUSE', 'ESC'):
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
scn = context.scene
self.x = event.mouse_x
self.init_x = self.x
self.init_current_frame = scn.frame_current
self.execute(context)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
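def _example_register_operators():
    # Hedged registration sketch (added for illustration): the real add-on's
    # register() likely covers more classes plus menus and keymaps, but
    # Operator subclasses are typically exposed to bpy.ops like this.
    for cls in (Sequencer_Extra_RippleCut, Sequencer_Extra_Insert,
                Sequencer_Extra_CopyProperties, Sequencer_Extra_FadeInOut,
                Sequencer_Extra_JogShuttle):
        bpy.utils.register_class(cls)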
| 37.216424
| 89
| 0.540181
|
32a23449344afef9c12ba745afcdde2a2f2b43ff
| 71
|
py
|
Python
|
rq_config.py
|
dmitrypol/dlm
|
768bfe8c79f023822ca391e5ee5210fb40417332
|
[
"MIT"
] | 10
|
2020-06-13T09:15:17.000Z
|
2021-08-23T14:00:09.000Z
|
rq_config.py
|
dmitrypol/dlm
|
768bfe8c79f023822ca391e5ee5210fb40417332
|
[
"MIT"
] | null | null | null |
rq_config.py
|
dmitrypol/dlm
|
768bfe8c79f023822ca391e5ee5210fb40417332
|
[
"MIT"
] | 1
|
2020-05-15T05:33:47.000Z
|
2020-05-15T05:33:47.000Z
|
import os
REDIS_URL = f"redis://{os.environ.get('REDIS_HOST')}:6379/1"
| 23.666667
| 60
| 0.704225
|
4bca7ea1c751baeac4c6ef81d0bfe184a761381d
| 12,359
|
py
|
Python
|
python/rrc_simulation/gym_wrapper/envs/cube_env.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
python/rrc_simulation/gym_wrapper/envs/cube_env.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
python/rrc_simulation/gym_wrapper/envs/cube_env.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
"""Gym environment for the Real Robot Challenge Phase 1 (Simulation)."""
import enum
import numpy as np
import gym
from rrc_simulation import TriFingerPlatform
from rrc_simulation import visual_objects
from rrc_simulation.tasks import move_cube
class RandomInitializer:
"""Initializer that samples random initial states and goals."""
def __init__(self, difficulty):
"""Initialize.
Args:
difficulty (int): Difficulty level for sampling goals.
"""
self.difficulty = difficulty
def get_initial_state(self):
"""Get a random initial object pose (always on the ground)."""
return move_cube.sample_goal(difficulty=-1)
def get_goal(self):
"""Get a random goal depending on the difficulty."""
return move_cube.sample_goal(difficulty=self.difficulty)
class FixedInitializer:
"""Initializer that uses fixed values for initial pose and goal."""
def __init__(self, difficulty, initial_state, goal):
"""Initialize.
Args:
difficulty (int): Difficulty level of the goal. This is still
needed even for a fixed goal, as it is also used for computing
the reward (the cost function is different for the different
levels).
initial_state (move_cube.Pose): Initial pose of the object.
goal (move_cube.Pose): Goal pose of the object.
Raises:
Exception: If initial_state or goal are not valid. See
:meth:`move_cube.validate_goal` for more information.
"""
move_cube.validate_goal(initial_state)
move_cube.validate_goal(goal)
self.difficulty = difficulty
self.initial_state = initial_state
self.goal = goal
def get_initial_state(self):
"""Get the initial state that was set in the constructor."""
return self.initial_state
def get_goal(self):
"""Get the goal that was set in the constructor."""
return self.goal
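def _example_fixed_initializer():
    # Illustrative sketch (added, not part of the original module): build a
    # reproducible initializer by sampling one initial pose and one level-1
    # goal up front and reusing them for every episode.
    initial_state = move_cube.sample_goal(difficulty=-1)
    goal = move_cube.sample_goal(difficulty=1)
    return FixedInitializer(1, initial_state, goal)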
class ActionType(enum.Enum):
"""Different action types that can be used to control the robot."""
#: Use pure torque commands. The action is a list of torques (one per
#: joint) in this case.
TORQUE = enum.auto()
#: Use joint position commands. The action is a list of angular joint
#: positions (one per joint) in this case. Internally a PD controller is
#: executed for each action to determine the torques that are applied to
#: the robot.
POSITION = enum.auto()
#: Use both torque and position commands. In this case the action is a
#: dictionary with keys "torque" and "position" which contain the
#: corresponding lists of values (see above). The torques resulting from
#: the position controller are added to the torques in the action before
#: applying them to the robot.
TORQUE_AND_POSITION = enum.auto()
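def _example_torque_and_position_action(num_joints=9):
    # Illustrative sketch (added, not part of the original module): shape of a
    # TORQUE_AND_POSITION action, i.e. a dict with one command vector per
    # mode. The 9-joint count is an assumption for the TriFinger robot.
    return {
        "torque": np.zeros(num_joints),
        "position": np.zeros(num_joints),
    }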
class CubeEnv(gym.GoalEnv):
"""Gym environment for moving cubes with simulated TriFingerPro."""
def __init__(
self,
initializer,
action_type=ActionType.POSITION,
frameskip=1,
visualization=False,
):
"""Initialize.
Args:
initializer: Initializer class for providing initial cube pose and
goal pose. See :class:`RandomInitializer` and
:class:`FixedInitializer`.
action_type (ActionType): Specify which type of actions to use.
See :class:`ActionType` for details.
frameskip (int): Number of actual control steps to be performed in
one call of step().
visualization (bool): If true, the pyBullet GUI is run for
visualization.
"""
# Basic initialization
# ====================
self.initializer = initializer
self.action_type = action_type
self.visualization = visualization
# TODO: The name "frameskip" makes sense for an atari environment but
# not really for our scenario. The name is also misleading as
# "frameskip = 1" suggests that one frame is skipped while it actually
# means "do one step per step" (i.e. no skip).
if frameskip < 1:
raise ValueError("frameskip cannot be less than 1.")
self.frameskip = frameskip
# will be initialized in reset()
self.platform = None
# Create the action and observation spaces
# ========================================
spaces = TriFingerPlatform.spaces
object_state_space = gym.spaces.Dict(
{
"position": spaces.object_position.gym,
"orientation": spaces.object_orientation.gym,
}
)
if self.action_type == ActionType.TORQUE:
self.action_space = spaces.robot_torque.gym
elif self.action_type == ActionType.POSITION:
self.action_space = spaces.robot_position.gym
elif self.action_type == ActionType.TORQUE_AND_POSITION:
self.action_space = gym.spaces.Dict(
{
"torque": spaces.robot_torque.gym,
"position": spaces.robot_position.gym,
}
)
else:
raise ValueError("Invalid action_type")
self.observation_space = gym.spaces.Dict(
{
"observation": gym.spaces.Dict(
{
"position": spaces.robot_position.gym,
"velocity": spaces.robot_velocity.gym,
"torque": spaces.robot_torque.gym,
}
),
"desired_goal": object_state_space,
"achieved_goal": object_state_space,
}
)
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the reward for the given achieved and desired goal.
Args:
achieved_goal (dict): Current pose of the object.
desired_goal (dict): Goal pose of the object.
info (dict): An info dictionary containing a field "difficulty"
which specifies the difficulty level.
Returns:
float: The reward that corresponds to the provided achieved goal
            w.r.t. the desired goal. Note that the following should always
hold true::
ob, reward, done, info = env.step()
assert reward == env.compute_reward(
ob['achieved_goal'],
ob['desired_goal'],
info,
)
"""
return np.float32((move_cube.evaluate_state(
move_cube.Pose.from_dict(desired_goal),
move_cube.Pose.from_dict(achieved_goal),
info["difficulty"],
) < 0.1))
def step(self, action):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling
``reset()`` to reset this environment's state.
Args:
action: An action provided by the agent (depends on the selected
:class:`ActionType`).
Returns:
tuple:
- observation (dict): agent's observation of the current
environment.
            - reward (float): amount of reward returned after previous action.
- done (bool): whether the episode has ended, in which case further
step() calls will return undefined results.
- info (dict): info dictionary containing the difficulty level of
the goal.
"""
if self.platform is None:
raise RuntimeError("Call `reset()` before starting to step.")
if not self.action_space.contains(action):
raise ValueError(
"Given action is not contained in the action space."
)
num_steps = self.frameskip
# ensure episode length is not exceeded due to frameskip
step_count_after = self.step_count + num_steps
if step_count_after > move_cube.episode_length:
excess = step_count_after - move_cube.episode_length
num_steps = max(1, num_steps - excess)
reward = 0.0
for _ in range(num_steps):
self.step_count += 1
if self.step_count > move_cube.episode_length:
raise RuntimeError("Exceeded number of steps for one episode.")
# send action to robot
robot_action = self._gym_action_to_robot_action(action)
t = self.platform.append_desired_action(robot_action)
# Use observations of step t + 1 to follow what would be expected
# in a typical gym environment. Note that on the real robot, this
# will not be possible
observation = self._create_observation(t + 1)
reward += self.compute_reward(
observation["achieved_goal"],
observation["desired_goal"],
self.info,
)
is_done = self.step_count == move_cube.episode_length
return observation, reward, is_done, self.info
def reset(self):
# reset simulation
del self.platform
# initialize simulation
initial_robot_position = (
TriFingerPlatform.spaces.robot_position.default
)
initial_object_pose = self.initializer.get_initial_state()
goal_object_pose = self.initializer.get_goal()
self.platform = TriFingerPlatform(
visualization=self.visualization,
initial_robot_position=initial_robot_position,
initial_object_pose=initial_object_pose,
)
self.goal = {
"position": goal_object_pose.position,
"orientation": goal_object_pose.orientation,
}
# visualize the goal
if self.visualization:
self.goal_marker = visual_objects.CubeMarker(
width=0.065,
position=goal_object_pose.position,
orientation=goal_object_pose.orientation,
physicsClientId=self.platform.simfinger._pybullet_client_id,
)
self.info = {"difficulty": self.initializer.difficulty}
self.step_count = 0
return self._create_observation(0)
def seed(self, seed=None):
"""Sets the seed for this env’s random number generator.
.. note::
Spaces need to be seeded separately. E.g. if you want to sample
actions directly from the action space using
``env.action_space.sample()`` you can set a seed there using
``env.action_space.seed()``.
Returns:
List of seeds used by this environment. This environment only uses
a single seed, so the list contains only one element.
"""
self.np_random, seed = gym.utils.seeding.np_random(seed)
move_cube.random = self.np_random
return [seed]
def _create_observation(self, t):
robot_observation = self.platform.get_robot_observation(t)
object_observation = self.platform.get_object_pose(t)
observation = {
"observation": {
"position": robot_observation.position,
"velocity": robot_observation.velocity,
"torque": robot_observation.torque,
},
"desired_goal": self.goal,
"achieved_goal": {
"position": object_observation.position,
"orientation": object_observation.orientation,
},
}
return observation
def _gym_action_to_robot_action(self, gym_action):
# construct robot action depending on action type
if self.action_type == ActionType.TORQUE:
robot_action = self.platform.Action(torque=gym_action)
elif self.action_type == ActionType.POSITION:
robot_action = self.platform.Action(position=gym_action)
elif self.action_type == ActionType.TORQUE_AND_POSITION:
robot_action = self.platform.Action(
torque=gym_action["torque"], position=gym_action["position"]
)
else:
raise ValueError("Invalid action_type")
return robot_action
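def _example_random_rollout():
    # Illustrative usage sketch (added, not part of the original module): run
    # one episode with random position actions. Assumes the rrc_simulation
    # package is installed so the simulated platform can be created.
    env = CubeEnv(RandomInitializer(difficulty=1),
                  action_type=ActionType.POSITION, frameskip=1)
    observation = env.reset()
    is_done = False
    while not is_done:
        action = env.action_space.sample()
        observation, reward, is_done, info = env.step(action)
    return observation, reward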
| 36.243402
| 79
| 0.603528
|