Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 1bb125fc740d034c7426baf1e225b7c858e4ea41 | size: 356 | ext: py | lang: Python
repo: betagouv/euphrosyne | path: euphro_auth/api_urls.py | head: a67857a8716b5060cd9a2c6fa5f3d45c3fff435a | licenses: ["MIT"]
stars: 1 (2022-02-21T19:46:20.000Z to 2022-02-21T19:46:20.000Z) | issues: 37 (2021-10-18T18:33:26.000Z to 2022-03-31T12:38:38.000Z) | forks: 2 (2022-03-03T15:41:30.000Z to 2022-03-07T14:20:26.000Z)
content:
from django.urls import path
from rest_framework_simplejwt.views import TokenRefreshView
from .jwt.api_views import SessionTokenObtainPairView
urlpatterns = [
path(
"token/",
SessionTokenObtainPairView.as_view(),
name="token_obtain_pair",
),
path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
]
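# Added usage note (hypothetical mount point): if these routes are included
# under a prefix such as /api/, a client POSTs credentials to /api/token/ to
# obtain an access/refresh pair, then POSTs the refresh token to
# /api/token/refresh/ to renew the access token.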
avg_line_length: 25.428571 | max_line_length: 77 | alphanum_fraction: 0.72191
hexsha: 9b99fb38e6942d8d563c80dbf7ebda6ad4640cd3 | size: 2,374 | ext: py | lang: Python
repo: shin-sforzando/PAC2020-RPS | path: judge.py | head: ccc65ee95c0d0e0ffce34f07d667f1fd0306d7c5 | licenses: ["MIT"]
stars: null | issues: 8 (2020-08-18T11:51:19.000Z to 2020-08-18T21:42:45.000Z) | forks: null
content:
from typing import Dict
from typing import Type
from consequence import Consequence
from hand import Hand
from player import Player
from players.doraemon import Doraemon
from players.dorami import Dorami
from players.nobita import Nobita
from players.shizuka import Shizuka
from players.suneo import Suneo
player_dictionary: Dict[str, Type[Player]] = {
"源静香": Shizuka,
"ドラえもん": Doraemon,
"骨川スネ夫": Suneo,
"野比のび太": Nobita,
"ドラミ": Dorami,
}
class Judge:
def __init__(self, first_player: str, second_player: str):
        self.first_player: Player = player_dictionary[first_player](is_first=True)
        self.second_player: Player = player_dictionary[second_player](is_first=False)
self.history = []
def game(self):
first_hand = self.first_player.next_hand()
second_hand = self.second_player.next_hand()
consequence = self.judge(first_hand=first_hand, second_hand=second_hand)
result = (first_hand, second_hand, consequence)
self.history.append(result)
        if consequence is Consequence.Win:
            self.first_player.is_won = True
            self.second_player.is_won = False
        if consequence is Consequence.Lose:
            self.first_player.is_won = False
            self.second_player.is_won = True
return first_hand, second_hand, consequence
def get_converted_history(self):
return [(h[0].value, h[1].value, h[2].value) for h in self.history]
def get_result(self):
wins = [h for h in self.history if h[-1] == Consequence.Win]
return len(wins) / len(self.history)
@staticmethod
def judge(first_hand: Hand, second_hand: Hand):
if first_hand is second_hand:
return Consequence.Draw
if first_hand is Hand.G:
if second_hand is Hand.C:
return Consequence.Win
if second_hand is Hand.P:
return Consequence.Lose
if first_hand is Hand.C:
if second_hand is Hand.P:
return Consequence.Win
if second_hand is Hand.G:
return Consequence.Lose
if first_hand is Hand.P:
if second_hand is Hand.G:
return Consequence.Win
if second_hand is Hand.C:
return Consequence.Lose
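# Added usage sketch (hypothetical; assumes the player modules imported above
# are available on the path):
#
#   judge = Judge("ドラえもん", "野比のび太")
#   for _ in range(100):
#       judge.game()
#   print(judge.get_result())  # fraction of games the first player won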
avg_line_length: 33.43662 | max_line_length: 89 | alphanum_fraction: 0.655013
hexsha: 678c48f7c508dcd90c175c1cbe345f7e3c5ed0ff | size: 5,101 | ext: py | lang: Python
repo: cqjjjzr/conan-center-index | path: recipes/zlib/1.2.8/conanfile.py | head: 1e1ecf6e0032ce3d341d49a10737f70d9bdb45dd | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import os
import stat
import shutil
from conans import ConanFile, tools, CMake, AutoToolsBuildEnvironment
from conans.errors import ConanException, NotFoundException
class ZlibConan(ConanFile):
name = "zlib"
version = "1.2.8"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://zlib.net"
license = "Zlib"
description = ("A Massively Spiffy Yet Delicately Unobtrusive Compression Library "
"(Also Free, Not to Mention Unencumbered by Patents)")
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = "shared=False", "fPIC=True"
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
tools.rmdir(os.path.join(self._source_subfolder, "contrib"))
if not tools.os_info.is_windows:
configure_file = os.path.join(self._source_subfolder, "configure")
st = os.stat(configure_file)
os.chmod(configure_file, st.st_mode | stat.S_IEXEC)
def build(self):
if self.settings.os != "Windows":
with tools.chdir(self._source_subfolder):
env_build = AutoToolsBuildEnvironment(self)
if self.settings.arch == "x86" or self.settings.arch == "x86_64":
env_build.flags.append('-mstackrealign')
if self.settings.os == "Macos":
old_str = '-install_name $libdir/$SHAREDLIBM'
new_str = '-install_name $SHAREDLIBM'
tools.replace_in_file("./configure", old_str, new_str)
                # Zlib's configure does not accept these parameters (in 1.2.8)
env_build.configure("./", build=False, host=False, target=False)
env_build.make()
else:
cmake = CMake(self)
cmake.configure(build_dir=self._build_subfolder)
cmake.build()
def package(self):
# Extract the License/s from the header to a file
with tools.chdir(self._source_subfolder):
tmp = tools.load("zlib.h")
license_contents = tmp[2:tmp.find("*/", 1)]
tools.save("LICENSE", license_contents)
# Copy the license files
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
# Copying zlib.h, zutil.h, zconf.h
self.copy("*.h", "include", "%s" % self._source_subfolder, keep_path=False)
# Copying static and dynamic libs
if self.settings.os == "Windows":
suffix = "d" if self.settings.build_type == "Debug" else ""
self.copy(pattern="*.h", dst="include", src=self._build_subfolder, keep_path=False)
if self.options.shared:
self.copy(pattern="*.dll", dst="bin", src=self._build_subfolder, keep_path=False)
self.copy(pattern="libzlib.dll.a", dst="lib", src=os.path.join(self._build_subfolder, "lib"))
self.copy(pattern="zlib%s.lib" % suffix, dst="lib", src=os.path.join(self._build_subfolder, "lib"))
else:
self.copy(pattern="zlibstatic%s.lib" % suffix, dst="lib", src=os.path.join(self._build_subfolder, "lib"))
self.copy(pattern="libzlibstatic.a", dst="lib", src=os.path.join(self._build_subfolder, "lib"))
lib_path = os.path.join(self.package_folder, "lib")
if self.settings.compiler == "Visual Studio":
current_lib = os.path.join(lib_path, "zlibstatic%s.lib" % suffix)
shutil.move(current_lib, os.path.join(lib_path, "zlib%s.lib" % suffix))
elif self.settings.compiler == "gcc":
current_lib = os.path.join(lib_path, "libzlibstatic.a")
shutil.move(current_lib, os.path.join(lib_path, "libzlib.a"))
else:
if self.options.shared:
if self.settings.os == "Macos":
self.copy(pattern="*.dylib", dst="lib", src=self._source_subfolder, keep_path=False)
else:
self.copy(pattern="*.so*", dst="lib", src=self._source_subfolder, keep_path=False)
else:
self.copy(pattern="*.a", dst="lib", src=self._source_subfolder, keep_path=False)
def package_info(self):
self.cpp_info.names["pkg_config"] = "zlib"
if self.settings.os == "Windows":
self.cpp_info.libs = ['zlib']
if self.settings.build_type == "Debug" and self.settings.compiler == "Visual Studio":
self.cpp_info.libs[0] += "d"
else:
self.cpp_info.libs = ['z']
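# Added usage note (a sketch, assuming a Conan 1.x client and the
# conan-center-index recipe layout):
#
#   conan create . zlib/1.2.8@
#
# builds the package for the default profile; adding `-o zlib:shared=True`
# selects the shared-library variant instead.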
avg_line_length: 45.954955 | max_line_length: 121 | alphanum_fraction: 0.598902
hexsha: bc029681620d289812865f87585a1f85faf6f5da | size: 24,030 | ext: py | lang: Python
repo: utiasSTARS/GraphIK | path: graphik/solvers/trust_region.py | head: c2d05386bf9f9baf8ad146125bfebc3b73fccd14 | licenses: ["MIT"]
stars: 1 (2020-11-08T23:26:03.000Z to 2020-11-08T23:26:03.000Z) | issues: null | forks: null
content:
# References, taken from trustregions.m in manopt:
# Please cite the Manopt paper as well as the research paper:
# @Article{genrtr,
# Title = {Trust-region methods on {Riemannian} manifolds},
# Author = {Absil, P.-A. and Baker, C. G. and Gallivan, K. A.},
# Journal = {Foundations of Computational Mathematics},
# Year = {2007},
# Number = {3},
# Pages = {303--330},
# Volume = {7},
# Doi = {10.1007/s10208-005-0179-9}
# }
#
# See also: steepestdescent conjugategradient manopt/examples
# An explicit, general listing of this algorithm, with preconditioning,
# can be found in the following paper:
# @Article{boumal2015lowrank,
# Title = {Low-rank matrix completion via preconditioned optimization
# on the {G}rassmann manifold},
# Author = {Boumal, N. and Absil, P.-A.},
# Journal = {Linear Algebra and its Applications},
# Year = {2015},
# Pages = {200--239},
# Volume = {475},
# Doi = {10.1016/j.laa.2015.02.027},
# }
# When the Hessian is not specified, it is approximated with
# finite-differences of the gradient. The resulting method is called
# RTR-FD. Some convergence theory for it is available in this paper:
# @incollection{boumal2015rtrfd
# author={Boumal, N.},
# title={Riemannian trust regions with finite-difference Hessian
# approximations are globally convergent},
# year={2015},
# booktitle={Geometric Science of Information}
# }
# This file is part of Manopt: www.manopt.org.
# This code is an adaptation to Manopt of the original GenRTR code:
# RTR - Riemannian Trust-Region
# (c) 2004-2007, P.-A. Absil, C. G. Baker, K. A. Gallivan
# Florida State University
# School of Computational Science
# (http://www.math.fsu.edu/~cbaker/GenRTR/?page=download)
# See accompanying license file.
# The adaptation was executed by Nicolas Boumal.
# Ported to pymanopt by Jamie Townsend. January 2016.
from __future__ import print_function, division
import time
import numpy as np
# from numba import jit
from pymanopt.solvers.solver import Solver
if not hasattr(__builtins__, "xrange"):
xrange = range
class TrustRegions(Solver):
(
NEGATIVE_CURVATURE,
EXCEEDED_TR,
REACHED_TARGET_LINEAR,
REACHED_TARGET_SUPERLINEAR,
MAX_INNER_ITER,
MODEL_INCREASED,
) = range(6)
TCG_STOP_REASONS = {
NEGATIVE_CURVATURE: "negative curvature",
EXCEEDED_TR: "exceeded trust region",
REACHED_TARGET_LINEAR: "reached target residual-kappa (linear)",
REACHED_TARGET_SUPERLINEAR: "reached target residual-theta " "(superlinear)",
MAX_INNER_ITER: "maximum inner iterations",
MODEL_INCREASED: "model increased",
}
def __init__(
self,
miniter=3,
kappa=0.1,
theta=1.0,
rho_prime=0.1,
use_rand=False,
rho_regularization=1e3, # increasing reduces accuracy and comp. time
*args,
**kwargs
):
"""
Trust regions algorithm based on trustregions.m from the
Manopt MATLAB package.
Also included is the Truncated (Steihaug-Toint) Conjugate-Gradient
algorithm, based on tCG.m from the Manopt MATLAB package.
"""
super(TrustRegions, self).__init__(*args, **kwargs)
self.miniter = miniter
self.kappa = kappa
self.theta = theta
self.rho_prime = rho_prime
self.use_rand = use_rand
self.rho_regularization = rho_regularization
def solve(
self,
problem,
x=None,
mininner=1,
# maxinner=None,
maxinner=10000,
Delta_bar=None,
Delta0=None,
mincost=1e-12,
):
man = problem.manifold
verbosity = problem.verbosity
if maxinner is None:
maxinner = man.dim
# Set default Delta_bar and Delta0 separately to deal with additional
# logic: if Delta_bar is provided but not Delta0, let Delta0
# automatically be some fraction of the provided Delta_bar.
if Delta_bar is None:
try:
Delta_bar = man.typicaldist
except NotImplementedError:
Delta_bar = np.sqrt(man.dim)
if Delta0 is None:
Delta0 = Delta_bar / 8
cost = problem.cost
grad = problem.grad
hess = problem.hess
norm = man.norm
inner = man.inner
retr = man.retr
# If no starting point is specified, generate one at random.
if x is None:
x = man.rand()
# Initializations
time0 = time.time()
# k counts the outer (TR) iterations. The semantic is that k counts the
# number of iterations fully executed so far.
k = 0
# Initialize solution and companion measures: f(x), fgrad(x)
fx = cost(x)
fgradx = grad(x)
norm_grad = man.norm(x, fgradx)
# Initialize the trust region radius
Delta = Delta0
# To keep track of consecutive radius changes, so that we can warn the
# user if it appears necessary.
consecutive_TRplus = 0
consecutive_TRminus = 0
# ** Display:
if verbosity >= 1:
print("Optimizing...")
if verbosity >= 2:
print("{:44s}f: {:+.6e} |grad|: {:.6e}".format(" ", float(fx), norm_grad))
self._start_optlog()
while True:
# *************************
# ** Begin TR Subproblem **
# *************************
# Determine eta0
if not self.use_rand:
# Pick the zero vector
eta = man.zerovec(x)
else:
# Random vector in T_x M (this has to be very small)
eta = 1e-6 * man.randvec(x)
# Must be inside trust region
                while norm(x, eta) > Delta:
                    # Shrink eta, keeping its direction, until it lies
                    # inside the trust region.
                    eta = np.sqrt(np.sqrt(np.spacing(1))) * eta
# Solve TR subproblem approximately
eta, Heta, numit, stop_inner = self._truncated_conjugate_gradient(
problem,
x,
fgradx,
eta,
Delta,
self.theta,
self.kappa,
mininner,
maxinner,
)
srstr = self.TCG_STOP_REASONS[stop_inner]
# If using randomized approach, compare result with the Cauchy
# point. Convergence proofs assume that we achieve at least (a
# fraction of) the reduction of the Cauchy point. After this
# if-block, either all eta-related quantities have been changed
# consistently, or none of them have.
if self.use_rand:
used_cauchy = False
# Check the curvature
Hg = hess(x, fgradx)
g_Hg = man.inner(x, fgradx, Hg)
if g_Hg <= 0:
tau_c = 1
else:
tau_c = min(norm_grad ** 3 / (Delta * g_Hg), 1)
# and generate the Cauchy point.
eta_c = -tau_c * Delta / norm_grad * fgradx
Heta_c = -tau_c * Delta / norm_grad * Hg
# Now that we have computed the Cauchy point in addition to the
# returned eta, we might as well keep the best of them.
mdle = fx + inner(x, fgradx, eta) + 0.5 * inner(x, Heta, eta)
mdlec = (
fx + inner(x, fgradx, eta_c) + 0.5 * inner(x, Heta_c, eta_c)
)
if mdlec < mdle:
eta = eta_c
Heta = Heta_c
used_cauchy = True
# This is only computed for logging purposes, because it may be
# useful for some user-defined stopping criteria. If this is not
# cheap for specific applications (compared to evaluating the
# cost), we should reconsider this.
# norm_eta = man.norm(x, eta)
# Compute the tentative next iterate (the proposal)
x_prop = retr(x, eta)
# Compute the function value of the proposal
fx_prop = cost(x_prop)
# Will we accept the proposal or not? Check the performance of the
# quadratic model against the actual cost.
rhonum = fx - fx_prop
rhoden = -inner(x, fgradx, eta) - 0.5 * inner(x, eta, Heta)
# rhonum could be anything.
            # rhoden should be nonnegative, as guaranteed by tCG, barring
            # numerical errors.
# Heuristic -- added Dec. 2, 2013 (NB) to replace the former
# heuristic. This heuristic is documented in the book by Conn Gould
# and Toint on trust-region methods, section 17.4.2. rhonum
# measures the difference between two numbers. Close to
# convergence, these two numbers are very close to each other, so
# that computing their difference is numerically challenging: there
# may be a significant loss in accuracy. Since the acceptance or
# rejection of the step is conditioned on the ratio between rhonum
# and rhoden, large errors in rhonum result in a very large error
# in rho, hence in erratic acceptance / rejection. Meanwhile, close
# to convergence, steps are usually trustworthy and we should
            # transition to a Newton-like method, with rho=1 consistently. The
# heuristic thus shifts both rhonum and rhoden by a small amount
# such that far from convergence, the shift is irrelevant and close
# to convergence, the ratio rho goes to 1, effectively promoting
# acceptance of the step. The rationale is that close to
# convergence, both rhonum and rhoden are quadratic in the distance
# between x and x_prop. Thus, when this distance is on the order of
# sqrt(eps), the value of rhonum and rhoden is on the order of eps,
# which is indistinguishable from the numerical error, resulting in
# badly estimated rho's.
# For abs(fx) < 1, this heuristic is invariant under offsets of f
# but not under scaling of f. For abs(fx) > 1, the opposite holds.
# This should not alarm us, as this heuristic only triggers at the
# very last iterations if very fine convergence is demanded.
rho_reg = max(1, abs(fx)) * np.spacing(1) * self.rho_regularization
rhonum = rhonum + rho_reg
rhoden = rhoden + rho_reg
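            # Added illustration: with fx ~ 1, np.spacing(1) ~ 2.2e-16 and
            # rho_regularization = 1e3, the shift is ~2.2e-13. Far from
            # convergence it is negligible; once |rhonum| and |rhoden| fall
            # below it, the shift dominates and rho -> 1, as intended.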
# This is always true if a linear, symmetric operator is used for
# the Hessian (approximation) and if we had infinite numerical
# precision. In practice, nonlinear approximations of the Hessian
# such as the built-in finite difference approximation and finite
# numerical accuracy can cause the model to increase. In such
# scenarios, we decide to force a rejection of the step and a
# reduction of the trust-region radius. We test the sign of the
# regularized rhoden since the regularization is supposed to
# capture the accuracy to which rhoden is computed: if rhoden were
# negative before regularization but not after, that should not be
# (and is not) detected as a failure.
#
# Note (Feb. 17, 2015, NB): the most recent version of tCG already
# includes a mechanism to ensure model decrease if the Cauchy step
# attained a decrease (which is theoretically the case under very
# lax assumptions). This being said, it is always possible that
# numerical errors will prevent this, so that it is good to keep a
# safeguard.
#
# The current strategy is that, if this should happen, then we
# reject the step and reduce the trust region radius. This also
# ensures that the actual cost values are monotonically decreasing.
model_decreased = rhoden >= 0
if not model_decreased:
srstr = srstr + ", model did not decrease"
try:
rho = rhonum / rhoden
except ZeroDivisionError:
# Added June 30, 2015 following observation by BM. With this
# modification, it is guaranteed that a step rejection is
# always accompanied by a TR reduction. This prevents
# stagnation in this "corner case" (NaN's really aren't
# supposed to occur, but it's nice if we can handle them
# nonetheless).
print(
"rho is NaN! Forcing a radius decrease. This should " "not happen."
)
rho = np.nan
# Choose the new TR radius based on the model performance
trstr = " "
# If the actual decrease is smaller than 1/4 of the predicted
# decrease, then reduce the TR radius.
if rho < 1.0 / 4 or not model_decreased or np.isnan(rho):
trstr = "TR-"
Delta = Delta / 4
consecutive_TRplus = 0
consecutive_TRminus = consecutive_TRminus + 1
if consecutive_TRminus >= 5 and verbosity >= 1:
consecutive_TRminus = -np.inf
print(" +++ Detected many consecutive TR- (radius " "decreases).")
print(
" +++ Consider decreasing options.Delta_bar "
"by an order of magnitude."
)
print(
" +++ Current values: Delta_bar = {:g} and "
"Delta0 = {:g}".format(Delta_bar, Delta0)
)
            # If the actual decrease is at least 3/4 of the predicted decrease
# and the tCG (inner solve) hit the TR boundary, increase the TR
# radius. We also keep track of the number of consecutive
# trust-region radius increases. If there are many, this may
# indicate the need to adapt the initial and maximum radii.
elif rho > 3.0 / 4 and (
stop_inner == self.NEGATIVE_CURVATURE or stop_inner == self.EXCEEDED_TR
):
trstr = "TR+"
Delta = min(2 * Delta, Delta_bar)
consecutive_TRminus = 0
consecutive_TRplus = consecutive_TRplus + 1
if consecutive_TRplus >= 5 and verbosity >= 1:
consecutive_TRplus = -np.inf
print(" +++ Detected many consecutive TR+ (radius " "increases).")
print(
" +++ Consider increasing options.Delta_bar "
"by an order of magnitude."
)
print(
" +++ Current values: Delta_bar = {:g} and "
"Delta0 = {:g}.".format(Delta_bar, Delta0)
)
else:
# Otherwise, keep the TR radius constant.
consecutive_TRplus = 0
consecutive_TRminus = 0
# Choose to accept or reject the proposed step based on the model
# performance. Note the strict inequality.
if model_decreased and rho > self.rho_prime:
# accept = True
accstr = "acc"
x = x_prop
fx = fx_prop
fgradx = grad(x)
norm_grad = norm(x, fgradx)
else:
# accept = False
accstr = "REJ"
# k is the number of iterations we have accomplished.
k = k + 1
# ** Display:
if verbosity == 2:
print(
"{:.3s} {:.3s} k: {:5d} num_inner: "
"{:5d} f: {:+e} |grad|: {:e} "
"{:s}".format(accstr, trstr, k, numit, float(fx), norm_grad, srstr)
)
elif verbosity > 2:
if self.use_rand and used_cauchy:
print("USED CAUCHY POINT")
print(
"{:.3s} {:.3s} k: {:5d} num_inner: "
"{:5d} {:s}".format(accstr, trstr, k, numit, srstr)
)
print(" f(x) : {:+e} |grad| : " "{:e}".format(fx, norm_grad))
print(" rho : {:e}".format(rho))
# ** CHECK STOPPING criteria
stop_reason = self._check_stopping_criterion(
time0, gradnorm=norm_grad, iter=k
)
if stop_reason:
if verbosity >= 1:
print(stop_reason)
print("")
break
# if fx <= mincost:
# # if verbosity >= 1:
# # print("Reached acceptable cost!")
# self._stop_optlog(x, fx, stop_reason, time0, gradnorm=norm_grad, iter=k)
# return x, self._optlog
if self._logverbosity <= 0:
return x
else:
self._stop_optlog(x, fx, stop_reason, time0, gradnorm=norm_grad, iter=k)
return x, self._optlog
def _truncated_conjugate_gradient(
self, problem, x, fgradx, eta, Delta, theta, kappa, mininner, maxinner
):
man = problem.manifold
inner = man.inner
hess = problem.hess
precon = problem.precon
if not self.use_rand: # and therefore, eta == 0
Heta = man.zerovec(x)
r = fgradx
e_Pe = 0
else: # and therefore, no preconditioner
# eta (presumably) ~= 0 was provided by the caller.
Heta = hess(x, eta)
r = fgradx + Heta
e_Pe = inner(x, eta, eta)
r_r = inner(x, r, r)
norm_r = np.sqrt(r_r)
norm_r0 = norm_r
# Precondition the residual
if not self.use_rand:
z = precon(x, r)
else:
z = r
# Compute z'*r
z_r = inner(x, z, r)
d_Pd = z_r
# Initial search direction
delta = -z
if not self.use_rand:
e_Pd = 0
else:
e_Pd = inner(x, eta, delta)
# If the Hessian or a linear Hessian approximation is in use, it is
# theoretically guaranteed that the model value decreases strictly with
# each iteration of tCG. Hence, there is no need to monitor the model
# value. But, when a nonlinear Hessian approximation is used (such as
# the built-in finite-difference approximation for example), the model
# may increase. It is then important to terminate the tCG iterations
# and return the previous (the best-so-far) iterate. The variable below
# will hold the model value.
if not self.use_rand:
model_value = 0
else:
# model_value = model_fun(eta, Heta)
model_value = inner(x, eta, fgradx) + 0.5 * inner(x, eta, Heta)
# Pre-assume termination because j == end.
stop_tCG = self.MAX_INNER_ITER
# Begin inner/tCG loop.
# for j in xrange(0, int(maxinner)):
for j in range(int(maxinner)):
# This call is the computationally intensive step
Hdelta = hess(x, delta)
# Compute curvature (often called kappa)
d_Hd = inner(x, delta, Hdelta)
# Note that if d_Hd == 0, we will exit at the next "if" anyway.
alpha = z_r / d_Hd
# <neweta,neweta>_P =
# <eta,eta>_P + 2*alpha*<eta,delta>_P + alpha*alpha*<delta,delta>_P
e_Pe_new = e_Pe + 2 * alpha * e_Pd + alpha ** 2 * d_Pd
# Check against negative curvature and trust-region radius
# violation. If either condition triggers, we bail out.
if d_Hd <= 0 or e_Pe_new >= Delta ** 2:
# want
# ee = <eta,eta>_prec,x
# ed = <eta,delta>_prec,x
# dd = <delta,delta>_prec,x
tau = (-e_Pd + np.sqrt(e_Pd * e_Pd + d_Pd * (Delta ** 2 - e_Pe))) / d_Pd
eta = eta + tau * delta
# If only a nonlinear Hessian approximation is available, this
# is only approximately correct, but saves an additional
# Hessian call.
Heta = Heta + tau * Hdelta
# Technically, we may want to verify that this new eta is
# indeed better than the previous eta before returning it (this
# is always the case if the Hessian approximation is linear,
# but I am unsure whether it is the case or not for nonlinear
# approximations.) At any rate, the impact should be limited,
# so in the interest of code conciseness (if we can still hope
# for that), we omit this.
if d_Hd <= 0:
stop_tCG = self.NEGATIVE_CURVATURE
else:
stop_tCG = self.EXCEEDED_TR
break
# No negative curvature and eta_prop inside TR: accept it.
e_Pe = e_Pe_new
new_eta = eta + alpha * delta
# If only a nonlinear Hessian approximation is available, this is
# only approximately correct, but saves an additional Hessian call.
new_Heta = Heta + alpha * Hdelta
# Verify that the model cost decreased in going from eta to
# new_eta. If it did not (which can only occur if the Hessian
# approximation is nonlinear or because of numerical errors), then
# we return the previous eta (which necessarily is the best reached
# so far, according to the model cost). Otherwise, we accept the
# new eta and go on.
# new_model_value = model_fun(new_eta, new_Heta)
new_model_value = inner(x, new_eta, fgradx) + 0.5 * inner(x, new_eta, new_Heta)
if new_model_value >= model_value:
stop_tCG = self.MODEL_INCREASED
break
eta = new_eta
Heta = new_Heta
model_value = new_model_value
# Update the residual.
r = r + alpha * Hdelta
# Compute new norm of r.
r_r = inner(x, r, r)
norm_r = np.sqrt(r_r)
# Check kappa/theta stopping criterion.
# Note that it is somewhat arbitrary whether to check this stopping
# criterion on the r's (the gradients) or on the z's (the
# preconditioned gradients). [CGT2000], page 206, mentions both as
# acceptable criteria.
if j >= mininner and norm_r <= norm_r0 * min(norm_r0 ** theta, kappa):
# Residual is small enough to quit
if kappa < norm_r0 ** theta:
stop_tCG = self.REACHED_TARGET_LINEAR
else:
stop_tCG = self.REACHED_TARGET_SUPERLINEAR
break
# Precondition the residual.
if not self.use_rand:
z = precon(x, r)
else:
z = r
# Save the old z'*r.
zold_rold = z_r
# Compute new z'*r.
z_r = inner(x, z, r)
# Compute new search direction
beta = z_r / zold_rold
delta = -z + beta * delta
# Update new P-norms and P-dots [CGT2000, eq. 7.5.6 & 7.5.7].
e_Pd = beta * (e_Pd + alpha * d_Pd)
d_Pd = z_r + beta * beta * d_Pd
return eta, Heta, j, stop_tCG
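# Added usage sketch (hypothetical; pymanopt's Problem/manifold API has
# changed across versions, so treat the exact signatures as assumptions):
#
#   from pymanopt import Problem
#   from pymanopt.manifolds import Sphere
#
#   problem = Problem(manifold=Sphere(3), cost=my_cost)  # my_cost: user-defined
#   xopt = TrustRegions().solve(problem)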
avg_line_length: 40.05 | max_line_length: 91 | alphanum_fraction: 0.546983
hexsha: bcd5fdd6a249159d8e14cf58167b31c3685453b8 | size: 801 | ext: py | lang: Python
repo: sarvex/ArchiveBox | path: archivebox/cli/archivebox_shell.py | head: 2427e6d3dc377c665f785f1d845da4e5a20b50a0 | licenses: ["MIT"]
stars: 6,340 (2018-12-20T21:12:13.000Z to 2020-11-23T02:39:32.000Z) | issues: 388 (2018-12-20T07:58:08.000Z to 2020-11-23T03:20:36.000Z) | forks: 439 (2018-12-21T21:51:47.000Z to 2020-11-21T21:21:35.000Z)
content:
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox shell'
import sys
import argparse
from typing import Optional, List, IO
from ..main import shell
from ..util import docstring
from ..config import OUTPUT_DIR
from ..logging_util import SmartFormatter, reject_stdin
@docstring(shell.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=shell.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
parser.parse_args(args or ())
reject_stdin(__command__, stdin)
shell(
out_dir=pwd or OUTPUT_DIR,
)
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
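# Added usage note: `archivebox shell` opens an interactive (Django) shell in
# the archive's output directory; stdin is rejected above because the command
# is only meant to be used interactively.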
avg_line_length: 22.885714 | max_line_length: 100 | alphanum_fraction: 0.696629
hexsha: cf6b4ec2b16b5f226561359b41f748e0eb9850f2 | size: 2,837 | ext: py | lang: Python
repo: Joevaen/Scikit-image_On_CT | path: Morphology/diameter_closing.py | head: e3bf0eeadc50691041b4b7c44a19d07546a85001 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
# Perform a diameter closing of the image.
#
# Diameter closing removes all dark structures of an image whose maximal
# extension is smaller than diameter_threshold. The maximal extension is
# defined as the maximal extension of the bounding box; the operator is also
# known as bounding-box closing. In practice, the result is similar to a
# morphological closing, but long and thin structures are not removed.
#
# Technically, this operator is based on the max-tree representation of the
# image.
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import diameter_closing
from skimage import data
from skimage.morphology import closing
from skimage.morphology import square
datasets = {
'retina': {'image': data.microaneurysms(),
'figsize': (15, 9),
'diameter': 10,
'vis_factor': 3,
'title': 'Detection of microaneurysm'},
'page': {'image': data.page(),
'figsize': (15, 7),
'diameter': 23,
'vis_factor': 1,
'title': 'Text detection'}
}
for dataset in datasets.values():
# image with printed letters
image = dataset['image']
figsize = dataset['figsize']
diameter = dataset['diameter']
fig, ax = plt.subplots(2, 3, figsize=figsize)
# Original image
ax[0, 0].imshow(image, cmap='gray', aspect='equal',
vmin=0, vmax=255)
ax[0, 0].set_title('Original', fontsize=16)
ax[0, 0].axis('off')
ax[1, 0].imshow(image, cmap='gray', aspect='equal',
vmin=0, vmax=255)
ax[1, 0].set_title('Original', fontsize=16)
ax[1, 0].axis('off')
    # Diameter closing: we remove all dark structures with a maximal
    # extension of less than <diameter> (10 or 23). I.e. in closed_attr, all
    # local minima have at least a maximal extension of <diameter>.
closed_attr = diameter_closing(image, diameter, connectivity=2)
# We then calculate the difference to the original image.
tophat_attr = closed_attr - image
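    # Added note: both closings are extensive transforms (output >= input
    # pointwise), so these uint8 subtractions cannot wrap around.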
ax[0, 1].imshow(closed_attr, cmap='gray', aspect='equal',
vmin=0, vmax=255)
ax[0, 1].set_title('Diameter Closing', fontsize=16)
ax[0, 1].axis('off')
ax[0, 2].imshow(dataset['vis_factor'] * tophat_attr, cmap='gray',
aspect='equal', vmin=0, vmax=255)
ax[0, 2].set_title('Tophat (Difference)', fontsize=16)
ax[0, 2].axis('off')
# A morphological closing removes all dark structures that cannot
# contain a structuring element of a certain size.
closed = closing(image, square(diameter))
# Again we calculate the difference to the original image.
tophat = closed - image
ax[1, 1].imshow(closed, cmap='gray', aspect='equal',
vmin=0, vmax=255)
ax[1, 1].set_title('Morphological Closing', fontsize=16)
ax[1, 1].axis('off')
ax[1, 2].imshow(dataset['vis_factor'] * tophat, cmap='gray',
aspect='equal', vmin=0, vmax=255)
ax[1, 2].set_title('Tophat (Difference)', fontsize=16)
ax[1, 2].axis('off')
fig.suptitle(dataset['title'], fontsize=18)
fig.tight_layout(rect=(0, 0, 1, 0.88))
plt.show()
avg_line_length: 34.597561 | max_line_length: 110 | alphanum_fraction: 0.621079
hexsha: e08cac015225889b41a64cea277cdf893c53fbaa | size: 346 | ext: py | lang: Python
repo: newgene/awsfabrictasks | path: awsfabrictasks/ubuntu.py | head: 7e0d014f9fd6f83ef24e9913eba8c1c17d67e4a4 | licenses: ["BSD-3-Clause"]
stars: 37 (2015-01-25T19:27:37.000Z to 2018-02-22T04:00:00.000Z) | issues: 4 (2015-01-24T23:54:04.000Z to 2016-01-13T17:36:17.000Z) | forks: 13 (2015-01-24T23:44:46.000Z to 2016-06-05T03:55:32.000Z)
content:
"""
Ubuntu utilities.
"""
from fabric.api import sudo
def set_locale(locale='en_US'):
"""
Set locale to avoid the warnings from perl and others about locale
failures.
"""
sudo('locale-gen {locale}.UTF-8'.format(**vars()))
sudo('update-locale LANG={locale}.UTF-8 LC_ALL={locale}.UTF-8 LC_MESSAGES=POSIX'.format(**vars()))
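# Added usage sketch (assuming this module is imported from a fabfile):
#
#   from awsfabrictasks.ubuntu import set_locale
#   set_locale()           # defaults to en_US
#   set_locale('de_DE')    # any locale known to locale-gen works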
avg_line_length: 26.615385 | max_line_length: 102 | alphanum_fraction: 0.66474
hexsha: f344b6d80e98e78e9bc3f04a5a41f74729d7087e | size: 16,063 | ext: py | lang: Python
repo: alek5k/pytransform3d | path: pytransform3d/rotations/_utils.py | head: c6fb10b1d17713bd8a2d6becb928c4f6dcf611f9 | licenses: ["BSD-3-Clause"]
stars: 304 (2019-01-16T15:14:31.000Z to 2022-03-31T16:14:37.000Z) | issues: 94 (2018-12-07T14:54:05.000Z to 2022-03-19T22:38:20.000Z) | forks: 37 (2018-12-09T23:58:40.000Z to 2022-03-16T02:29:53.000Z)
content:
"""Utility functions for rotations."""
import warnings
import math
import numpy as np
from ._constants import unitz, eps
def norm_vector(v):
"""Normalize vector.
Parameters
----------
v : array-like, shape (n,)
nd vector
Returns
-------
u : array, shape (n,)
nd unit vector with norm 1 or the zero vector
"""
norm = np.linalg.norm(v)
if norm == 0.0:
return v
return np.asarray(v) / norm
def norm_matrix(R):
"""Normalize rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix with small numerical errors
Returns
-------
R : array, shape (3, 3)
Normalized rotation matrix
"""
R = np.asarray(R)
c2 = R[:, 1]
c3 = norm_vector(R[:, 2])
c1 = norm_vector(np.cross(c2, c3))
c2 = norm_vector(np.cross(c3, c1))
return np.column_stack((c1, c2, c3))
def norm_angle(a):
"""Normalize angle to (-pi, pi].
Parameters
----------
a : float or array-like, shape (n,)
Angle(s) in radians
Returns
-------
a_norm : float or array-like, shape (n,)
Normalized angle(s) in radians
"""
# Source of the solution: http://stackoverflow.com/a/32266181
return -((np.pi - np.asarray(a)) % (2.0 * np.pi) - np.pi)
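# Added worked example: norm_angle(3 * np.pi) == np.pi and
# norm_angle(-np.pi) == np.pi, i.e. angles are mapped into the half-open
# interval (-pi, pi].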
def norm_axis_angle(a):
"""Normalize axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The length
of the axis vector is 1 and the angle is in [0, pi). No rotation
is represented by [1, 0, 0, 0].
"""
angle = a[3]
norm = np.linalg.norm(a[:3])
if angle == 0.0 or norm == 0.0:
return np.array([1.0, 0.0, 0.0, 0.0])
res = np.empty(4)
res[:3] = a[:3] / norm
angle = norm_angle(angle)
if angle < 0.0:
angle *= -1.0
res[:3] *= -1.0
res[3] = angle
return res
def norm_compact_axis_angle(a):
"""Normalize compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z).
The angle is in [0, pi). No rotation is represented by [0, 0, 0].
"""
angle = np.linalg.norm(a)
if angle == 0.0:
return np.zeros(3)
axis = a / angle
return axis * norm_angle(angle)
def perpendicular_to_vectors(a, b):
"""Compute perpendicular vector to two other vectors.
Parameters
----------
a : array-like, shape (3,)
3d vector
b : array-like, shape (3,)
3d vector
Returns
-------
c : array-like, shape (3,)
3d vector that is orthogonal to a and b
"""
return np.cross(a, b)
def perpendicular_to_vector(a):
"""Compute perpendicular vector to one other vector.
There is an infinite number of solutions to this problem. Thus, we
restrict the solutions to [1, 0, z] and return [0, 0, 1] if the
z component of a is 0.
Parameters
----------
a : array-like, shape (3,)
3d vector
Returns
-------
b : array-like, shape (3,)
A 3d vector that is orthogonal to a. It does not necessarily have
unit length.
"""
if abs(a[2]) < eps:
return np.copy(unitz)
# Now that we solved the problem for [x, y, 0], we can solve it for all
# other vectors by restricting solutions to [1, 0, z] and find z.
# The dot product of orthogonal vectors is 0, thus
# a[0] * 1 + a[1] * 0 + a[2] * z == 0 or -a[0] / a[2] = z
return np.array([1.0, 0.0, -a[0] / a[2]])
def angle_between_vectors(a, b, fast=False):
"""Compute angle between two vectors.
Parameters
----------
a : array-like, shape (n,)
nd vector
b : array-like, shape (n,)
nd vector
fast : bool, optional (default: False)
Use fast implementation instead of numerically stable solution
Returns
-------
angle : float
Angle between a and b
"""
if len(a) != 3 or fast:
return np.arccos(
np.clip(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)),
-1.0, 1.0))
else:
return np.arctan2(np.linalg.norm(np.cross(a, b)), np.dot(a, b))
def vector_projection(a, b):
"""Orthogonal projection of vector a on vector b.
Parameters
----------
a : array-like, shape (3,)
Vector a that will be projected on vector b
b : array-like, shape (3,)
Vector b on which vector a will be projected
Returns
-------
    a_on_b : array, shape (3,)
        Orthogonal projection of vector a onto vector b
"""
b_norm_squared = np.dot(b, b)
if b_norm_squared == 0.0:
return np.zeros(3)
return np.dot(a, b) * b / b_norm_squared
def plane_basis_from_normal(plane_normal):
"""Compute two basis vectors of a plane from the plane's normal vector.
Note that there are infinitely many solutions because any rotation of the
basis vectors about the normal is also a solution. This function
deterministically picks one of the solutions.
The two basis vectors of the plane together with the normal form an
orthonormal basis in 3D space and could be used as columns to form a
rotation matrix.
Parameters
----------
plane_normal : array-like, shape (3,)
Plane normal of unit length.
Returns
-------
x_axis : array, shape (3,)
x-axis of the plane.
y_axis : array, shape (3,)
y-axis of the plane.
"""
if abs(plane_normal[0]) >= abs(plane_normal[1]):
# x or z is the largest magnitude component, swap them
length = math.sqrt(
plane_normal[0] * plane_normal[0]
+ plane_normal[2] * plane_normal[2])
x_axis = np.array([-plane_normal[2] / length, 0.0,
plane_normal[0] / length])
y_axis = np.array([
plane_normal[1] * x_axis[2],
plane_normal[2] * x_axis[0] - plane_normal[0] * x_axis[2],
-plane_normal[1] * x_axis[0]])
else:
# y or z is the largest magnitude component, swap them
length = math.sqrt(plane_normal[1] * plane_normal[1]
+ plane_normal[2] * plane_normal[2])
x_axis = np.array([0.0, plane_normal[2] / length,
-plane_normal[1] / length])
y_axis = np.array([
plane_normal[1] * x_axis[2] - plane_normal[2] * x_axis[1],
-plane_normal[0] * x_axis[2], plane_normal[0] * x_axis[1]])
return x_axis, y_axis
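# Added worked example: for plane_normal = [0, 0, 1] the first branch yields
# x_axis = [-1, 0, 0] and y_axis = [0, -1, 0]; np.cross(x_axis, y_axis)
# recovers the normal, so (x_axis, y_axis, plane_normal) is a right-handed
# orthonormal basis.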
def random_vector(random_state=np.random.RandomState(0), n=3):
r"""Generate an nd vector with normally distributed components.
Each component will be sampled from :math:`\mathcal{N}(\mu=0, \sigma=1)`.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
n : int, optional (default: 3)
Number of vector components
Returns
-------
v : array, shape (n,)
Random vector
"""
return random_state.randn(n)
def random_axis_angle(random_state=np.random.RandomState(0)):
r"""Generate random axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
    :math:`\mathcal{N}(\mu=0, \sigma=1)`, and then the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
"""
angle = np.pi * random_state.rand()
a = np.array([0, 0, 0, angle])
a[:3] = norm_vector(random_state.randn(3))
return a
def random_compact_axis_angle(random_state=np.random.RandomState(0)):
r"""Generate random compact axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
    :math:`\mathcal{N}(\mu=0, \sigma=1)`, and then the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
"""
a = random_axis_angle(random_state)
return a[:3] * a[3]
def random_quaternion(random_state=np.random.RandomState(0)):
"""Generate random quaternion.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
q : array, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
return norm_vector(random_state.randn(4))
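# Added note: normalizing an isotropic 4D Gaussian sample yields a quaternion
# that is uniformly distributed on the unit 3-sphere, i.e. a uniformly random
# rotation.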
def check_skew_symmetric_matrix(V, tolerance=1e-6, strict_check=True):
"""Input validation of a skew-symmetric matrix.
Check whether the transpose of the matrix is its negative:
.. math::
V^T = -V
Parameters
----------
V : array-like, shape (3, 3)
Cross-product matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks.
strict_check : bool, optional (default: True)
Raise a ValueError if V.T is not numerically close enough to -V.
Otherwise we print a warning.
Returns
-------
V : array, shape (3, 3)
Validated cross-product matrix
Raises
------
ValueError
If input is invalid
"""
V = np.asarray(V, dtype=np.float64)
if V.ndim != 2 or V.shape[0] != 3 or V.shape[1] != 3:
raise ValueError("Expected skew-symmetric matrix with shape (3, 3), "
"got array-like object with shape %s" % (V.shape,))
if not np.allclose(V.T, -V, atol=tolerance):
error_msg = ("Expected skew-symmetric matrix, but it failed the test "
"V.T = %r\n-V = %r" % (V.T, -V))
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return V
def check_matrix(R, tolerance=1e-6, strict_check=True):
"""Input validation of a rotation matrix.
We check whether R multiplied by its inverse is approximately the identity
matrix and the determinant is approximately 1.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks. Default tolerance is the same as in
assert_rotation_matrix(R).
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
R : array, shape (3, 3)
Validated rotation matrix
Raises
------
ValueError
If input is invalid
"""
R = np.asarray(R, dtype=np.float64)
if R.ndim != 2 or R.shape[0] != 3 or R.shape[1] != 3:
raise ValueError("Expected rotation matrix with shape (3, 3), got "
"array-like object with shape %s" % (R.shape,))
RRT = np.dot(R, R.T)
if not np.allclose(RRT, np.eye(3), atol=tolerance):
error_msg = ("Expected rotation matrix, but it failed the test "
"for inversion by transposition. np.dot(R, R.T) "
"gives %r" % RRT)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
R_det = np.linalg.det(R)
if abs(R_det - 1) > tolerance:
error_msg = ("Expected rotation matrix, but it failed the test "
"for the determinant, which should be 1 but is %g; "
"that is, it probably represents a rotoreflection"
% R_det)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return R
def check_axis_angle(a):
"""Input validation of axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array, shape (4,)
Validated axis of rotation and rotation angle: (x, y, z, angle)
Raises
------
ValueError
If input is invalid
"""
a = np.asarray(a, dtype=np.float64)
if a.ndim != 1 or a.shape[0] != 4:
raise ValueError("Expected axis and angle in array with shape (4,), "
"got array-like object with shape %s" % (a.shape,))
return norm_axis_angle(a)
def check_compact_axis_angle(a):
"""Input validation of compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array, shape (3,)
Validated axis of rotation and rotation angle: angle * (x, y, z)
Raises
------
ValueError
If input is invalid
"""
a = np.asarray(a, dtype=np.float64)
if a.ndim != 1 or a.shape[0] != 3:
raise ValueError("Expected axis and angle in array with shape (3,), "
"got array-like object with shape %s" % (a.shape,))
return norm_compact_axis_angle(a)
def check_quaternion(q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
q : array-like, shape (4,)
Quaternion to represent rotation: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternion so that it is a unit quaternion
Returns
-------
q : array-like, shape (4,)
Validated quaternion to represent rotation: (w, x, y, z)
Raises
------
ValueError
If input is invalid
"""
q = np.asarray(q, dtype=np.float64)
if q.ndim != 1 or q.shape[0] != 4:
raise ValueError("Expected quaternion with shape (4,), got "
"array-like object with shape %s" % (q.shape,))
if unit:
return norm_vector(q)
else:
return q
def check_quaternions(Q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
Q : array-like, shape (n_steps, 4)
Quaternions to represent rotations: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternions so that they are unit quaternions
Returns
-------
Q : array-like, shape (n_steps, 4)
Validated quaternions to represent rotations: (w, x, y, z)
Raises
------
ValueError
If input is invalid
"""
Q_checked = np.asarray(Q, dtype=np.float64)
if Q_checked.ndim != 2 or Q_checked.shape[1] != 4:
raise ValueError(
"Expected quaternion array with shape (n_steps, 4), got "
"array-like object with shape %s" % (Q_checked.shape,))
if unit:
for i in range(len(Q)):
Q_checked[i] = norm_vector(Q_checked[i])
return Q_checked
def check_rotor(rotor):
"""Input validation of rotor.
Parameters
----------
rotor : array-like, shape (4,)
Rotor: (a, b_yz, b_zx, b_xy)
Returns
-------
rotor : array, shape (4,)
Validated rotor (with unit norm): (a, b_yz, b_zx, b_xy)
Raises
------
ValueError
If input is invalid
"""
rotor = np.asarray(rotor, dtype=np.float64)
if rotor.ndim != 1 or rotor.shape[0] != 4:
raise ValueError("Expected rotor with shape (4,), got "
"array-like object with shape %s" % (rotor.shape,))
return norm_vector(rotor)
avg_line_length: 27.64716 | max_line_length: 78 | alphanum_fraction: 0.580776
hexsha: 83188cf5b40fb1fc1ae77bc96e7cc09bc2d41101 | size: 2,828 | ext: py | lang: Python
repo: lucasmgana/Pharmacy-Light-weight | path: backend/signals.py | head: 9d6efe714d60b3a04f78f174e1e6c2a2ab98bd9a | licenses: ["MIT"]
stars: 192 (2020-08-14T22:17:34.000Z to 2022-03-29T05:56:26.000Z) | issues: 9 (2021-03-30T14:29:00.000Z to 2022-02-27T11:06:35.000Z) | forks: 28 (2020-08-15T08:26:34.000Z to 2022-03-17T01:15:52.000Z)
content:
from .models.publications import Publication
from .models.subscribers import Subscriber
from .utils import unique_slug_generator, smart_truncate, format_wpp_number
from django.db.models.signals import pre_save, post_save
from django.dispatch import receiver
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from core.settings.base import EMAIL_HOST_USER, TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_WPP_NUMBER
from string import Template
from twilio.rest import Client
USE_TWILIO = TWILIO_ACCOUNT_SID and TWILIO_AUTH_TOKEN
if USE_TWILIO:
twilio_client = Client(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
def send_wpp_message(body, to):
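        # Twilio's WhatsApp channel requires the "whatsapp:" prefix on both
        # the sender and the recipient number, hence the formatting below.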
twilio_client.messages.create(
body=body,
from_='whatsapp:{}'.format(TWILIO_WPP_NUMBER),
to='whatsapp:{}'.format(to)
)
@receiver(pre_save, sender=Publication)
def populate_slug_field(sender, instance, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
@receiver(post_save, sender=Publication)
def send_newsletter(sender, instance, created, **kwargs):
if created:
subscribers_emails = list(Subscriber.objects.filter(contact_method='EMAIL').values_list('contact_info', flat=True))
html_content = render_to_string('backend/email.html', {
'title': instance.title,
'description': instance.description,
'body': smart_truncate(instance.body),
'slug': instance.slug
})
sent = []
for subscriber_email in subscribers_emails:
if subscriber_email not in sent:
sent.append(subscriber_email)
email = EmailMultiAlternatives(
'News from Django-React-Typescript: {}'.format(instance.title),
None,
EMAIL_HOST_USER,
[subscriber_email]
)
email.attach_alternative(html_content, "text/html")
email.send()
        if USE_TWILIO:
            sent_wpps = []
            for wpp_number in list(Subscriber.objects.filter(contact_method='WHATSAPP').values_list('contact_info', flat=True)):
                if wpp_number not in sent_wpps:
                    sent_wpps.append(wpp_number)
                    def get_message_body():
                        if instance.description:
                            return instance.description
                        return smart_truncate(instance.body)
                    template = Template('News from Django-React-Typescript: $title\n$body\nLearn more at $link')
                    body = template.substitute(title=instance.title, body=get_message_body(), link='https://www.example.com/blog/{}'.format(instance.slug))
                    send_wpp_message(
                        body,
                        format_wpp_number(wpp_number)
                    )
avg_line_length: 36.727273 | max_line_length: 140 | alphanum_fraction: 0.649929
hexsha: b5d53046bd57bc7b4de62b8f94bb4b0e8cc5d5ed | size: 15,019 | ext: py | lang: Python
repo: Enegg/disnake | path: disnake/components.py | head: 1d48cbf4e0dfec82fdfb65d7f58396767ce7c009 | licenses: ["MIT"]
stars: 290 (2021-11-03T12:33:16.000Z to 2022-03-31T19:30:19.000Z) | issues: 200 (2021-11-03T10:41:41.000Z to 2022-03-31T08:13:11.000Z) | forks: 118 (2021-11-03T18:27:09.000Z to 2022-03-25T22:00:45.000Z)
content:
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from .enums import ButtonStyle, ComponentType, TextInputStyle, try_enum
from .partial_emoji import PartialEmoji, _EmojiTag
from .utils import MISSING, get_slots
if TYPE_CHECKING:
from .emoji import Emoji
from .types.components import (
ActionRow as ActionRowPayload,
ButtonComponent as ButtonComponentPayload,
Component as ComponentPayload,
SelectMenu as SelectMenuPayload,
SelectOption as SelectOptionPayload,
TextInput as TextInputPayload,
)
__all__ = (
"Component",
"ActionRow",
"Button",
"SelectMenu",
"SelectOption",
"TextInput",
)
C = TypeVar("C", bound="Component")
NestedComponent = Union["Button", "SelectMenu", "TextInput"]
class Component:
"""Represents a Discord Bot UI Kit Component.
Currently, the only components supported by Discord are:
- :class:`ActionRow`
- :class:`Button`
- :class:`SelectMenu`
- :class:`TextInput`
This class is abstract and cannot be instantiated.
.. versionadded:: 2.0
Attributes
----------
type: :class:`ComponentType`
The type of component.
"""
__slots__: Tuple[str, ...] = ("type",)
__repr_info__: ClassVar[Tuple[str, ...]]
type: ComponentType
def __repr__(self) -> str:
attrs = " ".join(f"{key}={getattr(self, key)!r}" for key in self.__repr_info__)
return f"<{self.__class__.__name__} {attrs}>"
@classmethod
def _raw_construct(cls: Type[C], **kwargs) -> C:
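        # Builds an instance without calling __init__: allocate via __new__,
        # then copy over only the slot values that were provided; missing
        # slots are simply left unset.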
self: C = cls.__new__(cls)
for slot in get_slots(cls):
try:
value = kwargs[slot]
except KeyError:
pass
else:
setattr(self, slot, value)
return self
def to_dict(self) -> Dict[str, Any]:
raise NotImplementedError
class ActionRow(Component):
"""Represents an action row.
This is a component that holds up to 5 children components in a row.
This inherits from :class:`Component`.
.. versionadded:: 2.0
Attributes
----------
type: :class:`ComponentType`
The type of component.
children: List[Union[:class:`Button`, :class:`SelectMenu`, :class:`TextInput`]]
The children components that this holds, if any.
"""
__slots__: Tuple[str, ...] = ("children",)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: ComponentPayload):
self.type: ComponentType = try_enum(ComponentType, data["type"])
self.children: List[NestedComponent] = [ # type: ignore
_component_factory(d) for d in data.get("components", [])
]
def to_dict(self) -> ActionRowPayload:
return {
"type": int(self.type),
"components": [child.to_dict() for child in self.children],
} # type: ignore
class Button(Component):
"""Represents a button from the Discord Bot UI Kit.
This inherits from :class:`Component`.
.. note::
The user constructible and usable type to create a button is :class:`disnake.ui.Button`
not this one.
.. versionadded:: 2.0
Attributes
----------
style: :class:`.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[:class:`PartialEmoji`]
The emoji of the button, if available.
"""
__slots__: Tuple[str, ...] = (
"style",
"custom_id",
"url",
"disabled",
"label",
"emoji",
)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: ButtonComponentPayload):
self.type: ComponentType = try_enum(ComponentType, data["type"])
self.style: ButtonStyle = try_enum(ButtonStyle, data["style"])
self.custom_id: Optional[str] = data.get("custom_id")
self.url: Optional[str] = data.get("url")
self.disabled: bool = data.get("disabled", False)
self.label: Optional[str] = data.get("label")
self.emoji: Optional[PartialEmoji]
try:
self.emoji = PartialEmoji.from_dict(data["emoji"])
except KeyError:
self.emoji = None
def to_dict(self) -> ButtonComponentPayload:
payload = {
"type": 2,
"style": int(self.style),
"label": self.label,
"disabled": self.disabled,
}
if self.custom_id:
payload["custom_id"] = self.custom_id
if self.url:
payload["url"] = self.url
if self.emoji:
payload["emoji"] = self.emoji.to_dict()
return payload # type: ignore
class SelectMenu(Component):
"""Represents a select menu from the Discord Bot UI Kit.
A select menu is functionally the same as a dropdown, however
on mobile it renders a bit differently.
.. note::
The user constructible and usable type to create a select menu is
:class:`disnake.ui.Select` not this one.
.. versionadded:: 2.0
Attributes
----------
custom_id: Optional[:class:`str`]
The ID of the select menu that gets received during an interaction.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is selected, if any.
min_values: :class:`int`
The minimum number of items that must be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
max_values: :class:`int`
        The maximum number of items that can be chosen for this select menu.
Defaults to 1 and must be between 1 and 25.
options: List[:class:`SelectOption`]
A list of options that can be selected in this select menu.
disabled: :class:`bool`
Whether the select menu is disabled or not.
"""
__slots__: Tuple[str, ...] = (
"custom_id",
"placeholder",
"min_values",
"max_values",
"options",
"disabled",
)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: SelectMenuPayload):
self.type = ComponentType.select
self.custom_id: str = data["custom_id"]
self.placeholder: Optional[str] = data.get("placeholder")
self.min_values: int = data.get("min_values", 1)
self.max_values: int = data.get("max_values", 1)
self.options: List[SelectOption] = [
SelectOption.from_dict(option) for option in data.get("options", [])
]
self.disabled: bool = data.get("disabled", False)
def to_dict(self) -> SelectMenuPayload:
payload: SelectMenuPayload = {
"type": self.type.value,
"custom_id": self.custom_id,
"min_values": self.min_values,
"max_values": self.max_values,
"options": [op.to_dict() for op in self.options],
"disabled": self.disabled,
}
if self.placeholder:
payload["placeholder"] = self.placeholder
return payload
class SelectOption:
"""Represents a select menu's option.
These can be created by users.
.. versionadded:: 2.0
Attributes
----------
label: :class:`str`
The label of the option. This is displayed to users.
Can only be up to 100 characters.
value: :class:`str`
The value of the option. This is not displayed to users.
If not provided when constructed then it defaults to the
label. Can only be up to 100 characters.
description: Optional[:class:`str`]
An additional description of the option, if any.
Can only be up to 100 characters.
emoji: Optional[Union[:class:`str`, :class:`Emoji`, :class:`PartialEmoji`]]
The emoji of the option, if available.
default: :class:`bool`
Whether this option is selected by default.
"""
__slots__: Tuple[str, ...] = (
"label",
"value",
"description",
"emoji",
"default",
)
def __init__(
self,
*,
label: str,
value: str = MISSING,
description: Optional[str] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None,
default: bool = False,
) -> None:
self.label = label
self.value = label if value is MISSING else value
self.description = description
if emoji is not None:
if isinstance(emoji, str):
emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
emoji = emoji._to_partial()
else:
raise TypeError(
f"expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}"
)
self.emoji = emoji
self.default = default
def __repr__(self) -> str:
return (
f"<SelectOption label={self.label!r} value={self.value!r} description={self.description!r} "
f"emoji={self.emoji!r} default={self.default!r}>"
)
def __str__(self) -> str:
if self.emoji:
base = f"{self.emoji} {self.label}"
else:
base = self.label
if self.description:
return f"{base}\n{self.description}"
return base
@classmethod
def from_dict(cls, data: SelectOptionPayload) -> SelectOption:
try:
emoji = PartialEmoji.from_dict(data["emoji"])
except KeyError:
emoji = None
return cls(
label=data["label"],
value=data["value"],
description=data.get("description"),
emoji=emoji,
default=data.get("default", False),
)
def to_dict(self) -> SelectOptionPayload:
payload: SelectOptionPayload = {
"label": self.label,
"value": self.value,
"default": self.default,
}
if self.emoji:
payload["emoji"] = self.emoji.to_dict() # type: ignore
if self.description:
payload["description"] = self.description
return payload
class TextInput(Component):
"""Represents a text input from the Discord Bot UI Kit.
.. versionadded:: 2.4
.. note::
The user constructible and usable type to create a text input is
:class:`disnake.ui.TextInput`, not this one.
Attributes
----------
style: :class:`TextInputStyle`
The style of the text input.
label: Optional[:class:`str`]
The label of the text input.
custom_id: :class:`str`
The ID of the text input that gets received during an interaction.
placeholder: Optional[:class:`str`]
The placeholder text that is shown if nothing is entered.
value: Optional[:class:`str`]
The pre-filled text of the text input.
required: :class:`bool`
Whether the text input is required. Defaults to ``True``.
min_length: Optional[:class:`int`]
The minimum length of the text input.
max_length: Optional[:class:`int`]
The maximum length of the text input.
"""
__slots__: Tuple[str, ...] = (
"style",
"custom_id",
"label",
"placeholder",
"value",
"required",
"max_length",
"min_length",
)
__repr_info__: ClassVar[Tuple[str, ...]] = __slots__
def __init__(self, data: TextInputPayload) -> None:
style = data.get("style", TextInputStyle.short.value)
self.type: ComponentType = try_enum(ComponentType, data["type"])
self.custom_id: str = data["custom_id"]
self.style: TextInputStyle = try_enum(TextInputStyle, style)
self.label: Optional[str] = data.get("label")
self.placeholder: Optional[str] = data.get("placeholder")
self.value: Optional[str] = data.get("value")
self.required: bool = data.get("required", True)
self.min_length: Optional[int] = data.get("min_length")
self.max_length: Optional[int] = data.get("max_length")
def to_dict(self) -> TextInputPayload:
payload: TextInputPayload = {
"type": self.type.value,
"style": self.style.value,
"label": cast(str, self.label),
"custom_id": self.custom_id,
"required": self.required,
}
if self.placeholder is not None:
payload["placeholder"] = self.placeholder
if self.value is not None:
payload["value"] = self.value
if self.min_length is not None:
payload["min_length"] = self.min_length
if self.max_length is not None:
payload["max_length"] = self.max_length
return payload
def _component_factory(data: ComponentPayload) -> Component:
# NOTE: due to speed, this method does not use the ComponentType enum
# as this runs every single time a component is received from the api
component_type = data["type"]
if component_type == 1:
return ActionRow(data)
elif component_type == 2:
return Button(data) # type: ignore
elif component_type == 3:
return SelectMenu(data) # type: ignore
elif component_type == 4:
return TextInput(data) # type: ignore
else:
as_enum = try_enum(ComponentType, component_type)
return Component._raw_construct(type=as_enum)
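# --- Editor's usage sketch (illustrative, not part of disnake) ---
# Round-trips a raw button payload through _component_factory and builds a
# SelectOption by hand. The payload keys follow what Button.__init__ above
# consumes; the concrete values ("Click me", "demo", "Red") are hypothetical.
def _component_usage_example():
    button = _component_factory({"type": 2, "style": 1, "label": "Click me", "custom_id": "demo"})
    option = SelectOption(label="Red", description="the colour red")  # value defaults to the label
    return button.to_dict(), option.to_dict()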
| 30.526423
| 104
| 0.612025
|
af2741cc6daf24b94a2227c3007a70a1a18d73ea
| 358
|
py
|
Python
|
pjproject_android/tests/pjsua/scripts-call/150_srtp_3_0.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4
|
2019-11-11T08:16:08.000Z
|
2020-08-25T03:08:44.000Z
|
pjproject_android/tests/pjsua/scripts-call/150_srtp_3_0.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-20T06:58:16.000Z
|
2020-02-20T07:08:07.000Z
|
my_softphone/pjproject-2.9/tests/pjsua/scripts-call/150_srtp_3_0.py
|
sashkaseltsov1/reposCpp
|
3ff5ce2a14a368a36b1758099ce4f3e8c4cdf11d
|
[
"Unlicense"
] | 5
|
2019-07-02T02:03:24.000Z
|
2022-03-30T09:58:52.000Z
|
# $Id: 150_srtp_3_0.py 3334 2010-10-05 16:32:04Z nanang $
#
from inc_cfg import *
test_param = TestParam(
"Callee=optional (with duplicated offer) SRTP, caller=no SRTP",
[
InstanceParam("callee", "--null-audio --use-srtp=3 --srtp-secure=0 --max-calls=1"),
InstanceParam("caller", "--null-audio --use-srtp=0 --srtp-secure=0 --max-calls=1")
]
)
| 29.833333
| 86
| 0.664804
|
8b01a0d91f93e82d9f69bee032db33e32409e5cf
| 2,320
|
py
|
Python
|
selfdrive/test/update_ci_routes.py
|
otaku/openpilot
|
09283f4d6af839756e7ff49035d0b2251859aebe
|
[
"MIT"
] | 1
|
2021-04-10T09:14:43.000Z
|
2021-04-10T09:14:43.000Z
|
selfdrive/test/update_ci_routes.py
|
otaku/openpilot
|
09283f4d6af839756e7ff49035d0b2251859aebe
|
[
"MIT"
] | null | null | null |
selfdrive/test/update_ci_routes.py
|
otaku/openpilot
|
09283f4d6af839756e7ff49035d0b2251859aebe
|
[
"MIT"
] | 1
|
2021-04-10T09:14:45.000Z
|
2021-04-10T09:14:45.000Z
|
#!/usr/bin/env python3
import tempfile
import shutil
import subprocess
from common.basedir import BASEDIR
from azure.storage.blob import BlockBlobService
from selfdrive.test.test_car_models import routes as test_car_models_routes, non_public_routes
from selfdrive.test.process_replay.test_processes import segments as replay_segments
from xx.chffr.lib import azureutil
from xx.chffr.lib.storage import upload_dir_serial, download_dir_tpe
from xx.chffr.lib.storage import _DATA_ACCOUNT_PRODUCTION, _DATA_ACCOUNT_CI, _DATA_BUCKET_PRODUCTION, _DATA_BUCKET_CI
SOURCES = [
(_DATA_ACCOUNT_PRODUCTION, _DATA_BUCKET_PRODUCTION),
(_DATA_ACCOUNT_PRODUCTION, "preserve"),
]
DEST_KEY = azureutil.get_user_token(_DATA_ACCOUNT_CI, "openpilotci")
SOURCE_KEYS = [azureutil.get_user_token(account, bucket) for account, bucket in SOURCES]
SERVICE = BlockBlobService(_DATA_ACCOUNT_CI, sas_token=DEST_KEY)
def sync_to_ci_public(route):
print(f"Uploading {route}")
key_prefix = route.replace('|', '/')
if next(azureutil.list_all_blobs(SERVICE, "openpilotci", prefix=key_prefix), None) is not None:
print("Already synced")
return True
for (source_account, source_bucket), source_key in zip(SOURCES, SOURCE_KEYS):
print(f"Trying {source_account}/{source_bucket}")
cmd = [
f"{BASEDIR}/external/bin/azcopy",
"copy",
"https://{}.blob.core.windows.net/{}/{}?{}".format(source_account, source_bucket, key_prefix, source_key),
"https://{}.blob.core.windows.net/{}?{}".format(_DATA_ACCOUNT_CI, "openpilotci", DEST_KEY),
"--recursive=true",
"--overwrite=false",
]
    try:
      # check_call raises CalledProcessError on a nonzero exit status,
      # which makes the except branch below actually reachable
      # (subprocess.call never raises it)
      subprocess.check_call(cmd, stdout=subprocess.DEVNULL)
      print("Success")
      return True
    except subprocess.CalledProcessError:
      print("Failed")
  return False
if __name__ == "__main__":
failed_routes = []
# sync process replay routes
for s in replay_segments:
route_name, _ = s.rsplit('--', 1)
if not sync_to_ci_public(route_name):
failed_routes.append(route_name)
# sync test_car_models routes
for r in list(test_car_models_routes.keys()):
if r not in non_public_routes:
if not sync_to_ci_public(r):
failed_routes.append(r)
if len(failed_routes):
print("failed routes:")
print(failed_routes)
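# --- Editor's usage sketch (hypothetical route name) ---
# Requires Azure credentials and the azcopy binary referenced above:
#     sync_to_ci_public("ffffffffffffffff|2020-01-01--00-00-00")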
| 31.780822
| 117
| 0.733621
|
883cb609785ee91af2548b8fbc67db5d621d7111
| 30,846
|
py
|
Python
|
pyvaspflow/io/vasp_input.py
|
Zhiwei-Lu/pyvaspflow
|
b80eab3e8bfc52aed6a2459dd32655f1075d9058
|
[
"MIT"
] | 1
|
2021-11-23T12:42:56.000Z
|
2021-11-23T12:42:56.000Z
|
pyvaspflow/io/vasp_input.py
|
Zhiwei-Lu/pyvaspflow
|
b80eab3e8bfc52aed6a2459dd32655f1075d9058
|
[
"MIT"
] | null | null | null |
pyvaspflow/io/vasp_input.py
|
Zhiwei-Lu/pyvaspflow
|
b80eab3e8bfc52aed6a2459dd32655f1075d9058
|
[
"MIT"
] | 1
|
2021-09-10T14:19:14.000Z
|
2021-09-10T14:19:14.000Z
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from pyvaspflow.utils import str_delimited, clean_lines,zread,read_json
import re, math, json, seekpath
from tabulate import tabulate  # used by Incar.get_string(pretty=True)
from os import path
import numpy as np
from enum import Enum
from pyvaspflow.utils import is_2d_structure
import itertools
class Incar(dict):
def __init__(self, params=None):
self.update({'ISIF':3,'ISTART':0,'ICHARG':2,'NSW':50,'IBRION':2,
'EDIFF':1E-5,'EDIFFG':-0.01,'ISMEAR':0,'NPAR':4,'LREAL':'Auto',
'LWAVE':'F','LCHARG':'F'})
if params:
if (params.get("MAGMOM") and isinstance(params["MAGMOM"][0], (int, float))) \
and (params.get("LSORBIT") or params.get("LNONCOLLINEAR")):
val = []
for i in range(len(params["MAGMOM"])//3):
val.append(params["MAGMOM"][i*3:(i+1)*3])
params["MAGMOM"] = val
self.update(params)
def __setitem__(self, key, val):
key = key.strip()
val = Incar.proc_val(key.strip(), str(val).strip())
super().__setitem__(key, val)
def as_dict(self):
d = dict(self)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
if d.get("MAGMOM") and isinstance(d["MAGMOM"][0], dict):
d["MAGMOM"] = [Magmom.from_dict(m) for m in d["MAGMOM"]]
return Incar({k: v for k, v in d.items() if k not in ("@module", "@class")})
def get_string(self, sort_keys=False, pretty=True):
keys = self.keys()
if sort_keys:
keys = sorted(keys)
lines = []
for k in keys:
if k == "MAGMOM" and isinstance(self[k], list):
value = []
if isinstance(self[k][0], list) and \
(self.get("LSORBIT") or self.get("LNONCOLLINEAR")):
value.append(" ".join(str(i) for j in self[k] for i in j))
elif self.get("LSORBIT") or self.get("LNONCOLLINEAR"):
for m, g in itertools.groupby(self[k]):
value.append("3*{}*{}".format(len(tuple(g)), m))
else:
# float() to ensure backwards compatibility between
# float magmoms and Magmom objects
for m, g in itertools.groupby(self[k], lambda x: float(x)):
value.append("{}*{}".format(len(tuple(g)), m))
lines.append([k, " ".join(value)])
elif isinstance(self[k], list):
lines.append([k, " ".join([str(i) for i in self[k]])])
else:
lines.append([k, self[k]])
if pretty:
return str(tabulate([[l[0], "=", l[1]] for l in lines],
tablefmt="plain"))
else:
return str_delimited(lines, None, " = ") + "\n"
def __str__(self):
return self.get_string(sort_keys=True, pretty=False)
def write_file(self, filename='INCAR'):
with open(filename, "wt") as f:
f.write(self.__str__())
def from_file(self,filename):
with open(filename, "r") as f:
self.update(Incar.from_string(f.read()))
@staticmethod
def from_string(string):
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(';'):
m = re.match(r'(\w+)\s*=\s*(.*)', sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return params
@staticmethod
def proc_val(key, val):
"""
Static helper method to convert INCAR parameters to proper types, e.g.,
integers, floats, lists, etc.
Args:
key: INCAR parameter key
val: Actual value of INCAR parameter.
"""
list_keys = ("LDAUU", "LDAUL", "LDAUJ", "MAGMOM", "DIPOL",
"LANGEVIN_GAMMA", "QUAD_EFG", "EINT")
bool_keys = ("LDAU", "LWAVE", "LSCALU", "LCHARG", "LPLANE", "LUSE_VDW",
"LHFCALC", "ADDGRID", "LSORBIT", "LNONCOLLINEAR")
float_keys = ("EDIFF", "SIGMA", "TIME", "ENCUTFOCK", "HFSCREEN",
"POTIM", "EDIFFG", "AGGAC", "PARAM1", "PARAM2")
int_keys = ("NSW", "NBANDS", "NELMIN", "ISIF", "IBRION", "ISPIN",
"ICHARG", "NELM", "ISMEAR", "NPAR", "LDAUPRINT", "LMAXMIX",
"ENCUT", "NSIM", "NKRED", "NUPDOWN", "ISPIND", "LDAUTYPE",
"IVDW")
def smart_int_or_float(numstr):
if numstr.find(".") != -1 or numstr.lower().find("e") != -1:
return float(numstr)
else:
return int(numstr)
try:
if key in list_keys:
output = []
toks = re.findall(
r"(-?\d+\.?\d*)\*?(-?\d+\.?\d*)?\*?(-?\d+\.?\d*)?", val)
for tok in toks:
if tok[2] and "3" in tok[0]:
output.extend(
[smart_int_or_float(tok[2])] * int(tok[0])
* int(tok[1]))
elif tok[1]:
output.extend([smart_int_or_float(tok[1])] *
int(tok[0]))
else:
output.append(smart_int_or_float(tok[0]))
return output
if key in bool_keys:
m = re.match(r"^\.?([T|F|t|f])[A-Za-z]*\.?", val)
if m:
if m.group(1) == "T" or m.group(1) == "t":
return True
else:
return False
raise ValueError(key + " should be a boolean type!")
if key in float_keys:
return float(re.search(r"^-?\d*\.?\d*[e|E]?-?\d*", val).group(0))
if key in int_keys:
return int(re.match(r"^-?[0-9]+", val).group(0))
except ValueError:
pass
# Not in standard keys. We will try a hierarchy of conversions.
try:
val = int(val)
return val
except ValueError:
pass
try:
val = float(val)
return val
except ValueError:
pass
if "true" in val.lower():
return True
if "false" in val.lower():
return False
return val.strip().capitalize()
def diff(self, other):
similar_param = {}
different_param = {}
for k1, v1 in self.items():
if k1 not in other:
different_param[k1] = {"INCAR1": v1, "INCAR2": None}
elif v1 != other[k1]:
different_param[k1] = {"INCAR1": v1, "INCAR2": other[k1]}
else:
similar_param[k1] = v1
for k2, v2 in other.items():
if k2 not in similar_param and k2 not in different_param:
if k2 not in self:
different_param[k2] = {"INCAR1": None, "INCAR2": v2}
return {"Same": similar_param, "Different": different_param}
def __add__(self, other):
params = {k: v for k, v in self.items()}
for k, v in other.items():
if k in self and v != self[k]:
raise ValueError("Incars have conflicting values!")
else:
params[k] = v
return Incar(params)
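# --- Editor's usage sketch (illustrative, not part of pyvaspflow) ---
# Demonstrates that item assignment routes through Incar.proc_val (raw INCAR
# strings are coerced to int/float/bool) and that two Incar objects merge via
# ``+``. Wrapped in a function so importing this module does not execute it.
def _incar_usage_example():
    inc = Incar()
    inc["ENCUT"] = "520"     # proc_val -> int 520
    other = Incar()
    other["SIGMA"] = "0.05"  # proc_val -> float 0.05
    merged = inc + other     # identical defaults, so no conflict is raised
    return merged.get_string(sort_keys=True, pretty=False)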
class Potcar(list):
"""
This class can generate POTCAR file from POSCAR and you can specify
some functional potcar you want to choose
"""
def __init__(self,poscar='POSCAR',functional='paw_PBE',sym_potcar_map=None):
with open(poscar,'r') as f:
lines = f.readlines()
atom_type = lines[5].strip()
if len(atom_type) != 1:
atom_type = re.split(pattern=r"\s+",string=atom_type)
else:
atom_type = list(atom_type)
self.atom_type = atom_type
new_sym_potcar_map = []
if not sym_potcar_map:
sym_potcar_map = []
elif isinstance(sym_potcar_map,str):
sym_potcar_map = [sym_potcar_map]
for atom in self.atom_type:
add_map = False
for map in sym_potcar_map:
if atom in map:
new_sym_potcar_map.append(map)
add_map = True
break
if not add_map:
new_sym_potcar_map.append(atom)
self.sym_potcar_map = new_sym_potcar_map
self.functional = functional
def __repr__(self):
return self.__str__()
def __str__(self):
res = 'The functional is : ' + self.functional + '\n'
for i in range(len(self.atom_type)):
res += 'Atom ' + self.atom_type[i] + ' using ' + self.sym_potcar_map[i] + ' type\n'
return res
def write_file(self,filename='POTCAR'):
json_f = read_json()
potcar_main_dir_path = json_f['potcar_path'][self.functional]
all_pot_file = []
for map in self.sym_potcar_map:
pot_path = path.join(potcar_main_dir_path,map)
if path.isfile(path.join(pot_path,'POTCAR')):
all_pot_file.append(path.join(pot_path,'POTCAR'))
elif path.isfile(path.join(pot_path,'POTCAR.Z')):
all_pot_file.append(path.join(pot_path,'POTCAR.Z'))
else:
from os import listdir
from os.path import isfile, join
possible = [dir for dir in listdir(json_f['potcar_path'][self.functional]) if map.split('_')[0] in dir]
raise FileNotFoundError('No supported POTCAR file found; '
                        'you can set sym_potcar_map=' + ','.join(possible))
with open(filename, 'w') as outfile:
for fname in all_pot_file:
outfile.write(zread(fname))
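# --- Editor's usage sketch (hypothetical paths/settings) ---
# Requires a POSCAR in the working directory and a "potcar_path" entry for
# the chosen functional in the package's JSON config:
#     pot = Potcar(poscar="POSCAR", functional="paw_PBE", sym_potcar_map=["Ti_sv"])
#     pot.write_file("POTCAR")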
class Kpoints_supported_modes(Enum):
Automatic = 0
Gamma = 1
Monkhorst = 2
Line_mode = 3
Cartesian = 4
Reciprocal = 5
def __str__(self):
return self.name
@staticmethod
def from_string(s):
c = s.lower()[0]
for m in Kpoints_supported_modes:
if m.name.lower()[0] == c:
return m
raise ValueError("Can't interprete Kpoint mode %s" % s)
class Kpoints:
supported_modes = Kpoints_supported_modes
def __init__(self, comment="Default gamma", num_kpts=0,
style=supported_modes.Gamma,
kpts=((1, 1, 1),), kpts_shift=(0, 0, 0),
kpts_weights=None, coord_type=None, labels=None,
tet_number=0, tet_weight=0, tet_connections=None):
"""
Highly flexible constructor for Kpoints object. The flexibility comes
at the cost of usability and in general, it is recommended that you use
the default constructor only if you know exactly what you are doing and
require the flexibility. For most usage cases, the three automatic
schemes can be constructed far more easily using the convenience static
constructors (automatic, gamma_automatic, monkhorst_automatic) and it
is recommended that you use those.
Args:
comment (str): String comment for Kpoints
num_kpts: Following VASP method of defining the KPOINTS file, this
parameter is the number of kpoints specified. If set to 0
(or negative), VASP automatically generates the KPOINTS.
style: Style for generating KPOINTS. Use one of the
Kpoints.supported_modes enum types.
kpts (2D array): 2D array of kpoints. Even when only a single
specification is required, e.g. in the automatic scheme,
the kpts should still be specified as a 2D array. e.g.,
[[20]] or [[2,2,2]].
kpts_shift (3x1 array): Shift for Kpoints.
kpts_weights: Optional weights for kpoints. Weights should be
integers. For explicit kpoints.
coord_type: In line-mode, this variable specifies whether the
Kpoints were given in Cartesian or Reciprocal coordinates.
labels: In line-mode, this should provide a list of labels for
each kpt. It is optional in explicit kpoint mode as comments for
k-points.
tet_number: For explicit kpoints, specifies the number of
tetrahedrons for the tetrahedron method.
tet_weight: For explicit kpoints, specifies the weight for each
tetrahedron for the tetrahedron method.
tet_connections: For explicit kpoints, specifies the connections
of the tetrahedrons for the tetrahedron method.
Format is a list of tuples, [ (sym_weight, [tet_vertices]),
...]
The default behavior of the constructor is for a Gamma centered,
1x1x1 KPOINTS with no shift.
"""
if num_kpts > 0 and (not labels) and (not kpts_weights):
raise ValueError("For explicit or line-mode kpoints, either the "
"labels or kpts_weights must be specified.")
self.comment = comment
self.num_kpts = num_kpts
self.kpts = kpts
self.style = style
self.coord_type = coord_type
self.kpts_weights = kpts_weights
self.kpts_shift = kpts_shift
self.labels = labels
self.tet_number = tet_number
self.tet_weight = tet_weight
self.tet_connections = tet_connections
@property
def style(self):
return self._style
@style.setter
def style(self, style):
if isinstance(style, str):
style = Kpoints.supported_modes.from_string(style)
if style in (Kpoints.supported_modes.Automatic,
Kpoints.supported_modes.Gamma,
Kpoints.supported_modes.Monkhorst) and len(self.kpts) > 1:
raise ValueError("For fully automatic or automatic gamma or monk "
"kpoints, only a single line for the number of "
"divisions is allowed.")
self._style = style
def automatic(self,subdivisions):
"""
Convenience method that configures a fully automatic Kpoint grid, with
gamma centered Monkhorst-Pack grids and the number of subdivisions
along each reciprocal lattice vector determined by the scheme in the
VASP manual.
Args:
subdivisions: Parameter determining number of subdivisions along
each reciprocal lattice vector.
"""
self.comment = "Fully automatic kpoint scheme"
self.num_kpts = 0
self._style=Kpoints.supported_modes.Automatic
self.kpts=[[subdivisions]]
def gamma_automatic(self, kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenience method that configures an automatic Gamma-centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
"""
self.comment = "Fully automatic kpoint scheme"
self.num_kpts = 0
self._style = Kpoints.supported_modes.Gamma
self.kpts = [kpts]
self.kpts_shift = shift
def monkhorst_automatic(self, kpts=(2, 2, 2), shift=(0, 0, 0)):
"""
Convenience method that configures an automatic Monkhorst-Pack Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (2,2,2)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
"""
self.comment = "Automatic kpoint scheme"
self.num_kpts = 0
self._style = Kpoints.supported_modes.Monkhorst
self.kpts = [kpts]
self.kpts_shift = shift
def automatic_density(self, structure, kppa, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes for hexagonal cells and
Monkhorst-Pack grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure (Structure): Input structure
kppa (int): Grid density
force_gamma (bool): Force a gamma centered mesh (default is to
use gamma only for hexagonal cells or odd meshes)
"""
comment = "grid density = %.0f / atom"%kppa
if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:
kppa += kppa * 0.01
ngrid = kppa / len(structure.atoms)
latt = structure.lattice
lengths = np.linalg.norm(latt,axis=1)
is_2d = is_2d_structure(structure)
if type(is_2d) is tuple:
print('This structure will be treated as a two dimensional structure here',
'so the mesh of one direction will be set to 1')
vac_idx = is_2d[1]
atom_idx = np.setdiff1d(range(3),vac_idx)
mult = (ngrid * lengths[atom_idx[0]] * lengths[atom_idx[1]]) ** (1 / 2)
num_div = np.zeros((3,))
num_div[atom_idx[0]] = int(math.floor(max(mult / lengths[atom_idx[0]], 1)))
num_div[atom_idx[1]] = int(math.floor(max(mult / lengths[atom_idx[1]], 1)))
num_div[vac_idx] = 1
num_div = num_div.astype(int).tolist()
elif not is_2d :
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
spg = structure.get_spacegroup()
if int(spg.split('(')[1].split(')')[0]) in range(168,195):
is_hexagonal = True  # latt.is_hexagonal()
else:
is_hexagonal = False
has_odd = any([i % 2 == 1 for i in num_div])
if has_odd or is_hexagonal or force_gamma:
style = Kpoints.supported_modes.Gamma
else:
style = Kpoints.supported_modes.Monkhorst
self.comment = comment
self.num_kpts = 0
self._style = style
self.kpts = [num_div]
self.kpts_shift = [0,0,0]
def automatic_gamma_density(self,structure, kppa):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
"""
latt = structure.lattice
ngrid = kppa / len(structure.atoms)
lengths = np.linalg.norm(latt,axis=1)
is_2d = is_2d_structure(structure)
if type(is_2d) is tuple:
print('This structure will be treated as a two dimensional structure here',
'so the mesh of one direction will be set to 1 or 2')
vac_idx = is_2d[1]
atom_idx = np.setdiff1d(range(3),vac_idx)
mult = (ngrid * lengths[atom_idx[0]] * lengths[atom_idx[1]]) ** (1 / 2)
num_div = np.zeros((3,))
num_div[atom_idx[0]] = int(math.floor(max(mult / lengths[atom_idx[0]], 1)))
num_div[atom_idx[1]] = int(math.floor(max(mult / lengths[atom_idx[1]], 1)))
num_div[vac_idx] = 1
num_div = num_div.astype(int).tolist()
elif not is_2d :
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
# VASP documentation recommends to use even grids for n <= 8 and odd
# grids for n > 8.
# num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
comment = "KPOINTS with grid density = " +"{} / atom".format(kppa)
self.comment = comment
self.num_kpts = 0
self._style = style
self.kpts = [num_div]
self.kpts_shift = [0,0,0]
def automatic_density_by_vol(self,structure, kppvol, force_gamma=False):
"""
Returns an automatic Kpoint object based on a structure and a kpoint
density per inverse Angstrom^3 of reciprocal cell.
Algorithm:
Same as automatic_density()
Args:
structure (Structure): Input structure
kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell
force_gamma (bool): Force a gamma centered mesh
"""
# vol = structure.lattice.reciprocal_lattice.volume
latt = structure.lattice
latt_vol = np.linalg.det(latt)
r_x = np.cross(latt[1],latt[2])/latt_vol
r_y = np.cross(latt[2],latt[0])/latt_vol
r_z = np.cross(latt[0],latt[1])/latt_vol
vol = 2*np.pi*np.linalg.det([r_x,r_y,r_z])
kppa = kppvol * vol * len(structure.atoms)
self.comment = "KPOINTS with grid density = " +"{} / atom".format(kppa)
self.num_kpts = 0
if force_gamma:
self._style = Kpoints.supported_modes.Gamma
else:
self._style = Kpoints.supported_modes.Monkhorst
lengths = np.linalg.norm(latt,axis=1)
ngrid = kppa / len(structure.atoms)
mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]
spg = structure.get_spacegroup()
if int(spg.split('(')[1].split(')')[0]) in range(168,195):
is_hexagonal = True  # latt.is_hexagonal()
else:
is_hexagonal = False
has_odd = any([i % 2 == 1 for i in num_div])
self.kpts = [num_div]
self.kpts_shift = [0,0,0]
def automatic_linemode(self, structure,num_kpts=16):
all_kpath = seekpath.get_explicit_k_path((structure.lattice,
structure.positions,structure.atoms))
points = all_kpath['point_coords']
path = all_kpath['path']
kpoints,labels = [],[]
for p in path:
kpoints.append(points[p[0]])
kpoints.append(points[p[1]])
labels.append(p[0])
labels.append(p[1])
comment = 'Line_mode KPOINTS file, '+'num_kpts: '+str(num_kpts)
self.comment = comment
self._style = Kpoints.supported_modes.Line_mode
self.coord_type = 'Reciprocal'
self.kpts = kpoints
self.labels = labels
self.num_kpts = num_kpts
@staticmethod
def from_file(filename):
"""
Reads a Kpoints object from a KPOINTS file.
Args:
filename (str): filename to read from.
Returns:
Kpoints object
"""
with open(filename, "rt") as f:
return Kpoints.from_string(f.read())
@staticmethod
def from_string(string):
"""
Reads a Kpoints object from a KPOINTS string.
Args:
string (str): KPOINTS string.
Returns:
Kpoints object
"""
lines = [line.strip() for line in string.splitlines()]
comment = lines[0]
num_kpts = int(lines[1].split()[0].strip())
style = lines[2].lower()[0]
# Fully automatic KPOINTS
if style == "a":
return Kpoints.automatic(int(lines[3]))
coord_pattern = re.compile(r'^\s*([\d+.\-Ee]+)\s+([\d+.\-Ee]+)\s+'
r'([\d+.\-Ee]+)')
# Automatic gamma and Monk KPOINTS, with optional shift
if style == "g" or style == "m":
kpts = [int(i) for i in lines[3].split()]
kpts_shift = (0, 0, 0)
if len(lines) > 4 and coord_pattern.match(lines[4]):
try:
kpts_shift = [float(i) for i in lines[4].split()]
except ValueError:
pass
kp = Kpoints()
if style == "g":
    kp.gamma_automatic(kpts, kpts_shift)
else:
    kp.monkhorst_automatic(kpts, kpts_shift)
return kp
# Automatic kpoints with basis
if num_kpts <= 0:
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]
kpts_shift = [float(i) for i in lines[6].split()]
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, kpts_shift=kpts_shift)
# Line-mode KPOINTS, usually used with band structures
if style == "l":
coord_type = "Cartesian" if lines[3].lower()[0] in "ck" \
else "Reciprocal"
style = Kpoints.supported_modes.Line_mode
kpts = []
labels = []
patt = re.compile(r'([e0-9.\-]+)\s+([e0-9.\-]+)\s+([e0-9.\-]+)'
r'\s*!*\s*(.*)')
for i in range(4, len(lines)):
line = lines[i]
m = patt.match(line)
if m:
kpts.append([float(m.group(1)), float(m.group(2)),
float(m.group(3))])
labels.append(m.group(4).strip())
return Kpoints(comment=comment, num_kpts=num_kpts, style=style,
kpts=kpts, coord_type=coord_type, labels=labels)
# Assume explicit KPOINTS if all else fails.
style = Kpoints.supported_modes.Cartesian if style in "ck" \
else Kpoints.supported_modes.Reciprocal
kpts = []
kpts_weights = []
labels = []
tet_number = 0
tet_weight = 0
tet_connections = None
for i in range(3, 3 + num_kpts):
toks = lines[i].split()
kpts.append([float(j) for j in toks[0:3]])
kpts_weights.append(float(toks[3]))
if len(toks) > 4:
labels.append(toks[4])
else:
labels.append(None)
try:
# Deal with tetrahedron method
if lines[3 + num_kpts].strip().lower()[0] == "t":
toks = lines[4 + num_kpts].split()
tet_number = int(toks[0])
tet_weight = float(toks[1])
tet_connections = []
for i in range(5 + num_kpts, 5 + num_kpts + tet_number):
toks = lines[i].split()
tet_connections.append((int(toks[0]),
[int(toks[j])
for j in range(1, 5)]))
except IndexError:
pass
return Kpoints(comment=comment, num_kpts=num_kpts,
style=Kpoints.supported_modes[str(style)],
kpts=kpts, kpts_weights=kpts_weights,
tet_number=tet_number, tet_weight=tet_weight,
tet_connections=tet_connections, labels=labels)
def write_file(self, filename='KPOINTS'):
with open(filename, "wt") as f:
f.write(self.__str__())
def __repr__(self):
return self.__str__()
def __str__(self):
lines = [self.comment, str(self.num_kpts), self.style.name]
style = self.style.name.lower()[0]
if style == "l":
lines.append(self.coord_type)
for i in range(len(self.kpts)):
lines.append(" ".join([str(x) for x in self.kpts[i]]))
if style == "l":
lines[-1] += " ! " + self.labels[i]
if i % 2 == 1:
lines[-1] += "\n"
elif self.num_kpts > 0:
if self.labels is not None:
lines[-1] += " %i %s" % (self.kpts_weights[i],
self.labels[i])
else:
lines[-1] += " %i" % (self.kpts_weights[i])
# Print tetrahedron parameters if the number of tetrahedrons > 0
if style not in "lagm" and self.tet_number > 0:
lines.append("Tetrahedron")
lines.append("%d %f" % (self.tet_number, self.tet_weight))
for sym_weight, vertices in self.tet_connections:
lines.append("%d %d %d %d %d" % (sym_weight, vertices[0],
vertices[1], vertices[2],
vertices[3]))
# Print shifts for automatic kpoints types if not zero.
if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
lines.append(" ".join([str(x) for x in self.kpts_shift]))
return "\n".join(lines) + "\n"
def as_dict(self):
"""json friendly dict representation of Kpoints"""
d = {"comment": self.comment, "nkpoints": self.num_kpts,
"generation_style": self.style.name, "kpoints": self.kpts,
"usershift": self.kpts_shift,
"kpts_weights": self.kpts_weights, "coord_type": self.coord_type,
"labels": self.labels, "tet_number": self.tet_number,
"tet_weight": self.tet_weight,
"tet_connections": self.tet_connections}
optional_paras = ["genvec1", "genvec2", "genvec3", "shift"]
for para in optional_paras:
if para in self.__dict__:
d[para] = self.__dict__[para]
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
comment = d.get("comment", "")
generation_style = d.get("generation_style")
kpts = d.get("kpoints", [[1, 1, 1]])
kpts_shift = d.get("usershift", [0, 0, 0])
num_kpts = d.get("nkpoints", 0)
return cls(comment=comment, kpts=kpts, style=generation_style,
kpts_shift=kpts_shift, num_kpts=num_kpts,
kpts_weights=d.get("kpts_weights"),
coord_type=d.get("coord_type"),
labels=d.get("labels"), tet_number=d.get("tet_number", 0),
tet_weight=d.get("tet_weight", 0),
tet_connections=d.get("tet_connections"))
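# --- Editor's usage sketch (illustrative, not part of pyvaspflow) ---
# The convenience methods mutate the instance in place; a Gamma-centred
# 4x4x4 mesh can therefore be rendered like this:
def _kpoints_usage_example():
    kp = Kpoints()
    kp.gamma_automatic(kpts=(4, 4, 4))
    return str(kp)  # KPOINTS file contents as a string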
if __name__ == '__main__':
from sagar.io.vasp import read_vasp
c = read_vasp('/home/hecc/Documents/python-package/Defect-Formation-Calculation/pyvaspflow/examples/POSCAR')
kpoints = Kpoints()
kpoints.automatic_density(structure=c,kppa=3000)
kpoints.write_file()
| 40.964143
| 120
| 0.543377
|
719e9714bb7fc9d805ca413112bf15b410d9612b
| 456
|
py
|
Python
|
pdappend/gui.py
|
cnpls/pdappend
|
9f00fea5d9df072ab90d74c96ebaffe8033eb572
|
[
"MIT"
] | null | null | null |
pdappend/gui.py
|
cnpls/pdappend
|
9f00fea5d9df072ab90d74c96ebaffe8033eb572
|
[
"MIT"
] | 10
|
2021-03-29T02:34:05.000Z
|
2021-03-30T00:24:37.000Z
|
pdappend/gui.py
|
cnpls/pdappend
|
9f00fea5d9df072ab90d74c96ebaffe8033eb572
|
[
"MIT"
] | 1
|
2020-11-11T23:29:41.000Z
|
2020-11-11T23:29:41.000Z
|
import os
from pdappend import pdappend, cli
from tkinter import filedialog
from tkinter import *
def main():
root = Tk()
root.withdraw()
# TODO: from pdappend.pdappend.FILE_TYPES
files = filedialog.askopenfilenames(
initialdir=os.getcwd(), filetypes=[(".xlsx .xls .csv", ".xlsx .xls .csv")]
)
args = pdappend.Args(
targets=pdappend.Targets(values=files), flags=pdappend.DEFAULT_CONFIG
)
cli.main(args)
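# Editor's addition (assumption: the module is also meant to be runnable
# directly, not only via a packaged entry point):
if __name__ == "__main__":
    main()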
| 21.714286
| 82
| 0.66886
|
053715207de03dc7c72267367b804107ae464caa
| 5,776
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/web/v20201001/get_web_app_swift_virtual_network_connection_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20201001/get_web_app_swift_virtual_network_connection_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/web/v20201001/get_web_app_swift_virtual_network_connection_slot.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWebAppSwiftVirtualNetworkConnectionSlotResult',
'AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult',
'get_web_app_swift_virtual_network_connection_slot',
]
@pulumi.output_type
class GetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
"""
def __init__(__self__, id=None, kind=None, name=None, subnet_resource_id=None, swift_supported=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if subnet_resource_id and not isinstance(subnet_resource_id, str):
raise TypeError("Expected argument 'subnet_resource_id' to be a str")
pulumi.set(__self__, "subnet_resource_id", subnet_resource_id)
if swift_supported and not isinstance(swift_supported, bool):
raise TypeError("Expected argument 'swift_supported' to be a bool")
pulumi.set(__self__, "swift_supported", swift_supported)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="subnetResourceId")
def subnet_resource_id(self) -> Optional[str]:
"""
The Virtual Network subnet's resource ID. This is the subnet that this Web App will join. This subnet must have a delegation to Microsoft.Web/serverFarms defined first.
"""
return pulumi.get(self, "subnet_resource_id")
@property
@pulumi.getter(name="swiftSupported")
def swift_supported(self) -> Optional[bool]:
"""
A flag that specifies if the scale unit this Web App is on supports Swift integration.
"""
return pulumi.get(self, "swift_supported")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(GetWebAppSwiftVirtualNetworkConnectionSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=self.id,
kind=self.kind,
name=self.name,
subnet_resource_id=self.subnet_resource_id,
swift_supported=self.swift_supported,
system_data=self.system_data,
type=self.type)
def get_web_app_swift_virtual_network_connection_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult:
"""
Swift Virtual Network Contract. This is used to enable the new Swift way of doing virtual network integration.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get a gateway for the production slot's Virtual Network.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20201001:getWebAppSwiftVirtualNetworkConnectionSlot', __args__, opts=opts, typ=GetWebAppSwiftVirtualNetworkConnectionSlotResult).value
return AwaitableGetWebAppSwiftVirtualNetworkConnectionSlotResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
subnet_resource_id=__ret__.subnet_resource_id,
swift_supported=__ret__.swift_supported,
system_data=__ret__.system_data,
type=__ret__.type)
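# --- Editor's usage sketch (hypothetical resource names) ---
# Invoking the function inside a Pulumi program; an authenticated Azure
# environment is required for the invoke to succeed:
#     conn = get_web_app_swift_virtual_network_connection_slot(
#         name="my-app",
#         resource_group_name="my-rg",
#         slot="staging",
#     )
#     pulumi.export("subnetResourceId", conn.subnet_resource_id)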
| 39.027027
| 190
| 0.659453
|
8c9a653f2c96ebe2d5b9571ad132429bd4b7e5d3
| 7,351
|
py
|
Python
|
test/unit/tasks/test_forecasting.py
|
Kwentar/FEDOT
|
97a561698c0aa006aa627fc56965a0bc251a4ed8
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/tasks/test_forecasting.py
|
Kwentar/FEDOT
|
97a561698c0aa006aa627fc56965a0bc251a4ed8
|
[
"BSD-3-Clause"
] | null | null | null |
test/unit/tasks/test_forecasting.py
|
Kwentar/FEDOT
|
97a561698c0aa006aa627fc56965a0bc251a4ed8
|
[
"BSD-3-Clause"
] | null | null | null |
from random import seed
import numpy as np
import pytest
from sklearn.metrics import mean_squared_error, mean_absolute_error
from statsmodels.tsa.arima_process import ArmaProcess
from fedot.core.chains.chain import Chain
from fedot.core.chains.chain_ts_wrappers import out_of_sample_ts_forecast, \
in_sample_ts_forecast
from fedot.core.chains.node import PrimaryNode, SecondaryNode
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
from fedot.utilities.synth_dataset_generator import generate_synthetic_data
np.random.seed(42)
seed(42)
def _max_rmse_threshold_by_std(values, is_strict=True):
tolerance_coeff = 3.0 if is_strict else 5.0
return np.std(values) * tolerance_coeff
def get_synthetic_ts_data_period(n_steps=1000, forecast_length=5):
simulated_data = ArmaProcess().generate_sample(nsample=n_steps)
x1 = np.arange(0, n_steps)
x2 = np.arange(0, n_steps) + 1
simulated_data = simulated_data + x1 * 0.0005 - x2 * 0.0001
periodicity = np.sin(x1 / 50)
simulated_data = simulated_data + periodicity
task = Task(TaskTypesEnum.ts_forecasting,
TsForecastingParams(forecast_length=forecast_length))
data = InputData(idx=np.arange(0, n_steps),
features=simulated_data,
target=simulated_data,
task=task,
data_type=DataTypesEnum.ts)
return train_test_data_setup(data)
def get_multiscale_chain():
# First branch
node_lagged_1 = PrimaryNode('lagged')
node_lagged_1.custom_params = {'window_size': 20}
node_ridge_1 = SecondaryNode('ridge', nodes_from=[node_lagged_1])
# Second branch, which will try to make prediction based on smoothed ts
node_filtering = PrimaryNode('gaussian_filter')
node_filtering.custom_params = {'sigma': 3}
node_lagged_2 = SecondaryNode('lagged', nodes_from=[node_filtering])
node_lagged_2.custom_params = {'window_size': 100}
node_ridge_2 = SecondaryNode('ridge', nodes_from=[node_lagged_2])
node_final = SecondaryNode('linear', nodes_from=[node_ridge_1, node_ridge_2])
chain = Chain(node_final)
return chain
def get_simple_ts_chain(model_root: str = 'ridge', window_size: int = 20):
node_lagged = PrimaryNode('lagged')
node_lagged.custom_params = {'window_size': window_size}
node_root = SecondaryNode(model_root, nodes_from=[node_lagged])
chain = Chain(node_root)
return chain
def get_statsmodels_chain():
node_ar = PrimaryNode('ar')
node_ar.custom_params = {'lag_1': 20, 'lag_2': 100}
chain = Chain(node_ar)
return chain
def test_arima_chain_fit_correct():
train_data, test_data = get_synthetic_ts_data_period(forecast_length=12)
chain = get_statsmodels_chain()
chain.fit(input_data=train_data)
test_pred = chain.predict(input_data=test_data)
# Calculate metric
test_pred = np.ravel(np.array(test_pred.predict))
test_target = np.ravel(np.array(test_data.target))
rmse_test = mean_squared_error(test_target, test_pred, squared=False)
rmse_threshold = _max_rmse_threshold_by_std(test_data.target)
assert rmse_test < rmse_threshold
def test_simple_chain_forecast_correct():
train_data, test_data = get_synthetic_ts_data_period(forecast_length=5)
chain = get_simple_ts_chain()
chain.fit(input_data=train_data)
test_pred = chain.predict(input_data=test_data)
# Calculate metric
test_pred = np.ravel(np.array(test_pred.predict))
test_target = np.ravel(np.array(test_data.target))
rmse_test = mean_squared_error(test_target, test_pred, squared=False)
rmse_threshold = _max_rmse_threshold_by_std(test_data.target, is_strict=True)
assert rmse_test < rmse_threshold
def test_regression_multiscale_chain_forecast_correct():
train_data, test_data = get_synthetic_ts_data_period(forecast_length=5)
chain = get_multiscale_chain()
chain.fit(input_data=train_data)
test_pred = chain.predict(input_data=test_data)
# Calculate metric
test_pred = np.ravel(np.array(test_pred.predict))
test_target = np.ravel(np.array(test_data.target))
rmse_test = mean_squared_error(test_target, test_pred, squared=False)
rmse_threshold = _max_rmse_threshold_by_std(test_data.target,
is_strict=True)
assert rmse_test < rmse_threshold
def test_ts_single_chain_model_without_multioutput_support():
time_series = generate_synthetic_data(20)
len_forecast = 2
train_part = time_series[:-len_forecast]
test_part = time_series[-len_forecast:]
task = Task(TaskTypesEnum.ts_forecasting,
TsForecastingParams(forecast_length=len_forecast))
train_data = InputData(idx=np.arange(0, len(train_part)),
features=train_part,
target=train_part,
task=task,
data_type=DataTypesEnum.ts)
start_forecast = len(train_part)
end_forecast = start_forecast + len_forecast
idx_for_predict = np.arange(start_forecast, end_forecast)
# Data for making prediction for a specific length
test_data = InputData(idx=idx_for_predict,
features=train_part,
target=test_part,
task=task,
data_type=DataTypesEnum.ts)
for model_id in ['xgbreg', 'gbr', 'adareg', 'svr', 'sgdr']:
chain = get_simple_ts_chain(model_root=model_id, window_size=2)
# making predictions for the missing part in the time series
chain.fit_from_scratch(train_data)
predicted_values = chain.predict(test_data)
chain_forecast = np.ravel(np.array(predicted_values.predict))
test_part = np.ravel(np.array(test_part))
mae = mean_absolute_error(test_part, chain_forecast)
assert mae < 50
def test_exception_if_incorrect_forecast_length():
with pytest.raises(ValueError) as exc:
_, _ = get_synthetic_ts_data_period(forecast_length=0)
assert str(exc.value) == 'Forecast length should be more then 0'  # message text matches the library verbatim
def test_multistep_out_of_sample_forecasting():
horizon = 12
train_data, test_data = get_synthetic_ts_data_period(forecast_length=5)
chain = get_multiscale_chain()
# Fit chain to make forecasts 5 elements above
chain.fit(input_data=train_data)
# Make prediction for 12 elements
predicted = out_of_sample_ts_forecast(chain=chain,
input_data=test_data,
horizon=horizon)
assert len(predicted) == horizon
def test_multistep_in_sample_forecasting():
horizon = 12
train_data, test_data = get_synthetic_ts_data_period(forecast_length=5)
chain = get_multiscale_chain()
# Fit chain to make forecasts 5 elements above
chain.fit(input_data=train_data)
# Make prediction for 12 elements
predicted = in_sample_ts_forecast(chain=chain,
input_data=test_data,
horizon=horizon)
assert len(predicted) == horizon
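# --- Editor's usage sketch (mirrors the tests above; synthetic data only) ---
def _demo_simple_forecast():
    train_data, test_data = get_synthetic_ts_data_period(forecast_length=5)
    chain = get_simple_ts_chain()
    chain.fit(input_data=train_data)
    return chain.predict(input_data=test_data)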
| 33.56621
| 81
| 0.706162
|
344e90432f18f9807af80121018c3a4f92271304
| 36,969
|
py
|
Python
|
scraper/expand/get_genbank_sequences.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 3
|
2020-10-22T08:31:29.000Z
|
2021-05-19T13:13:12.000Z
|
scraper/expand/get_genbank_sequences.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 62
|
2020-11-30T11:29:20.000Z
|
2022-03-28T13:50:30.000Z
|
scraper/expand/get_genbank_sequences.py
|
HobnobMancer/cazy_webscraper
|
3f74492f46db2093f7e6cd91fffcb8347694e54e
|
[
"MIT"
] | 1
|
2021-03-10T16:30:11.000Z
|
2021-03-10T16:30:11.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# Author:
# Emma E. M. Hobbs
# Contact
# eemh1@st-andrews.ac.uk
# Emma E. M. Hobbs,
# Biomolecular Sciences Building,
# University of St Andrews,
# North Haugh Campus,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Retrieve proteins sequences from GenBank and populate the local database and write to FASTA"""
import logging
import os
import re
import sys
import time
import pandas as pd
from datetime import datetime
from typing import List, Optional
from Bio import Entrez, SeqIO
from tqdm import tqdm
from scraper.sql.sql_orm import (
Cazyme,
CazyFamily,
Cazymes_Genbanks,
Genbank,
Kingdom,
Taxonomy,
get_db_session,
)
from scraper.utilities import config_logger, file_io, parse_configuration
from scraper.utilities.parsers import build_genbank_sequences_parser
def main(argv: Optional[List[str]] = None, logger: Optional[logging.Logger] = None):
"""Set up programme and initate run."""
start_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") # used in terminating message
start_time = pd.to_datetime(start_time)
date_today = datetime.now().strftime("%Y/%m/%d") # used as seq_update_date in the db
# parse cmd-line arguments
if argv is None:
parser = build_genbank_sequences_parser()
args = parser.parse_args()
else:
args = build_genbank_sequences_parser(argv).parse_args()
if logger is None:
logger = logging.getLogger(__name__)
config_logger(args)
# check database was passed
if os.path.isfile(args.database) is False:
logger.error(
"Could not find local CAZy database. Check path is correct. Terminating programme."
)
sys.exit(1)
Entrez.email = args.email
# create session to local database
session = get_db_session(args)
# retrieve configuration data
file_io_path = file_io.__file__
config_dict, taxonomy_filters, kingdoms = parse_configuration.get_configuration(
args,
)
if config_dict is None:
if args.update:
# get sequence for everything without a sequence and those with newer remote sequence
add_and_update_all_sequences(date_today, taxonomy_filters, kingdoms, session, args)
else:
# get sequences for everything without a sequence
get_missing_sequences_for_everything(
date_today,
taxonomy_filters,
kingdoms,
session,
args,
)
else:
# get sequences for only specified classes/families
if args.update:
update_sequences_for_specific_records(
date_today,
config_dict,
taxonomy_filters,
kingdoms,
session,
args,
)
else:
get_missing_sequences_for_specific_records(
date_today,
config_dict,
taxonomy_filters,
kingdoms,
session,
args,
)
if args.blastdb is not None:
file_io.build_blast_db(args)
end_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")  # used in terminating message
end_time = pd.to_datetime(end_time)
total_time = end_time - start_time
logger.info(
"Finished populating local CAZy database with GenBank protein sequences. "
"Terminating program.\n"
f"Scrape initated at {start_time}\n"
f"Scrape finished at {end_time}\n"
f"Total run time: {total_time}"
)
print(
"=====================cazy_webscraper-expand-genank_sequences=====================\n"
"Finished populating local CAZy database with GenBank protein sequences. "
"Terminating program.\n"
f"Scrape initated at {start_time}\n"
f"Scrape finished at {end_time}\n"
f"Total run time: {total_time}\n"
)
# The following functions query the local database for GenBank accessions
def get_missing_sequences_for_everything(date_today, taxonomy_filters, kingdoms, session, args):
"""Retrieve protein sequences for all CAZymes in the local CAZy database that don't have seq.
:param date_today: str, today's date, used for logging the date the seq is retrieved in the db
:param taxonomy_filters: set of genera, species and strains to restrict sequence retrieval
:param kingdoms: set of taxonomy Kingdoms to retrieve sequences for
:param session: open SQLite db session
:param args: cmd-line argument parser
Return nothing.
"""
logger = logging.getLogger(__name__)
# retrieve only sequences for primary GenBank accessions, and those without sequences
if args.primary is True:
logger.warning(
"Retrieving sequences for all primary GenBank accessions that do not have sequences"
)
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazymes_Genbanks.primary == True).\
filter(Genbank.sequence == None).\
all()
# retrieve sequences for all GenBank accessions without sequences
else:
logger.warning(
"Retrieving sequences for all GenBank accessions that do not have sequences"
)
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Genbank.sequence == None).\
all()
# retrieve the genbank_accessions
accessions = extract_accessions(genbank_query, taxonomy_filters)
if len(accessions) == 0:
logger.warning(
"Did not retrieve any GenBank accessions from the local database\n"
"that have sequences missing. Not adding sequences to the local database."
)
return
# split accessions into lists of length args.epost; epost doesn't like more than 200 IDs per request
accessions = get_accession_chunks(accessions, args.epost) # args.epost = number per chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
return
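# Editor's note (assumption): get_accession_chunks is defined elsewhere in this
# module; given how it is called above, a minimal generator with the expected
# behaviour (split a list into chunks of at most `chunk_size` accessions, since
# NCBI's epost dislikes requests with more than ~200 IDs) would look like:
#     def get_accession_chunks(accessions, chunk_size):
#         for i in range(0, len(accessions), chunk_size):
#             yield accessions[i:i + chunk_size]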
def add_and_update_all_sequences(date_today, taxonomy_filters, kingdoms, session, args):
"""Retrieve sequences for all proteins in the database.
For records with no sequences, add the retrieved sequence.
For records with a sequence, check if the remote sequence is more recent than the existing
sequence. If it is, update the local sequence.
:param date_today: str, today's date, used for logging the date the seq is retrieved in the db
:param taxonomy_filters: set of genera, species and strains to retrieve sequences for
:param kingdoms: set of taxonomy Kingdoms to retrieve sequences for
:param session: open SQLite db session
:param args: cmd-line argument parser
Return nothing.
"""
logger = logging.getLogger(__name__)
# retrieve only sequences for primary GenBank accessions, and those without sequences
if args.primary is True:
logger.warning(
"Retrieving sequences for all primary GenBank accessions that do not have sequences\n"
"and those whose sequences have been updated in NCBI "
"since they were retrieved previously"
)
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazymes_Genbanks.primary == True).\
all()
# retrieve sequences for all GenBank accessions
else:
logger.warning(
"Retrieving sequences for all GenBank accessions that do not have sequences\n"
"and those whose sequences have been updated in NCBI "
"since they were retrieved previously"
)
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
all()
# create dictionary of {genbank_accession: 'sequence update date' (str)}
accessions = extract_accessions_and_dates(genbank_query, taxonomy_filters)
if len(accessions.keys()) == 0:
logger.warning(
"Did not retrieve any GenBank accessions from the local database.\n"
"Not adding sequences to the local database."
)
return
accessions = get_accessions_for_new_sequences(accessions) # list of genkbank_accession
if len(accessions) == 0:
logger.warning(
"Did not retrieve any GenBank accessions whose sequences need updating.\n"
"Not adding sequences to the local database."
)
return
# split accessions into lists of length args.epost; epost doesn't like more than 200 IDs per request
accessions = get_accession_chunks(accessions, args.epost) # args.epost = number per chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
return
def get_missing_sequences_for_specific_records(
date_today,
config_dict,
taxonomy_filters,
kingdoms,
session,
args,
):
"""Coordinate getting the sequences for specific CAZymes, not with seqs in the db.
:param date_today: str, today's date, used for logging the date the seq is retrieved in the db
:param config_dict: dict, defines CAZy classes and families to get sequences for
:param taxonomy_filters: set of genera, species and strains to restrict sequence retrieval
:param kingdoms: set of taxonomy Kingdoms to retrieve sequences for
:param session: open SQL database session
:param args: cmd-line args parser
Return nothing.
"""
logger = logging.getLogger(__name__)
logger.warning(
"Retrieving sequences for GenBank accessions that do not have a sequence in the database"
)
# start with the classes
if len(config_dict["classes"]) != 0:
# retrieve list of CAZy classes to get sequences for
cazy_classes = config_dict["classes"]
for cazy_class in tqdm(cazy_classes, desc="Parsing CAZy classes"):
            # retrieve the class name abbreviation, e.g. "GH" from "Glycoside Hydrolases (GHs)"
cazy_class = cazy_class[((cazy_class.find("(")) + 1):((cazy_class.find(")")) - 1)]
# get the CAZymes within the CAZy class
class_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.family.regexp(rf"{cazy_class}\d+")).\
subquery()
# retrieve the GenBank accessions of the CAZymes in the CAZy class without seqs
if args.primary:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(class_subquery)).\
filter(Cazymes_Genbanks.primary == True).\
filter(Genbank.sequence == None).\
all()
else:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(class_subquery)).\
filter(Genbank.sequence == None).\
all()
# retrieve the genbank_accessions from the sql collection object returned by the query
accessions = extract_accessions(genbank_query, taxonomy_filters)
if len(accessions) == 0:
logger.warning(
f"Did not retrieve any GenBank accessions for the CAZy class {cazy_class}\n"
"that have missing sequences. Not adding sequences to the local database."
)
continue
            # separate accessions into chunks of length args.epost
# epost doesn't like posting more than 200 at once
accessions = get_accession_chunks(accessions, args.epost) # args.epost = number/chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
continue
# Retrieve protein sequences for specified families
for key in config_dict:
if key == "classes":
continue
if config_dict[key] is None:
continue # no families to parse
for family in tqdm(config_dict[key], desc=f"Parsing families in {key}"):
if family.find("_") != -1: # subfamily
# Retrieve GenBank accessions catalogued under the subfamily
family_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.subfamily == family).\
subquery()
else: # family
# Retrieve GenBank accessions catalogued under the family
family_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.family == family).\
subquery()
            # get the GenBank accessions of these CAZymes, without sequences
if args.primary:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(family_subquery)).\
filter(Cazymes_Genbanks.primary == True).\
filter(Genbank.sequence == None).\
all()
else:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(family_subquery)).\
filter(Genbank.sequence == None).\
all()
# retrieve a list of GenBank accessions from the sql collection returned from the query
accessions = extract_accessions(genbank_query, taxonomy_filters)
if len(accessions) == 0:
logger.warning(
f"Did not retrieve any GenBank accessions for the CAZy class {family}\n"
"that have missing sequences. Not adding sequences to the local database."
)
continue
            # separate accessions into chunks of length args.epost
# epost doesn't like posting more than 200 at once
accessions = get_accession_chunks(accessions, args.epost) # args.epost = acc/chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
return
def update_sequences_for_specific_records(
date_today,
config_dict,
taxonomy_filters,
kingdoms,
session,
args,
):
"""Coordinate getting the sequences for specific CAZymes, not with seqs in the db nad those
whose seq in NCBI has been updated since the last retrieval.
For records with no sequences, add the retrieved sequence.
For records with a sequence, check if the remove sequence is more recent than the existing
sequence. It it is, update the local sequence.
:param date_today: str, today's date, used for logging the date the seq is retrieved in the db
:param config_dict: dict, defines CAZy classes and families to get sequences for
:param taxonomy_filters: set of genera, species and strains to restrict sequence retrieval
:param kingdoms: set of taxonomy Kingdoms to retrieve sequences for
:param session: open SQL database session
:param args: cmd-line args parser
Return nothing.
"""
logger = logging.getLogger(__name__)
logger.warning(
"Retrieving sequences for GenBank accessions that do not have a sequence in the database,\n"
"and those whose sequence in NCBI has been updated since they were previously retrieved."
)
# start with the classes
if len(config_dict["classes"]) != 0:
# retrieve list of CAZy classes to get sequences for
cazy_classes = config_dict["classes"]
for cazy_class in tqdm(cazy_classes, desc="Parsing CAZy classes"):
            # retrieve the class name abbreviation, e.g. "GH" from "Glycoside Hydrolases (GHs)"
cazy_class = cazy_class[((cazy_class.find("(")) + 1):((cazy_class.find(")")) - 1)]
# get the CAZymes within the CAZy class
class_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.family.regexp(rf"{cazy_class}\d+")).\
subquery()
            # retrieve the GenBank accessions of the CAZymes in the CAZy class
if args.primary:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(class_subquery)).\
filter(Cazymes_Genbanks.primary == True).\
all()
else:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(class_subquery)).\
all()
# create dictionary of genbank_accession: 'sequence update date' (str)
accessions = extract_accessions_and_dates(genbank_query, taxonomy_filters)
if len(accessions.keys()) == 0:
logger.warning(
f"Did not retrieve any GenBank accessions for the CAZy class {cazy_class}.\n"
"Not adding sequences to the local database."
)
continue
            accessions = get_accessions_for_new_sequences(accessions)  # list of genbank_accessions
if len(accessions) == 0:
logger.warning(
"Did not retrieve any GenBank accessions whose sequences need updating for "
f"the CAZy class {cazy_class}.\n"
"Not adding sequences to the local database."
)
continue
            # separate accessions into chunks of length args.epost
# epost doesn't like posting more than 200 at once
accessions = get_accession_chunks(accessions, args.epost) # args.epost = acc/chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
# Retrieve protein sequences for specified families
for key in config_dict:
if key == "classes":
continue
if config_dict[key] is None:
continue # no families to parse
for family in tqdm(config_dict[key], desc=f"Parsing families in {key}"):
if family.find("_") != -1: # subfamily
# Retrieve GenBank accessions catalogued under the subfamily
family_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.subfamily == family).\
subquery()
else: # family
# Retrieve GenBank accessions catalogued under the family
family_subquery = session.query(Cazyme.cazyme_id).\
join(CazyFamily, Cazyme.families).\
filter(CazyFamily.family == family).\
subquery()
            # get the GenBank accessions of these CAZymes, without sequences
if args.primary:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(family_subquery)).\
filter(Cazymes_Genbanks.primary == True).\
filter(Genbank.sequence == None).\
all()
else:
genbank_query = session.query(Genbank, Cazymes_Genbanks, Cazyme, Taxonomy, Kingdom).\
join(Taxonomy, (Taxonomy.kingdom_id == Kingdom.kingdom_id)).\
join(Cazyme, (Cazyme.taxonomy_id == Taxonomy.taxonomy_id)).\
join(Cazymes_Genbanks, (Cazymes_Genbanks.cazyme_id == Cazyme.cazyme_id)).\
join(Genbank, (Genbank.genbank_id == Cazymes_Genbanks.genbank_id)).\
filter(Cazyme.cazyme_id.in_(family_subquery)).\
filter(Genbank.sequence == None).\
all()
# create dictionary of {genbank_accession: 'sequence update date' (str)}
accessions = extract_accessions_and_dates(genbank_query, taxonomy_filters)
if len(accessions.keys()) == 0:
logger.warning(
f"Did not retrieve any GenBank accessions for the CAZy class {family}.\n"
"Not adding sequences to the local database."
)
continue
            accessions = get_accessions_for_new_sequences(accessions)  # list of genbank_accessions
if len(accessions) == 0:
logger.warning(
"Did not retrieve any GenBank accessions whose sequences need updating for "
f"the CAZy class {family}.\n"
"Not adding sequences to the local database."
)
continue
            # separate accessions into chunks of length args.epost
# epost doesn't like posting more than 200 at once
accessions = get_accession_chunks(accessions, args.epost) # args.epost = acc/chunk
for lst in accessions:
get_sequences_add_to_db(lst, date_today, session, args)
return
# The following functions are retrieving the list of Genbank accessions to retrieve sequences for #
def extract_accessions(genbank_query, taxonomy_filters):
"""The query contains GenBank accessions and Cazymes_Genbanks records, retrieve the accessions.
:param genbank_query: sql collection
:param taxonomy_filters: set of genera, species and strains to restrict retrieval of sequences
Return a list of GenBank accessions. Each element is a string of a unique accession.
"""
if taxonomy_filters is None:
accessions = [item[0] for item in genbank_query]
return [x for x in accessions if "NA" != x]
else:
accessions = []
for item in genbank_query:
if item[0] != "NA": # if GenBank accession not stored as 'NA'
source_organism = item[-1].genus + item[-1].species
if any(filter in source_organism for filter in taxonomy_filters):
accessions.append(item[0])
return accessions
def extract_accessions_and_dates(genbank_query, taxonomy_filters):
"""Retrieve the GenBank accessions and retrieval dates of existing sequences from the db query.
:param genbank_query: sql collection
:param taxonomy_filters: set of genera, species and strains to restrict retrieval of sequences
Return a dict {GenBank_accession: retrieval_date}
"""
accessions = {}
if taxonomy_filters is None:
for item in genbank_query:
if item[0].genbank_accession == "NA": # no GenBank accession stored in CAZy
continue
accessions[item[0].genbank_accession] = item[0].seq_update_date
else:
for item in genbank_query:
if item[0].genbank_accession == "NA": # no GenBank accession stored in CAZy
continue
source_organism = item[-1].genus + item[-1].species
if any(filter in source_organism for filter in taxonomy_filters):
accessions[item[0].genbank_accession] = item[0].seq_update_date
return accessions
def get_accessions_for_new_sequences(accessions):
"""Get the GenBank accessions of sequences to be added to the local database.
For records currently with no protein sequence, the retrieved protein sequence will be added
to the record. For records with a sequence, the 'UpdateDate' for the sequence from NCBI will
be compared against the 'seq_update_date' in the local database. The 'seq_update_date' is the
    'UpdateDate' previously retrieved from NCBI. If the NCBI sequence is newer,
the local database will be updated with the new sequence.
    :param accessions: dict, {GenBank accession (str): sequence retrieval date (str)}
    Return a list of GenBank accessions whose sequences are to be retrieved or updated.
"""
logger = logging.getLogger(__name__)
accessions_list = list(accessions.keys())
accessions_string = ",".join(accessions_list)
# perform batch query of Entrez
epost_result = Entrez.read(
entrez_retry(
Entrez.epost, "Protein", id=accessions_string, retmode="text",
)
)
# retrieve the web environment and query key from the Entrez post
epost_webenv = epost_result["WebEnv"]
epost_query_key = epost_result["QueryKey"]
# retrieve summary docs to check the sequence 'UpdateDates' in NCBI
with entrez_retry(
Entrez.efetch,
db="Protein",
query_key=epost_query_key,
WebEnv=epost_webenv,
rettype="docsum",
retmode="xml",
) as handle:
summary_docs = Entrez.read(handle)
    for doc in summary_docs:
        try:
            temp_accession = doc["AccessionVersion"]  # accession of the current working protein
            previous_data = accessions[temp_accession]
        except KeyError:
            logger.warning(
                "Retrieved a protein record whose accession is not in the local database.\n"
                "Not retrieving a sequence for this protein."
            )
            continue
        if previous_data is not None:
            # sequence retrieved previously, thus check if the NCBI seq has been updated since
            previous_data = previous_data.split("/")  # Y=[0], M=[1], D=[2]
            update_date = doc["UpdateDate"]
            update_date = update_date.split("/")  # Y=[0], M=[1], D=[2]
            # (bug fix) compare as real dates; the original called the non-existent
            # 'datetime.data' and used a chained '... is False' test that never fired
            if not datetime.date(
                int(previous_data[0]), int(previous_data[1]), int(previous_data[2]),
            ) < datetime.date(
                int(update_date[0]), int(update_date[1]), int(update_date[2]),
            ):
                # the sequence at NCBI has not been updated since the seq was retrieved,
                # thus no need to retrieve it again
                accessions_list.remove(temp_accession)
return accessions_list
def get_accession_chunks(lst, chunk_length):
"""Separate the long list into separate chunks.
:param lst: list to be separated into smaller lists (or chunks)
:param chunk_length: int, the length of the lists the longer list is to be split up into
Return a generator object containing lists.
"""
for i in range(0, len(lst), chunk_length):
yield lst[i:i + chunk_length]
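# A minimal usage sketch (not part of the original module; the accession values below are
# hypothetical). get_accession_chunks is a generator, so chunks are produced lazily:
# for chunk in get_accession_chunks(["WP_000001", "WP_000002", "WP_000003"], 2):
#     print(chunk)  # -> ['WP_000001', 'WP_000002'], then ['WP_000003']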
# The following functions are for retrieving sequences, adding to the db and writing fasta files
def get_sequences_add_to_db(accessions, date_today, session, args):
"""Retrieve protein sequences from Entrez and add to the local database.
:param accessions: list, GenBank accessions
:param date_today: str, YYYY/MM/DD
:param session: open SQL database session
:param args: cmb-line args parser
Return nothing.
"""
logger = logging.getLogger(__name__)
# perform batch query of Entrez
accessions_string = ",".join(accessions)
epost_result = Entrez.read(
entrez_retry(
Entrez.epost, "Protein", id=accessions_string,
)
)
# retrieve the web environment and query key from the Entrez post
epost_webenv = epost_result["WebEnv"]
epost_query_key = epost_result["QueryKey"]
# retrieve the protein sequences
with entrez_retry(
Entrez.efetch,
db="Protein",
query_key=epost_query_key,
WebEnv=epost_webenv,
rettype="fasta",
retmode="text",
) as seq_handle:
for record in SeqIO.parse(seq_handle, "fasta"):
# retrieve the accession of the record
temp_accession = record.id # accession of the current working protein record
if temp_accession.find("|") != -1: # sometimes multiple items are listed
success = False # will be true if finds protein accession
temp_accession = temp_accession.split("|")
for item in temp_accession:
                    # check if the item is an accession number
try:
re.match(
(
r"(\D{3}\d{5,7}\.\d+)|(\D\d(\D|\d){3}\d)|"
r"(\D\d(\D|\d){3}\d\D(\D|\d){2}\d)"
),
item,
).group()
temp_accession = item
success = True
break
except AttributeError: # raised if not an accession
continue
else:
success = True # have protein accession number
if success is False:
logger.error(
f"Could not retrieve accession from {record.id}, therefore, "
"protein sequence not added to the database,\n"
"because cannot retrieve the necessary CAZyme record"
)
continue
            # check the retrieved protein accession is in the list of requested accessions
if temp_accession not in accessions:
logger.warning(
f"Retrieved the accession {temp_accession} from the record id={record.id}, "
"but this accession is not in the database.\n"
"Therefore, not adding this protein seqence to the local database"
)
continue
# retrieve the GenBank record from the local data base to add the seq to
genbank_record = session.query(Genbank).\
filter(Genbank.genbank_accession == temp_accession).first()
            retrieved_sequence = str(record.seq)  # convert to a string because SQL expects a string
genbank_record.sequence = retrieved_sequence
genbank_record.seq_update_date = date_today
session.commit()
if args.fasta is not None:
file_io.write_out_fasta(record, temp_accession, args)
if args.blastdb is not None:
file_io.write_fasta_for_db(record, temp_accession, args)
# remove the accession from the list
accessions.remove(temp_accession)
if len(accessions) != 0:
logger.warning(
"Protein sequences were not retrieved for the following CAZyme in the local database"
)
for acc in accessions:
logger.warning(f"GenBank accession: {acc}")
return
def entrez_retry(entrez_func, *func_args, **func_kwargs):
"""Call to NCBI using Entrez.
Maximum number of retries is 10, retry initated when network error encountered.
:param logger: logger object
:param retries: parser argument, maximum number of retries excepted if network error encountered
:param entrez_func: function, call method to NCBI
:param *func_args: tuple, arguments passed to Entrez function
:param ** func_kwargs: dictionary, keyword arguments passed to Entrez function
Returns record.
"""
logger = logging.getLogger(__name__)
record, retries, tries = None, 10, 0
while record is None and tries < retries:
try:
record = entrez_func(*func_args, **func_kwargs)
except IOError:
# log retry attempt
if tries < retries:
logger.warning(
f"Network error encountered during try no.{tries}.\nRetrying in 10s",
exc_info=1,
)
time.sleep(10)
tries += 1
if record is None:
logger.error(
"Network error encountered too many times. Exiting attempt to call to NCBI"
)
return
return record
if __name__ == "__main__":
main()
| 42.250286
| 101
| 0.629068
|
e5038b2d43307a9172356d58f8b0bd022b7ddde0
| 6,527
|
py
|
Python
|
04 - Classes-inheritance-oops/53-classes-pickling-magic-methods.py
|
python-demo-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 2
|
2019-08-23T06:05:55.000Z
|
2019-08-26T03:56:07.000Z
|
04 - Classes-inheritance-oops/53-classes-pickling-magic-methods.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | null | null | null |
04 - Classes-inheritance-oops/53-classes-pickling-magic-methods.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 4
|
2020-10-01T07:16:07.000Z
|
2021-07-17T07:55:08.000Z
|
# HEAD
# Classes - Pickling Concept
# DESCRIPTION
# Describes the magic methods of classes
# getinitargs, getnewargs,
# getstate, setstate,
# reduce, reduce_ex
# RESOURCES
#
# https://rszalski.github.io/magicmethods/
# Pickling your own Objects
# Pickling isn't just for built-in types.
# It's for any class that follows the pickle protocol.
# The pickle protocol has several optional methods for Python
# objects to customize how they act (it's a bit
# different for C extensions, but that's not in our scope):
# __getinitargs__(self)
# If you'd like for __init__ to be called when your class is unpickled, you can define __getinitargs__, which should return a tuple of the arguments that you'd like to be passed to __init__. Note that this method will only work for old-style classes.
# __getnewargs__(self)
# For new-style classes, you can influence what arguments get passed to __new__ upon unpickling. This method should also return a tuple of arguments that will then be passed to __new__.
# __getstate__(self)
# Instead of the object's __dict__ attribute being stored, you can return a custom state to be stored when the object is pickled. That state will be used by __setstate__ when the object is unpickled.
# __setstate__(self, state)
# When the object is unpickled, if __setstate__ is defined the object's state will be passed to it instead of directly applied to the object's __dict__. This goes hand in hand with __getstate__: when both are defined, you can represent the object's pickled state however you want with whatever you want.
# __reduce__(self)
# When defining extension types (i.e., types implemented using Python's C API), you have to tell Python how to pickle them if you want to pickle them. __reduce__() is called when an object defining it is pickled. It can either return a string representing a global name that Python will look up and pickle, or a tuple. The tuple contains between 2 and 5 elements: a callable object that is called to recreate the object, a tuple of arguments for that callable object, state to be passed to __setstate__ (optional), an iterator yielding list items to be pickled (optional), and an iterator yielding dictionary items to be pickled (optional).
# __reduce_ex__(self)
# __reduce_ex__ exists for compatibility. If it is defined, __reduce_ex__ will be called over __reduce__ on pickling. __reduce__ can be defined as well for older versions of the pickling API that did not support __reduce_ex__.
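# For instance (an illustrative sketch added here, not from the original guide), __reduce__
# can pickle an object as "call this callable with these args on unpickling":
# import pickle
# class Point:
#     def __init__(self, x, y):
#         self.x, self.y = x, y
#     def __reduce__(self):
#         return (Point, (self.x, self.y))  # recreate via Point(x, y)
# p2 = pickle.loads(pickle.dumps(Point(1, 2)))  # p2.x == 1, p2.y == 2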
# An Example
# Our example is a Slate, which remembers what its values
# have been and when those values were written to it.
# However, this particular slate goes blank each time
# it is pickled: the current value will not be saved.
# import time
# class Slate:
# '''Class to store a string and a changelog, and forget its value when
# pickled.'''
# def __init__(self, value):
# self.value = value
# self.last_change = time.asctime()
# self.history = {}
# def change(self, new_value):
# # Change the value. Commit last value to history
# self.history[self.last_change] = self.value
# self.value = new_value
# self.last_change = time.asctime()
# def print_changes(self):
#         print('Changelog for Slate object:')
#         for k, v in self.history.items():
#             print('%s\t %s' % (k, v))
# def __getstate__(self):
# # Deliberately do not return self.value or self.last_change.
# # We want to have a "blank slate" when we unpickle.
# return self.history
# def __setstate__(self, state):
# # Make self.history = state and last_change and value undefined
# self.history = state
# self.value, self.last_change = None, None
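# A Python 3 rendering of the same Slate idea (added for clarity; the guide's example above
# is Python 2). Pickling keeps only the history, so value and last_change reset on load:
# import pickle, time
# class Slate:
#     def __init__(self, value):
#         self.value = value
#         self.last_change = time.asctime()
#         self.history = {}
#     def __getstate__(self):
#         return self.history          # pickle the history only
#     def __setstate__(self, state):
#         self.history = state         # restore history; leave the slate blank
#         self.value, self.last_change = None, None
# s = pickle.loads(pickle.dumps(Slate('hello')))
# assert s.value is None and s.history == {}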
# Conclusion
# The goal of this guide is to bring something to anyone that reads it, regardless of their experience with Python or object-oriented programming. If you're just getting started with Python, you've gained valuable knowledge of the basics of writing feature-rich, elegant, and easy-to-use classes. If you're an intermediate Python programmer, you've probably picked up some slick new concepts and strategies and some good ways to reduce the amount of code written by you and clients. If you're an expert Pythonista, you've been refreshed on some of the stuff you might have forgotten about and maybe picked up a few new tricks along the way. Whatever your experience level, I hope that this trip through Python's special methods has been truly magical. (I couldn't resist the final pun!)
# Appendix 1: How to Call Magic Methods
# Some of the magic methods in Python directly map to built-in functions; in this case, how to invoke them is fairly obvious. However, in other cases, the invocation is far less obvious. This appendix is devoted to exposing non-obvious syntax that leads to magic methods getting called.
# Magic Method When it gets invoked (example) Explanation
# __new__(cls [,...]) instance = MyClass(arg1, arg2) __new__ is called on instance creation
# __init__(self [,...]) instance = MyClass(arg1, arg2) __init__ is called on instance creation
# __cmp__(self, other) self == other, self > other, etc. Called for any comparison
# __pos__(self) +self Unary plus sign
# __neg__(self) -self Unary minus sign
# __invert__(self) ~self Bitwise inversion
# __index__(self) x[self] Conversion when object is used as index
# __nonzero__(self) bool(self) Boolean value of the object
# __getattr__(self, name) self.name # name doesn't exist Accessing nonexistent attribute
# __setattr__(self, name, val) self.name = val Assigning to an attribute
# __delattr__(self, name) del self.name Deleting an attribute
# __getattribute__(self, name) self.name Accessing any attribute
# __getitem__(self, key) self[key] Accessing an item using an index
# __setitem__(self, key, val) self[key] = val Assigning to an item using an index
# __delitem__(self, key) del self[key] Deleting an item using an index
# __iter__(self) for x in self Iteration
# __contains__(self, value) value in self, value not in self Membership tests using in
# __call__(self [,...]) self(args) "Calling" an instance
# __enter__(self) with self as x: with statement context managers
# __exit__(self, exc, val, trace) with self as x: with statement context managers
# __getstate__(self) pickle.dump(pkl_file, self) Pickling
# __setstate__(self) data = pickle.load(pkl_file) Pickling
| 63.368932
| 786
| 0.735254
|
51c8073449214e02e876fec13f4eafe26e17ae37
| 7,161
|
py
|
Python
|
digits/dataset/generic/views.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
digits/dataset/generic/views.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
digits/dataset/generic/views.py
|
Linda-liugongzi/DIGITS-digits-py3
|
6df5eb6972574a628b9544934518ec8dfa9c7439
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import os
from io import StringIO, BytesIO
from caffe.proto import caffe_pb2
import flask
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
from .forms import GenericDatasetForm
from .job import GenericDatasetJob
from digits import extensions, utils
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE
from digits.utils.routing import request_wants_json, job_from_request
from digits.utils.lmdbreader import DbReader
from digits.webapp import scheduler
from flask_babel import lazy_gettext as _
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/new/<extension_id>', methods=['GET'])
@utils.auth.requires_login
def new(extension_id):
"""
Returns a form for a new GenericDatasetJob
"""
form = GenericDatasetForm()
# Is there a request to clone a job with ?clone=<job_id>
utils.forms.fill_form_if_cloned(form)
extension = extensions.data.get_extension(extension_id)
if extension is None:
raise ValueError("Unknown extension '%s'" % extension_id)
extension_form = extension.get_dataset_form()
# Is there a request to clone a job with ?clone=<job_id>
utils.forms.fill_form_if_cloned(extension_form)
template, context = extension.get_dataset_template(extension_form)
rendered_extension = flask.render_template_string(template, **context)
return flask.render_template(
'datasets/generic/new.html',
extension_title=extension.get_title(),
extension_id=extension_id,
extension_html=rendered_extension,
form=form
)
@blueprint.route('/create/<extension_id>.json', methods=['POST'])
@blueprint.route('/create/<extension_id>',
methods=['POST'],
strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create(extension_id):
"""
Creates a new GenericDatasetJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = GenericDatasetForm()
form_valid = form.validate_on_submit()
extension_class = extensions.data.get_extension(extension_id)
extension_form = extension_class.get_dataset_form()
extension_form_valid = extension_form.validate_on_submit()
if not (extension_form_valid and form_valid):
# merge errors
errors = form.errors.copy()
errors.update(extension_form.errors)
template, context = extension_class.get_dataset_template(
extension_form)
rendered_extension = flask.render_template_string(
template,
**context)
if request_wants_json():
return flask.jsonify({'errors': errors}), 400
else:
return flask.render_template(
'datasets/generic/new.html',
extension_title=extension_class.get_title(),
extension_id=extension_id,
extension_html=rendered_extension,
form=form,
errors=errors), 400
# create instance of extension class
extension = extension_class(**extension_form.data)
job = None
try:
# create job
job = GenericDatasetJob(
username=utils.auth.get_username(),
name=form.dataset_name.data,
group=form.group_name.data,
backend=form.dsopts_backend.data,
feature_encoding=form.dsopts_feature_encoding.data,
label_encoding=form.dsopts_label_encoding.data,
batch_size=int(form.dsopts_batch_size.data),
num_threads=int(form.dsopts_num_threads.data),
force_same_shape=form.dsopts_force_same_shape.data,
extension_id=extension_id,
extension_userdata=extension.get_user_data(),
)
# Save form data with the job so we can easily clone it later.
utils.forms.save_form_to_job(job, form)
utils.forms.save_form_to_job(job, extension_form)
# schedule tasks
scheduler.add_job(job)
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for(
'digits.dataset.views.show',
job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
@blueprint.route('/explore', methods=['GET'])
@utils.auth.requires_login
def explore():
"""
Returns a gallery consisting of the images of one of the dbs
"""
job = job_from_request()
# Get LMDB
db = job.path(flask.request.args.get('db'))
db_path = job.path(db)
if (os.path.basename(db_path) == 'labels' and
COLOR_PALETTE_ATTRIBUTE in job.extension_userdata and
job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]):
# assume single-channel 8-bit palette
palette = job.extension_userdata[COLOR_PALETTE_ATTRIBUTE]
        palette = np.array(palette).reshape((len(palette) // 3, 3)) / 255.
# normalize input pixels to [0,1]
norm = mpl.colors.Normalize(vmin=0, vmax=255)
# create map
cmap = plt.cm.ScalarMappable(norm=norm,
cmap=mpl.colors.ListedColormap(palette))
else:
cmap = None
page = int(flask.request.args.get('page', 0))
size = int(flask.request.args.get('size', 25))
reader = DbReader(db_path)
count = 0
imgs = []
min_page = max(0, page - 5)
total_entries = reader.total_entries
    max_page = min((total_entries - 1) // size, page + 5)
pages = list(range(min_page, max_page + 1))
for key, value in reader.entries():
if count >= page * size:
datum = caffe_pb2.Datum()
datum.ParseFromString(value)
if not datum.encoded:
raise RuntimeError(_("Expected encoded database"))
s = BytesIO()
s.write(datum.data)
s.seek(0)
img = PIL.Image.open(s)
if cmap and img.mode in ['L', '1']:
data = np.array(img)
data = cmap.to_rgba(data) * 255
data = data.astype('uint8')
# keep RGB values only, remove alpha channel
data = data[:, :, 0:3]
img = PIL.Image.fromarray(data)
imgs.append({"label": None, "b64": utils.image.embed_image_html(img)})
count += 1
if len(imgs) >= size:
break
return flask.render_template(
'datasets/images/explore.html',
page=page, size=size, job=job, imgs=imgs, labels=None,
pages=pages, label=None, total_entries=total_entries, db=db)
def show(job, related_jobs=None):
"""
Called from digits.dataset.views.show()
"""
return flask.render_template('datasets/generic/show.html', job=job, related_jobs=related_jobs)
def summary(job):
"""
Return a short HTML summary of a GenericDatasetJob
"""
return flask.render_template('datasets/generic/summary.html', dataset=job)
| 32.55
| 98
| 0.647256
|
e0728e26e0dfda931a10a9fdb2417adf4af9bb16
| 15,775
|
py
|
Python
|
FeatureExtraction/BroExtraction/ConnectionFeatures.py
|
frenky-strasak/HTTPSDetector
|
2b0c8d171b345ec0051603fc3c2730b6e62a295e
|
[
"MIT"
] | 9
|
2020-02-06T18:39:58.000Z
|
2022-02-04T12:14:20.000Z
|
FeatureExtraction/BroExtraction/ConnectionFeatures.py
|
frenky-strasak/HTTPSDetector
|
2b0c8d171b345ec0051603fc3c2730b6e62a295e
|
[
"MIT"
] | 1
|
2018-03-30T08:47:27.000Z
|
2019-04-30T09:08:23.000Z
|
FeatureExtraction/BroExtraction/ConnectionFeatures.py
|
frenky-strasak/HTTPSDetector
|
2b0c8d171b345ec0051603fc3c2730b6e62a295e
|
[
"MIT"
] | 4
|
2018-07-13T16:31:11.000Z
|
2021-01-07T07:58:05.000Z
|
import numpy
from Connection4tuple import Connection4tuple
class ConnectionFeatures(Connection4tuple):
def __init__(self, tuple_index):
super(ConnectionFeatures, self).__init__(tuple_index)
"""
---------- Get Feature -------------------
"""
# ---------------------------------------------------
# 01. ---------- Number of flows --------------------
def get_number_of_flows(self):
return self.get_number_of_ssl_flows() + self.get_number_of_not_ssl_flows()
# ---------------------------------------------------
# ---------- Duration of flows ----------------------
# 02. Average
def get_average_of_duration(self):
# self.check_zero_dividing(self.flow_which_has_duration_number, "flow_which_has_duration_number is 0 !!!")
if self.flow_which_has_duration_number != 0:
return self.average_duration / float(self.flow_which_has_duration_number)
return -1
# 03. Standard deviation
def get_standard_deviation_duration(self):
# self.check_zero_dividing(self.flow_which_has_duration_number, "flow_which_has_duration_number is 0 !!!")
# EX = self.average_duration / float(self.flow_which_has_duration_number)
# EX2 = self.average_duration_power / float(self.flow_which_has_duration_number) # E(X^2)
# DX = EX2 - EX*EX
# return pow(DX, 0.5)
if len(self.duration_list) != 0:
return numpy.std(self.duration_list)
return -1
    # 04. Percent of flows whose duration lies more than one standard deviation from the average
def get_percent_of_standard_deviation_duration(self):
# self.check_zero_dividing(self.flow_which_has_duration_number, "flow_which_has_duration_number is 0 !!!")
if len(self.duration_list) != 0:
out_of_bounds = 0
lower_level = self.get_average_of_duration() - self.get_standard_deviation_duration()
upper_level = self.get_average_of_duration() + self.get_standard_deviation_duration()
for i in range(len(self.duration_list)):
if self.duration_list[i] < lower_level:
out_of_bounds += 1
elif self.duration_list[i] > upper_level:
out_of_bounds += 1
return out_of_bounds / float(self.flow_which_has_duration_number)
return -1
# -------------------------------------------------------------------
# 05 -------- Total payload size of flows the originator sent --------
def get_total_size_of_flows_orig(self):
return self.total_size_of_flows_orig
# ------------------------------------------------------------------
# 06 -------- Total payload size of flows the responder sent --------
def get_total_size_of_flows_resp(self):
return self.total_size_of_flows_resp
# ---------------------------------------------------------------------------
# 07 ------ Ratio of responder payload sizes and originator payload sizes ----
def get_ratio_of_sizes(self):
# self.check_zero_dividing(self.total_size_of_flows_orig, "Original size is 0 !!!")
if self.total_size_of_flows_orig != 0:
return self.total_size_of_flows_resp / float(self.total_size_of_flows_orig)
return -1
# --------------------------------------------------------------------
# ------ State of connection -----------------------------------------
# 08 Percent of established connection
    def get_percent_of_established_states(self):
        established_states = 0
        total_value_states = 0
        for key in self.state_of_connection_dict.keys():
            total_value_states += self.state_of_connection_dict[key]
        if total_value_states != 0:
            established_states += self.state_of_connection_dict.get('SF', 0)
            established_states += self.state_of_connection_dict.get('S1', 0)
            established_states += self.state_of_connection_dict.get('S2', 0)
            established_states += self.state_of_connection_dict.get('S3', 0)
            established_states += self.state_of_connection_dict.get('RSTO', 0)  # delete this
            established_states += self.state_of_connection_dict.get('RSTR', 0)  # delete this
            return established_states / float(total_value_states)
        return -1
"""
These functions are not used.
"""
# 09 - return 4 items
# def get_based_states_ratio(self):
# SF_S1 = self.state_of_connection_dict['SF'] + self.state_of_connection_dict['S1']
# S0 = self.state_of_connection_dict['S0']
# OTH = self.state_of_connection_dict['OTH']
# REJ = self.state_of_connection_dict['REJ']
# biggest = max(SF_S1, S0, OTH, REJ) / 100.0
# return SF_S1 / float(biggest), S0 / float(biggest), OTH / float(biggest), REJ / float(biggest)
#
# # 10 - return 6 items
# def get_extended_states_ratio(self):
# SF_S1 = self.state_of_connection_dict['SF'] + self.state_of_connection_dict['S1']
# S0 = self.state_of_connection_dict['S0']
# OTH = self.state_of_connection_dict['OTH']
# REJ = self.state_of_connection_dict['REJ']
# RSTO_1 = self.state_of_connection_dict['RSTO'] + self.state_of_connection_dict['RSTR'] + self.state_of_connection_dict['S2'] + self.state_of_connection_dict['S3']
# RSTO_2 = self.state_of_connection_dict['RSTOS0'] + self.state_of_connection_dict['RSTRH'] + self.state_of_connection_dict['SH'] + self.state_of_connection_dict['SHR']
# biggest = max(SF_S1, S0, OTH, REJ, RSTO_1, RSTO_2) / 100.0
# return SF_S1 / float(biggest), S0 / float(biggest), OTH / float(biggest), REJ / float(biggest), RSTO_1 / float(biggest), RSTO_2 / float(biggest)
# 11 inbound packets == resp_pkts (18)
# Number of packets that the responder sent.
def get_inbound_pckts(self):
return self.inbound_packtes
# 12 outbound packets == orig_pkts (16)
def get_outbound_pckts(self):
return self.outbound_packtes
# Periodicity
# 13 Average of periodicity
    def get_periodicity_average(self):
        per_list = self.get_periodicity_list()
        if len(per_list) != 0:
            return sum(per_list) / float(len(per_list))
        # print "periodicity list is zero. Number of flows:", self.get_number_of_flows()
        return -1
# 14
def get_periodicity_standart_deviation(self):
per_list = self.get_periodicity_list()
if len(per_list) != 0:
# sum = 0
# for i in range(len(per_list)):
# sum += pow(per_list[i], 2)
# EX2 = sum / float(len(per_list))
# DX = EX2 - EX * EX
# return pow(DX, 0.5)
return numpy.std(self.get_periodicity_list())
return -1
# -----------------------------------------------------
# 15 ------ Ratio of not ssl flows and ssl flows -------
def get_ssl_ratio(self):
        self.check_zero_dividing(len(self.ssl_flow_list), "ssl_flow_list is empty !!!")
return len(self.not_ssl_flow_list) / float(len(self.ssl_flow_list))
    # 16 Average public key length
# certificate feature
def get_average_public_key(self):
total = 0
index = 0
for key in self.certificate_key_length_dict.keys():
total += self.certificate_key_length_dict[key] * int(key)
index += 1
if index != 0:
return total / float(index)
return -1
# ------------------------------------------------------
# 17 Version of ssl ratio
def get_tls_version_ratio(self):
tls = 0
ssl = 0
total = 0
for key in self.version_of_ssl_dict.keys():
if 'tls' in key.lower():
tls += self.version_of_ssl_dict[key]
elif 'ssl' in key.lower():
ssl += self.version_of_ssl_dict[key]
total += self.version_of_ssl_dict[key]
if total != 0:
return tls / float(total)
return -1
# ----------------------------------------------
# Certificate validation length
# 18 Average of certificate length
# certificate_valid_length = sum of certificate valid length in days
    # certificate_valid_number = number of certificates
def get_average_of_certificate_length(self):
# self.check_zero_dividing(self.certificate_valid_number, "certificate_valid_number is 0 !!!")
if self.certificate_valid_number != 0:
            if numpy.mean(self.temp_list) != self.certificate_valid_length / float(self.certificate_valid_number):
                print("Error: numpy mean and mean by hand are not the same.")
return self.certificate_valid_length / float(self.certificate_valid_number)
return -1
# 19
def get_standart_deviation_cert_length(self):
# self.check_zero_dividing(self.certificate_valid_number, "certificate_valid_number is 0 !!!")
if self.certificate_valid_number != 0:
EX = self.certificate_valid_length / self.certificate_valid_number
EX2 = self.certificate_valid_length_pow / self.certificate_valid_number
DX = EX2 - (EX * EX)
# if DX < 0:
# print "EX:", (EX*EX)
# print "EX2:", EX2
# print "DX:", DX
# print self.temp_list
# print "std:", numpy.std(self.temp_list)
# print len(self.x509_list)
return pow(DX, 0.5)
return -1
# ---------------------------------------------
# 20 Validity of the certificate during the capture
# certificate feature
    # 0 == no certificate was out of validity range
def is_valid_certificate_during_capture(self):
if len(self.cert_percent_validity) != 0:
return self.not_valid_certificate_number
return -1
# 21 Amount of different certificates
# certificate feature
def get_amount_diff_certificates(self):
return len(self.certificate_serial_dict.keys())
# -------------------------------------------------------
# 22 Number of domains in certificate
# certificate feature
def get_number_of_domains_in_certificate(self):
if self.number_san_domains_index != 0:
return self.number_san_domains / float(self.number_san_domains_index)
return -1
# 23 Certificate ratio
# certificate feature
# List of length of certificate validity length.
def get_certificate_ratio(self):
if len(self.cert_percent_validity) != 0:
temp = 0
for value in self.cert_percent_validity:
temp += value
return temp / float(len(self.cert_percent_validity))
else:
return -1
# 24 Certificate path
# number of signed certificate in our first certificate
    # It is EX (weighted average)
def get_number_of_certificate_path(self):
up = 0
down = 0
for key in self.certificate_path.keys():
up += int(key) * self.certificate_path[key]
down += self.certificate_path[key]
if down != 0:
return up/float(down)
return -1
# 25 x509/ssl ratio
# ratio about how many ssl log has x509 information in this connection
def x509_ssl_ratio(self):
if len(self.ssl_logs_list) == 0:
return -1
return len(self.x509_list) / float(len(self.ssl_logs_list))
# 26 SNI and SSL ratio
# ratio, how many ssl flows have SNI (server name)
def SNI_ssl_ratio(self):
return self.ssl_with_SNI / float(len(self.ssl_logs_list))
# 27 Self_signed cert and all cert ratio
def self_signed_ratio(self):
# number_of_certificate = len(self.certificate_serial_dict.keys())
if len(self.ssl_logs_list) != 0:
return self.self_signed_cert / float(len(self.ssl_logs_list))
return -1
    # 28 Is there any SNI which is not in san.dns?
def is_SNIs_in_SNA_dns(self):
if len(self.is_SNI_in_san_dns) != 0:
for a in self.is_SNI_in_san_dns:
if a == 0:
return 0
return 1
return -1
    # 29 If the SNI is an IP, is dst the same IP?
def get_SNI_equal_DstIP(self):
return self.SNI_equal_DstIP
    # 30 Is there any CN which is not in san.dns?
def is_CNs_in_SNA_dns(self):
if len(self.is_CN_in_SAN_list) != 0:
for a in self.is_CN_in_SAN_list:
if a == 0:
return 0
return 1
return -1
"""
----------------- New Features ------------------
"""
    # 31 How many ssl lines have a different SNI?
def ratio_of_differ_SNI_in_ssl_log(self):
# Delete stars.
for i in range(0, len(self.SNI_list)):
if '*' in self.SNI_list[i]:
self.SNI_list[i] = self.SNI_list[i].replace('*', '')
return compute_differents_in_lines(self.SNI_list)
    # 32 How many ssl lines have a different subject
def ratio_of_differ_subject_in_ssl_log(self):
return compute_differents_in_lines(self.subject_ssl_list)
    # 33 How many ssl lines have a different issuer
def ratio_of_differ_issuer_in_ssl_log(self):
return compute_differents_in_lines(self.issuer_ssl_list)
    # 34 How many certs have a different subject
def ratio_of_differ_subject_in_cert(self):
return compute_differents_in_lines(self.subject_x509_list)
    # 35 How many certs have a different issuer
def ratio_of_differ_issuer_in_cert(self):
return compute_differents_in_lines(self.issuer_x509_list)
    # 36 How many certs have different san dns
def ratio_of_differ_sandns_in_cert(self):
return compute_differents_in_lines(self.san_x509_list)
# 37 Do ssl and x509 lines have same subjects?
def ratio_of_same_subjects(self):
if len(self.x509_list) == 0:
return -1
return self.subject_diff / float(len(self.x509_list))
# 38 Do ssl and x509 lines have same issuer?
def ratio_of_same_issuer(self):
if len(self.x509_list) == 0:
return -1
return self.issuer_diff / float(len(self.x509_list))
# 39 Is SNI and CN same?
def ratio_is_same_CN_and_SNI(self):
if len(self.x509_list) == 0:
return -1
return self.SNI_is_in_CN / float(len(self.x509_list))
# 40 Certificate exponent average
def average_certificate_exponent(self):
if len(self.certificate_serial_dict.keys()) == 0:
return -1
return self.certificate_exponent / float(len(self.certificate_serial_dict.keys()))
# 41 Is server name in top-level-domain ?
def is_SNI_in_top_level_domain(self):
if self.ssl_with_SNI == 0:
return -1
return self.top_level_domain_error / float(self.ssl_with_SNI)
# 42 Is certificate path right ? (issuer of first certificate is subject in second cert...)
def ratio_certificate_path_error(self):
        if len(self.ssl_logs_list) == 0:
            return -1
return self.certificate_path_error / float(len(self.ssl_logs_list))
# 43 Missing certificate in certificate path.
def ratio_missing_cert_in_cert_path(self):
        if len(self.ssl_logs_list) == 0:
            return -1
return self.missing_cert_in_cert_path / float(len(self.ssl_logs_list))
"""
------- Computation method ---------
"""
def compute_differents_in_lines(array):
_dict = dict()
for item in array:
        try:
            _dict[item] += 1
        except KeyError:
            _dict[item] = 1
if len(array) == 0:
return -1.0
if len(_dict.keys()) == 1:
return 0.0
return len(_dict.keys()) / float(len(array))
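# Usage sketch (added for illustration; values hypothetical). The ratio is 0.0 when all
# lines share one value and approaches 1.0 as every line differs:
# compute_differents_in_lines(['a', 'a', 'a'])       # -> 0.0
# compute_differents_in_lines(['a', 'b', 'c', 'a'])  # -> 3 / 4.0 = 0.75
# compute_differents_in_lines([])                    # -> -1.0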
| 40.448718
| 176
| 0.607163
|
299d55f7ac04b9ef653d4f4d4d2251a531a84c51
| 11,688
|
py
|
Python
|
evaluation/evalCorr/getResults.py
|
tim885/RANSAC-Flow
|
4a10c204fbb8a1ea92826263661761f91c91839c
|
[
"MIT"
] | 1
|
2020-11-20T19:35:01.000Z
|
2020-11-20T19:35:01.000Z
|
evaluation/evalCorr/getResults.py
|
ducha-aiki/RANSAC-Flow
|
1cfa2707ac695ca29dab4011eca81e0e24807221
|
[
"MIT"
] | null | null | null |
evaluation/evalCorr/getResults.py
|
ducha-aiki/RANSAC-Flow
|
1cfa2707ac695ca29dab4011eca81e0e24807221
|
[
"MIT"
] | 1
|
2021-01-28T12:24:46.000Z
|
2021-01-28T12:24:46.000Z
|
import numpy as np
import torch
import kornia.geometry as tgm
import pickle
import os
from PIL import Image
from torchvision import transforms
from torch.nn import functional as F
import cv2
from pathlib import Path
from tqdm import tqdm
import argparse
import pandas as pd
def alignmentError(wB, hB, wA, hA, XA, YA, XB, YB, flow, match2, pixelGrid) :
estimX = flow.narrow(3, 1, 1).view(1, 1, hB, wB)
estimY = flow.narrow(3, 0, 1).view(1, 1, hB, wB)
estimY = ((estimY + 1) * 0.5 * (wA - 1))
estimX = ((estimX + 1) * 0.5 * (hA - 1))
match = match2.squeeze().numpy()
estimY = estimY.squeeze().numpy()
estimX = estimX.squeeze().numpy()
xa, ya, xb, yb = XA.astype(np.int64), YA.astype(np.int64), XB.astype(np.int64), YB.astype(np.int64)
index = np.where(match[yb, xb] > 0.5)[0]
nbAlign = len(index)
if nbAlign > 0 :
xa, ya, xb, yb = xa[index], ya[index], xb[index], yb[index]
xaH = estimY[yb, xb]
yaH = estimX[yb, xb]
pixelDiff = ((xaH - xa) ** 2 + (yaH - ya) ** 2)**0.5
pixelDiffT = pixelDiff.reshape((-1, 1))
pixelDiffT = np.sum(pixelDiffT <= pixelGrid, axis = 0)
else :
pixelDiffT = np.zeros(pixelGrid.shape[1])
return pixelDiffT, nbAlign
## resize the image according to minSize; at the same time rescale the x, y coordinates
def ResizeMinResolution(minSize, I, x, y, strideNet) :
x = np.array(list(map(float, x.split(';')))).astype(np.float32)
y = np.array(list(map(float, y.split(';')))).astype(np.float32)
w, h = I.size
ratio = min(w / float(minSize), h / float(minSize))
new_w, new_h = round(w/ ratio), round(h / ratio)
new_w, new_h = new_w // strideNet * strideNet , new_h // strideNet * strideNet
ratioW, ratioH = new_w / float(w), new_h / float(h)
I = I.resize((new_w, new_h), resample=Image.LANCZOS)
x, y = x * ratioW, y * ratioH
return I, x, y
## resize the image according to minSize; at the same time rescale the x, y coordinates
## for the MegaDepth dataset, remove the points that are outside the image (errors of the 3D points)
def ResizeMinResolution_megadepth(minSize, I, x, y, strideNet) :
x = np.array(list(map(float, x.split(';')))).astype(np.float32)
y = np.array(list(map(float, y.split(';')))).astype(np.float32)
w, h = I.size
ratio = min(w / float(minSize), h / float(minSize))
new_w, new_h = round(w/ ratio), round(h / ratio)
new_w, new_h = new_w // strideNet * strideNet , new_h // strideNet * strideNet
ratioW, ratioH = new_w / float(w), new_h / float(h)
I = I.resize((new_w, new_h), resample=Image.LANCZOS)
x, y = x * ratioW, y * ratioH
index_valid = (x > 0) * (x < new_w) * (y > 0) * (y < new_h)
return I, x, y, index_valid
def getFlow(pairID, finePath, flowList, coarsePath, maskPath, multiH, th) :
find = False
for flowName in flowList :
if flowName.split('_')[1] == str(pairID) :
nbH = flowName.split('_')[2].split('H')[0]
find = True
break
if not find :
return [], []
flow = torch.from_numpy ( np.load(os.path.join(finePath, 'flow_{:d}_{}H.npy'.format(pairID, nbH))).astype(np.float32) )
param = torch.from_numpy ( np.load(os.path.join(coarsePath, 'flow_{:d}_{}H.npy'.format(pairID, nbH))).astype(np.float32) )
match = np.load(os.path.join(finePath, 'mask_{:d}_{}H.npy'.format(pairID, nbH)))
matchBG = np.load(os.path.join(maskPath, 'maskBG_{:d}_{}H.npy'.format(pairID, nbH)))
h, w = flow.size()[2], flow.size()[3]
#### -- grid
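    # (comment added for clarity) gridX/gridY build an identity sampling grid in the
    # normalized [-1, 1] coordinates that F.grid_sample expects; the fine flow below is
    # a residual added to this grid, then composed with the coarse homography field.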
gridY = torch.linspace(-1, 1, steps = h * 8).view(1, -1, 1, 1).expand(1, h * 8, w * 8, 1)
gridX = torch.linspace(-1, 1, steps = w * 8).view(1, 1, -1, 1).expand(1, h * 8, w * 8, 1)
grid = torch.cat((gridX, gridY), dim=3)
warper = tgm.HomographyWarper(h * 8, w * 8)
coarse = warper.warp_grid(param)
flow = F.interpolate(input = flow, scale_factor = 8, mode='bilinear')
flow = flow.permute(0, 2, 3, 1)
flowUp = torch.clamp(flow + grid, min=-1, max=1)
flow = F.grid_sample(coarse.permute(0, 3, 1, 2), flowUp).permute(0, 2, 3, 1).contiguous()
match = torch.from_numpy(match)
match = F.interpolate(input = match, scale_factor = 8, mode='bilinear')
match = match.narrow(1, 0, 1) * F.grid_sample(match.narrow(1, 1, 1), flowUp) * (((flow.narrow(3, 0, 1) >= -1) * ( flow.narrow(3, 0, 1) <= 1)).type(torch.FloatTensor) * ((flow.narrow(3, 1, 1) >= -1) * ( flow.narrow(3, 1, 1) <= 1)).type(torch.FloatTensor)).permute(0, 3, 1, 2)
#match = match.narrow(1, 0, 1) * (((flow.narrow(3, 0, 1) >= -1) * ( flow.narrow(3, 0, 1) <= 1)).type(torch.FloatTensor) * ((flow.narrow(3, 1, 1) >= -1) * ( flow.narrow(3, 1, 1) <= 1)).type(torch.FloatTensor)).permute(0, 3, 1, 2)
match = match.permute(0, 2, 3, 1)
flow = torch.clamp(flow, min=-1, max=1)
flowGlobal = flow[:1]
match_binary = match[:1] >= th
matchGlobal = match[:1]
## aggregate mask
if multiH :
for i in range(1, len(match)) :
tmp_match = (match.narrow(0, i, 1) >= th) * (~ match_binary)
matchGlobal[tmp_match] = match.narrow(0, i, 1)[tmp_match]
match_binary = match_binary + tmp_match
tmp_match = tmp_match.expand_as(flowGlobal)
flowGlobal[tmp_match] = flow.narrow(0, i, 1)[tmp_match]
return flowGlobal, matchGlobal
def getFlow_Coarse(pairID, flowList, finePath, coarsePath) :
find = False
for flowName in flowList :
if flowName.split('_')[1] == str(pairID) :
nbH = flowName.split('_')[2].split('H')[0]
find = True
break
if not find :
return [], []
flow = torch.from_numpy ( np.load(os.path.join(finePath, 'flow_{:d}_{}H.npy'.format(pairID, nbH))).astype(np.float32) )
param = torch.from_numpy ( np.load(os.path.join(coarsePath, 'flow_{:d}_{}H.npy'.format(pairID, nbH))).astype(np.float32) )
h, w = flow.size()[2], flow.size()[3]
#### -- grid
gridY = torch.linspace(-1, 1, steps = h * 8).view(1, -1, 1, 1).expand(1, h * 8, w * 8, 1)
gridX = torch.linspace(-1, 1, steps = w * 8).view(1, 1, -1, 1).expand(1, h * 8, w * 8, 1)
grid = torch.cat((gridX, gridY), dim=3)
warper = tgm.HomographyWarper(h * 8, w * 8)
coarse = warper.warp_grid(param.narrow(0, 0, 1))
return coarse, torch.ones(1, h * 8, w * 8, 1)
parser = argparse.ArgumentParser()
## model parameters
parser.add_argument('--multiH', action='store_true', help='multiple homograhy or not')
parser.add_argument('--onlyCoarse', action='store_true', help='only Coarse')
parser.add_argument('--minSize', type=int, default = 480, help='min size')
parser.add_argument('--matchabilityTH',type=float, nargs='+', default = [0], help='matchability threshold list')
parser.add_argument('--coarsePth', type=str, help='prediction file coarse flow ')
parser.add_argument('--finePth', type=str, help='prediction file fine flow')
parser.add_argument('--maskPth', type=str, help='prediction file mask')
parser.add_argument('--th', type=float, default=0.95, help='threshold')
parser.add_argument('--dataset', type=str, default='MegaDepth', help='RobotCar or megadepth')
subparsers = parser.add_subparsers(title="test dataset", dest="subcommand")
robotCar = subparsers.add_parser("RobotCar", help="parser for training arguments")
## test file
robotCar.add_argument('--testDir', type=str, default = '../../data/RobotCar/imgs/', help='RGB image directory')
robotCar.add_argument('--testCSV', type=str, default = '../../data/RobotCar/test6511.csv', help='csv file listing test image pairs')
megaDepth1600 = subparsers.add_parser("MegaDepth", help="parser for training arguments")
## test file
megaDepth1600.add_argument('--testDir', type=str, default = '../../data/MegaDepth/Test/test1600Pairs', help='RGB image directory')
megaDepth1600.add_argument('--testCSV', type=str, default = '../../data/MegaDepth/Test/test1600Pairs.csv', help='csv file listing test image pairs')
megaDepth1600.add_argument('--beginIndex', type=int, default = 0, help='begin index')
megaDepth1600.add_argument('--endIndex', type=int, default = 1600, help='end index')
args = parser.parse_args()
print (args)
minSize = args.minSize
strideNet = 16
## Loading data
# Set up for real validation
df = pd.read_csv(args.testCSV, dtype=str)
precAllAlign = {}
validAlign = {}
for th in args.matchabilityTH :
precAllAlign[th] = np.zeros(8)
validAlign[th] = 0
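# (comment added for clarity) 8 log-spaced pixel-error thresholds from 1 to 36 px,
# roughly [1, 1.7, 2.8, 4.6, 7.7, 12.9, 21.6, 36] before rounding.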
pixelGrid = np.around(np.logspace(0, np.log10(36), 8).reshape(-1, 8))
print ('Evaluation for pixel grid : \n')
print ('--> ', pixelGrid, '\n')
nbImg = len(df)
flowList = os.listdir(args.finePth)
for i in tqdm(range(nbImg)) :
scene = df['scene'][i]
#### -- Source Image feature
Is = Image.open( os.path.join( os.path.join(args.testDir, scene), df['source_image'][i]) ).convert('RGB') if scene != '/' else Image.open( os.path.join( args.testDir, df['source_image'][i]) ).convert('RGB')
if args.dataset == 'RobotCar' :
Is, Xs, Ys = ResizeMinResolution(args.minSize, Is, df['XA'][i], df['YA'][i], strideNet)
Isw, Ish = Is.size
#### -- Target Image feature
It = Image.open( os.path.join( os.path.join(args.testDir, scene), df['target_image'][i]) ).convert('RGB') if scene != '/' else Image.open( os.path.join( args.testDir, df['target_image'][i]) ).convert('RGB')
It, Xt, Yt = ResizeMinResolution(args.minSize, It, df['XB'][i], df['YB'][i], strideNet)
else :
Is, Xs, Ys, valids = ResizeMinResolution_megadepth(args.minSize, Is, df['XA'][i], df['YA'][i], strideNet)
Isw, Ish = Is.size
#### -- Target Image feature
It = Image.open( os.path.join( os.path.join(args.testDir, scene), df['target_image'][i]) ).convert('RGB') if scene != '/' else Image.open( os.path.join( args.testDir, df['target_image'][i]) ).convert('RGB')
It, Xt, Yt, validt = ResizeMinResolution_megadepth(args.minSize, It, df['XB'][i], df['YB'][i], strideNet)
index_valid = valids * validt
Xs, Ys, Xt, Yt = Xs[index_valid], Ys[index_valid], Xt[index_valid], Yt[index_valid]
Itw, Ith = It.size
flow, match = getFlow_Coarse(i, flowList, args.finePth, args.coarsePth) if args.onlyCoarse else getFlow(i, args.finePth, flowList, args.coarsePth, args.maskPth, args.multiH, args.th)
    if len(flow) == 0 :
        # No flow prediction for this pair: count all its points as unaligned at every threshold
        for th in args.matchabilityTH :
            validAlign[th] += len(Xs)
        continue
for th in args.matchabilityTH :
matchTH = (match >= th ).type(torch.FloatTensor)
        xInBound = ((flow.narrow(3, 0, 1) >= -1) * (flow.narrow(3, 0, 1) <= 1)).type(torch.FloatTensor)
        yInBound = ((flow.narrow(3, 1, 1) >= -1) * (flow.narrow(3, 1, 1) <= 1)).type(torch.FloatTensor)
        inBound = (xInBound * yInBound).permute(0, 3, 1, 2)
        matchabilityBinary = matchTH * inBound if th > 0 else torch.ones(match.size())
pixelDiffT, nbAlign = alignmentError(Itw, Ith, Isw, Ish, Xs, Ys, Xt, Yt, flow, matchabilityBinary, pixelGrid)
precAllAlign[th] = precAllAlign[th] + pixelDiffT
validAlign[th] += nbAlign
for th in args.matchabilityTH :
msg = '\nthreshold {:.1f}, precision '.format(th)
print (msg, precAllAlign[th] / validAlign[th], validAlign[th])
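# Hedged usage sketch (script name and paths are placeholders, not from the
# original repository):
#   python evaluation.py --finePth pred/fine --coarsePth pred/coarse \
#       --maskPth pred/mask --multiH MegaDepth --beginIndex 0 --endIndex 1600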
| 39.620339
| 279
| 0.603183
|
7e323ef2bc2fa5d513d1a7e7d08266b2e7b533ac
| 9,862
|
py
|
Python
|
lib/reaction/direction.py
|
avcopan/mechdriver
|
63069cfb21d6fdb6d0b091dfe204b1e09c8e10a1
|
[
"Apache-2.0"
] | null | null | null |
lib/reaction/direction.py
|
avcopan/mechdriver
|
63069cfb21d6fdb6d0b091dfe204b1e09c8e10a1
|
[
"Apache-2.0"
] | null | null | null |
lib/reaction/direction.py
|
avcopan/mechdriver
|
63069cfb21d6fdb6d0b091dfe204b1e09c8e10a1
|
[
"Apache-2.0"
] | null | null | null |
"""
Functions to handle direction of a reaction
"""
import os
import automol
import autofile
import chemkin_io
from ioformat import remove_whitespace
# from routines.es._routines import geom
from lib.phydat import phycon
from lib.filesys.mincnf import min_energy_conformer_locators
from lib.filesys.inf import modify_orb_restrict
from lib.amech_io.parser import ptt
CLA_INP = 'inp/class.csv'
# Main direction function
def set_reaction_direction(reacs, prods, spc_dct, cla_dct,
thy_info, ini_thy_info, save_prefix,
direction='forw'):
""" Set the reaction of a direction
"""
# Check if reaction is present in the class direction
if cla_dct:
given_class, flip_rxn = set_class_with_dct(
cla_dct, reacs, prods)
if flip_rxn:
reacs, prods = prods, reacs
else:
given_class = None
    # If no class given, set direction to the requested direction
if given_class is not None:
print(' Reaction present in class dct, Setting direction to that.')
else:
if direction == 'forw':
print(' User requested forward direction.')
elif direction == 'back':
print(' User requested reverse direction, flipping reaction.')
reacs, prods = prods, reacs
elif direction == 'exo':
print(' User requested exothermic direction.',
'Checking energies...')
reacs, prods = assess_rxn_ene(
reacs, prods, spc_dct, thy_info, ini_thy_info, save_prefix)
print(' Running reaction as:')
print(' {} = {}'.format('+'.join(reacs), '+'.join(prods)))
return reacs, prods, given_class
# Handle setting reaction directions with the class dictionary
def set_class_with_dct(cla_dct, reacs, prods):
""" set the class using the class dictionary
"""
rxn = (reacs, prods)
rxn_rev = (prods, reacs)
if rxn in cla_dct:
given_class = cla_dct[rxn]
flip_rxn = False
elif rxn_rev in cla_dct:
given_class = cla_dct[rxn_rev]
flip_rxn = True
else:
given_class = None
flip_rxn = False
return given_class, flip_rxn
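# Minimal illustration of the lookup above (hypothetical species names, not
# taken from any input file):
#   cla_dct = {(('CH4', 'OH'), ('CH3', 'H2O')): 'hydrogen abstraction'}
#   set_class_with_dct(cla_dct, ('CH3', 'H2O'), ('CH4', 'OH'))
#   -> ('hydrogen abstraction', True), i.e. the reaction gets flipped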
def parse_rxn_class_file(job_path):
""" Read the class dictionary
"""
if os.path.exists(os.path.join(job_path, CLA_INP)):
        print(' class.csv found. Reading contents...')
cla_str = ptt.read_inp_str(job_path, CLA_INP, remove_comments='#')
cla_dct = _build_cla_dct(cla_str)
else:
        print(' No class.csv found.')
cla_dct = {}
return cla_dct
def _build_cla_dct(cla_str):
""" read file
"""
cla_dct = {}
cla_str = remove_whitespace(cla_str)
for line in cla_str.splitlines():
# try:
[rxn_line, rclass] = line.split('||')
reacs = chemkin_io.parser.reaction.reactant_names(rxn_line)
prods = chemkin_io.parser.reaction.product_names(rxn_line)
cla_dct[(reacs, prods)] = rclass
# except:
# print('*ERROR: Error in formatting line')
# print(line)
# sys.exit()
return cla_dct
# Functions for the exothermicity check
def assess_rxn_ene(reacs, prods, spc_dct, thy_info, ini_thy_info, save_prefix):
""" Check the directionality of the reaction
"""
rxn_ichs = [[], []]
rxn_chgs = [[], []]
rxn_muls = [[], []]
for spc in reacs:
rxn_ichs[0].append(spc_dct[spc]['inchi'])
rxn_chgs[0].append(spc_dct[spc]['charge'])
rxn_muls[0].append(spc_dct[spc]['mult'])
for spc in prods:
rxn_ichs[1].append(spc_dct[spc]['inchi'])
rxn_chgs[1].append(spc_dct[spc]['charge'])
rxn_muls[1].append(spc_dct[spc]['mult'])
rxn_ene = reaction_energy(
save_prefix, rxn_ichs, rxn_chgs, rxn_muls,
thy_info, ini_thy_info)
method1, method2 = thy_info, ini_thy_info
if rxn_ene is None:
rxn_ene = reaction_energy(
save_prefix, rxn_ichs, rxn_chgs, rxn_muls,
ini_thy_info, ini_thy_info)
method1, method2 = ini_thy_info, ini_thy_info
# except AssertionError:
# rxn_ene = reaction_energy(
# save_prefix, rxn_ichs, rxn_chgs, rxn_muls, ini_thy_info)
# method = ini_thy_info
# except IOError:
# rxn_ene = reaction_energy(
# save_prefix, rxn_ichs, rxn_chgs, rxn_muls, ini_thy_info)
# method = ini_thy_info
print(' Reaction energy is {:.2f} at {}//{} level'.format(
rxn_ene*phycon.EH2KCAL, method1[1], method2[1]))
if rxn_ene > 0:
reacs, prods = prods, reacs
print(' Reaction is endothermic, flipping reaction.')
return reacs, prods
def reaction_energy(save_prefix, rxn_ich, rxn_chg, rxn_mul,
sp_thy_info, geo_thy_info):
""" reaction energy """
rct_ichs, prd_ichs = rxn_ich
rct_chgs, prd_chgs = rxn_chg
rct_muls, prd_muls = rxn_mul
rct_enes = reagent_energies(
save_prefix, rct_ichs, rct_chgs, rct_muls,
sp_thy_info, geo_thy_info)
prd_enes = reagent_energies(
save_prefix, prd_ichs, prd_chgs, prd_muls,
sp_thy_info, geo_thy_info)
if rct_enes is not None and prd_enes is not None:
rxn_ene = sum(prd_enes) - sum(rct_enes)
else:
rxn_ene = None
return rxn_ene
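# Worked example with made-up energies (hartree): for rct_enes = [-40.4, -75.7]
# and prd_enes = [-39.8, -76.4], rxn_ene = -116.2 - (-116.1) = -0.1 Eh, so the
# forward direction is exothermic and assess_rxn_ene would not flip it.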
def reagent_energies(save_prefix, rgt_ichs, rgt_chgs, rgt_muls,
sp_thy_info, geo_thy_info):
""" reagent energies """
enes = []
for rgt_ich, rgt_chg, rgt_mul in zip(rgt_ichs, rgt_chgs, rgt_muls):
# Set filesys
spc_save_fs = autofile.fs.species(save_prefix)
rgt_info = [rgt_ich, rgt_chg, rgt_mul]
spc_save_path = spc_save_fs[-1].path(rgt_info)
mod_geo_thy_info = modify_orb_restrict(rgt_info, geo_thy_info)
mod_sp_thy_info = modify_orb_restrict(rgt_info, sp_thy_info)
thy_save_fs = autofile.fs.theory(spc_save_path)
thy_save_path = thy_save_fs[-1].path(mod_geo_thy_info[1:4])
cnf_save_fs = autofile.fs.conformer(thy_save_path)
min_cnf_locs, _ = min_energy_conformer_locators(
cnf_save_fs, mod_geo_thy_info)
# Read energy
ene = None
if min_cnf_locs:
cnf_path = cnf_save_fs[-1].path(min_cnf_locs)
sp_fs = autofile.fs.single_point(cnf_path)
if sp_fs[-1].file.energy.exists(mod_sp_thy_info[1:4]):
ene = sp_fs[-1].file.energy.read(mod_sp_thy_info[1:4])
enes.append(ene)
if any(ene is None for ene in enes):
enes = None
return enes
def get_zmas(
reacs, prods, spc_dct, ini_thy_info, save_prefix, run_prefix,
kickoff_size, kickoff_backward):
"""get the zmats for reactants and products using the initial level of theory
"""
if len(reacs) > 2:
ich = spc_dct[reacs[-1]]['inchi']
ichgeo = automol.inchi.geometry(ich)
ichzma = automol.geom.zmatrix(ichgeo)
reacs = reacs[:-1]
elif len(prods) > 2:
ich = spc_dct[prods[-1]]['inchi']
ichgeo = automol.inchi.geometry(ich)
ichzma = automol.geom.zmatrix(ichgeo)
prods = prods[:-1]
rct_geos, rct_cnf_save_fs_lst = get_geos(
reacs, spc_dct, ini_thy_info, save_prefix, run_prefix, kickoff_size,
kickoff_backward)
prd_geos, prd_cnf_save_fs_lst = get_geos(
prods, spc_dct, ini_thy_info, save_prefix, run_prefix, kickoff_size,
kickoff_backward)
rct_zmas = list(map(automol.geom.zmatrix, rct_geos))
prd_zmas = list(map(automol.geom.zmatrix, prd_geos))
if len(rct_zmas) > 2:
rct_zmas.append(ichzma)
if len(prd_zmas) > 2:
prd_zmas.append(ichzma)
return rct_zmas, prd_zmas, rct_cnf_save_fs_lst, prd_cnf_save_fs_lst
def get_geos(
spcs, spc_dct, ini_thy_info, save_prefix, run_prefix, kickoff_size,
kickoff_backward):
"""get geos for reactants and products using the initial level of theory
"""
spc_geos = []
cnf_save_fs_lst = []
for spc in spcs:
spc_info = [spc_dct[spc]['inchi'],
spc_dct[spc]['charge'],
spc_dct[spc]['mult']]
ini_thy_lvl = modify_orb_restrict(spc_info, ini_thy_info)
spc_save_fs = autofile.fs.species(save_prefix)
spc_save_fs[-1].create(spc_info)
spc_save_path = spc_save_fs[-1].path(spc_info)
ini_thy_save_fs = autofile.fs.theory(spc_save_path)
ini_thy_save_path = ini_thy_save_fs[-1].path(ini_thy_lvl[1:4])
cnf_save_fs = autofile.fs.conformer(ini_thy_save_path)
cnf_save_fs_lst.append(cnf_save_fs)
min_cnf_locs, _ = min_energy_conformer_locators(
cnf_save_fs, ini_thy_lvl)
# print('min_cnf_locs test:', min_cnf_locs)
if min_cnf_locs:
geo = cnf_save_fs[-1].file.geometry.read(min_cnf_locs)
# else:
# spc_run_fs = autofile.fs.species(run_prefix)
# spc_run_fs[-1].create(spc_info)
# spc_run_path = spc_run_fs[-1].path(spc_info)
# ini_thy_run_fs = autofile.fs.theory(spc_run_path)
# ini_thy_run_path = ini_thy_run_fs[-1].path(ini_thy_lvl[1:4])
# cnf_run_fs = autofile.fs.conformer(ini_thy_run_path)
# run_fs = autofile.fs.run(ini_thy_run_path)
# run_fs[0].create()
# geo = geom.reference_geometry(
# spc_dct[spc], spc_info,
# ini_thy_lvl, ini_thy_lvl,
# ini_thy_run_fs, ini_thy_save_fs,
# ini_thy_save_fs,
# cnf_run_fs, cnf_save_fs,
# run_fs,
# opt_script_str, overwrite,
# kickoff_size=kickoff_size,
# kickoff_backward=kickoff_backward)
spc_geos.append(geo)
return spc_geos, cnf_save_fs_lst
| 34.725352
| 81
| 0.630298
|
f3efa8e8929c876d53708b87f1cb69958fc6633d
| 4,311
|
py
|
Python
|
vendor/tornado/tornado/win32_support.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 12
|
2017-03-09T07:06:07.000Z
|
2020-10-21T02:20:36.000Z
|
vendor/tornado/tornado/win32_support.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 1
|
2020-08-02T15:40:49.000Z
|
2020-08-02T15:40:49.000Z
|
vendor/tornado/tornado/win32_support.py
|
bopopescu/cc-2
|
37444fb16b36743c439b0d6c3cac2347e0cc0a94
|
[
"Apache-2.0"
] | 8
|
2017-05-22T06:41:36.000Z
|
2019-09-26T02:29:23.000Z
|
# NOTE: win32 support is currently experimental, and not recommended
# for production use.
import ctypes
import ctypes.wintypes
import os
import socket
import errno
# See: http://msdn.microsoft.com/en-us/library/ms738573(VS.85).aspx
ioctlsocket = ctypes.windll.ws2_32.ioctlsocket
ioctlsocket.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.LONG, ctypes.wintypes.ULONG)
ioctlsocket.restype = ctypes.c_int
# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
SetHandleInformation.restype = ctypes.wintypes.BOOL
HANDLE_FLAG_INHERIT = 0x00000001
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
FD_CLOEXEC = 1
os.O_NONBLOCK = 2048
FIONBIO = 126
def fcntl(fd, op, arg=0):
if op == F_GETFD or op == F_GETFL:
return 0
elif op == F_SETFD:
# Check that the flag is CLOEXEC and translate
if arg == FD_CLOEXEC:
success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, arg)
if not success:
raise ctypes.GetLastError()
else:
raise ValueError("Unsupported arg")
#elif op == F_SETFL:
## Check that the flag is NONBLOCK and translate
#if arg == os.O_NONBLOCK:
##pass
#result = ioctlsocket(fd, FIONBIO, 1)
#if result != 0:
#raise ctypes.GetLastError()
#else:
#raise ValueError("Unsupported arg")
else:
raise ValueError("Unsupported op")
class Pipe(object):
"""Create an OS independent asynchronous pipe"""
def __init__(self):
# Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py
self.writer = socket.socket()
# Disable buffering -- pulling the trigger sends 1 byte,
# and we want that sent immediately, to wake up ASAP.
self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
count = 0
while 1:
count += 1
# Bind to a local port; for efficiency, let the OS pick
# a free port for us.
# Unfortunately, stress tests showed that we may not
# be able to connect to that port ("Address already in
# use") despite that the OS picked it. This appears
# to be a race bug in the Windows socket implementation.
# So we loop until a connect() succeeds (almost always
# on the first try). See the long thread at
# http://mail.zope.org/pipermail/zope/2005-July/160433.html
# for hideous details.
a = socket.socket()
a.bind(("127.0.0.1", 0))
connect_address = a.getsockname() # assigned (host, port) pair
a.listen(1)
try:
self.writer.connect(connect_address)
break # success
except socket.error, detail:
if detail[0] != errno.WSAEADDRINUSE:
# "Address already in use" is the only error
# I've seen on two WinXP Pro SP2 boxes, under
# Pythons 2.3.5 and 2.4.1.
raise
# (10048, 'Address already in use')
# assert count <= 2 # never triggered in Tim's tests
if count >= 10: # I've never seen it go above 2
a.close()
self.writer.close()
raise socket.error("Cannot bind trigger!")
# Close `a` and try again. Note: I originally put a short
# sleep() here, but it didn't appear to help or hurt.
a.close()
self.reader, addr = a.accept()
self.reader.setblocking(0)
self.writer.setblocking(0)
a.close()
self.reader_fd = self.reader.fileno()
def read(self):
"""Emulate a file descriptors read method"""
try:
return self.reader.recv(1)
except socket.error, ex:
if ex.args[0] == errno.EWOULDBLOCK:
raise IOError
raise
def write(self, data):
"""Emulate a file descriptors write method"""
return self.writer.send(data)
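# Hedged usage sketch (not part of the original module): the pipe is meant to
# wake a select()-based loop from another thread, e.g.
#   import select
#   pipe = Pipe()
#   select.select([pipe.reader_fd], [], [])   # I/O thread blocks here
#   pipe.write('x')                           # another thread pulls the trigger
#   pipe.read()                               # I/O thread drains the wake-up byte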
| 34.766129
| 102
| 0.591046
|
29adcdadcc22f94d2caaee26d7818362d3e56f3c
| 2,401
|
py
|
Python
|
drain-service/drain3/template_miner_config.py
|
jameson-mcghee/opni-drain-service
|
3f0a5bb962a740b7d78113f95c5e5db57e85180e
|
[
"Apache-2.0"
] | 3
|
2021-07-29T19:46:25.000Z
|
2021-11-08T10:26:42.000Z
|
drain-service/drain3/template_miner_config.py
|
jameson-mcghee/opni-drain-service
|
3f0a5bb962a740b7d78113f95c5e5db57e85180e
|
[
"Apache-2.0"
] | null | null | null |
drain-service/drain3/template_miner_config.py
|
jameson-mcghee/opni-drain-service
|
3f0a5bb962a740b7d78113f95c5e5db57e85180e
|
[
"Apache-2.0"
] | 3
|
2021-05-21T20:25:51.000Z
|
2021-10-06T15:06:52.000Z
|
"""
Adopted from https://github.com/IBM/Drain3
"""
# Standard Library
import ast
import configparser
import logging
logger = logging.getLogger(__name__)
class TemplateMinerConfig:
def __init__(self):
self.profiling_enabled = False
self.profiling_report_sec = 60
self.snapshot_interval_minutes = 5
self.snapshot_compress_state = True
self.drain_extra_delimiters = []
self.drain_sim_th = 0.4
self.drain_depth = 4
self.drain_max_children = 100
self.drain_max_clusters = None
self.masking_instructions = []
self.mask_prefix = "<"
self.mask_suffix = ">"
def load(self, config_filename: str):
parser = configparser.ConfigParser()
read_files = parser.read(config_filename)
if len(read_files) == 0:
logger.warning(f"config file not found: {config_filename}")
section_profiling = "PROFILING"
section_snapshot = "SNAPSHOT"
section_drain = "DRAIN"
section_masking = "MASKING"
self.profiling_enabled = parser.getboolean(
section_profiling, "enabled", fallback=self.profiling_enabled
)
self.profiling_report_sec = parser.getint(
section_profiling, "report_sec", fallback=self.profiling_report_sec
)
self.snapshot_interval_minutes = parser.getint(
section_snapshot,
"snapshot_interval_minutes",
fallback=self.snapshot_interval_minutes,
)
self.snapshot_compress_state = parser.getboolean(
section_snapshot, "compress_state", fallback=self.snapshot_compress_state
)
drain_extra_delimiters_str = parser.get(
section_drain, "extra_delimiters", fallback=str(self.drain_extra_delimiters)
)
self.drain_extra_delimiters = ast.literal_eval(drain_extra_delimiters_str)
self.drain_sim_th = parser.getfloat(
section_drain, "sim_th", fallback=self.drain_sim_th
)
self.drain_depth = parser.getint(
section_drain, "depth", fallback=self.drain_depth
)
self.drain_max_children = parser.getint(
section_drain, "max_children", fallback=self.drain_max_children
)
self.drain_max_clusters = parser.getint(
section_drain, "max_clusters", fallback=self.drain_max_clusters
)
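# Hedged example (section and key names match the reads above): a minimal
# drain3.ini that this loader accepts could look like
#
#   [DRAIN]
#   sim_th = 0.5
#   depth = 6
#   extra_delimiters = ["_"]
#
#   [SNAPSHOT]
#   snapshot_interval_minutes = 10
#   compress_state = True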
| 33.816901
| 88
| 0.658892
|
01b7a1151dc091d9fc61daa71926c13883309816
| 4,438
|
py
|
Python
|
app/customer/common_util/image.py
|
B-ROY/TESTGIT
|
40221cf254c90d37d21afb981635740aebf11949
|
[
"Apache-2.0"
] | 2
|
2017-12-02T13:58:30.000Z
|
2018-08-02T17:07:59.000Z
|
app/customer/common_util/image.py
|
B-ROY/TESTGIT
|
40221cf254c90d37d21afb981635740aebf11949
|
[
"Apache-2.0"
] | null | null | null |
app/customer/common_util/image.py
|
B-ROY/TESTGIT
|
40221cf254c90d37d21afb981635740aebf11949
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
import tencentyun
import hmac
import urllib2
import random
import time
import binascii
import base64
import hashlib
import json
__author__ = 'zen'
APPID = '10048692'
BUCKET = 'heydopic'
SECRET_ID = 'AKIDgknyBYkNKnpONeweTRwK9t6Nn0jn78yG'
SECRET_KEY = 'fBCXVJK1PpWPtYizb7vIGVMIJFm90GBa'
UPLOAD_TYPE_FILE = 1
UPLOAD_TYPE_BIN = 2
class UploadImage(object):
def __init__(self, file_handler, save_path=''):
self.file_handler = file_handler
self.save_path = save_path
filename = self.file_handler.name.replace(" ",'')
file_name_t = re.sub(u'[\u4e00-\u9fa5]', 'X', filename)
file_name_t = re.sub(u'[\uFF00-\uFFFF]', 'X', file_name_t)
self.out_file_name = "%s_%s" % (int(time.time()), file_name_t)
self.file_id = "%s_%s" % (int(time.time()), hashlib.md5(filename).hexdigest())
def save_to_local(self):
local_file_path = os.path.join(self.save_path, self.out_file_name)
temp_file = local_file_path + '.tmp'
output_file = open(temp_file, 'wb')
# Finally write the data to a temporary file
self.file_handler.incoming_file.seek(0)
h = hashlib.md5()
while True:
data = self.file_handler.incoming_file.read(2 << 16)
if not data:
break
output_file.write(data)
h.update(data)
output_file.close()
os.rename(temp_file, local_file_path)
def push_to_qclude(self):
image = tencentyun.ImageV2(
APPID, SECRET_ID, SECRET_KEY)
return image.upload_binary(
self.file_handler,
bucket=BUCKET,
fileid=self.file_id)
@classmethod
def push_binary_to_qclude(cls,binary,price=0):
image = tencentyun.ImageV2(
APPID, SECRET_ID, SECRET_KEY)
if price == 0:
pic_bucket = "hdlive"
else:
pic_bucket = "heydopic"
return image.upload_binary(
binary,
bucket=pic_bucket,
fileid= hashlib.md5("logo"+str(int(time.time()) ) ).hexdigest() )
def delete_pic(file_id):
secret_key = "fBCXVJK1PpWPtYizb7vIGVMIJFm90GBa"
appid = "10048692"
bucket = "hdlive"
secret_id = "AKIDgknyBYkNKnpONeweTRwK9t6Nn0jn78yG"
expiredTime = int(time.time()) + 999
currentTime = time.time()
rand = random.randint(0, 9999999)
userid = "0"
delete_url = "http://web.image.myqcloud.com/photos/v2/%s/%s/%s/%s/del" % (appid, bucket, userid, file_id)
plain_text = "a=%s&b=%s&k=%s&e=%s&t=%s&r=%s&u=%s&f=%s" % \
(appid, bucket, secret_id, expiredTime, currentTime, rand, userid, file_id)
b = hmac.new(secret_key, plain_text, hashlib.sha1)
s = b.hexdigest()
s = binascii.unhexlify(s)
s += plain_text
signature = base64.b64encode(s).rstrip()
headers = {
"Host": "web.image.myqcloud.com",
"Authorization": signature,
"Content-Length": 0
}
print delete_url
req = urllib2.Request(delete_url, data="", headers=headers)
return json.loads(urllib2.urlopen(req).read())
def porncheck(pic_url):
porncheck_url = "http://service.image.myqcloud.com/detection/porn_detect"
secret_key = "fBCXVJK1PpWPtYizb7vIGVMIJFm90GBa"
appid = "10048692"
bucket = "hdlive"
secret_id = "AKIDgknyBYkNKnpONeweTRwK9t6Nn0jn78yG"
expiredTime = int(time.time()) + 999
currentTime = time.time()
rand = random.randint(0, 9999999)
userid = "0"
plain_text = "a=%s&b=%s&k=%s&e=%s&t=%s&r=%s&u=%s" % \
(appid, bucket, secret_id, expiredTime, currentTime, rand, userid)
b = hmac.new(secret_key, plain_text, hashlib.sha1)
s = b.hexdigest()
s = binascii.unhexlify(s)
s += plain_text
signature = base64.b64encode(s).rstrip()
body = {
"appid": appid,
"bucket": bucket,
"url_list": [
pic_url
]
}
headers = {
"Host": "service.image.myqcloud.com",
"Content-Type": "Application/json",
"Authorization": signature,
"Content_lenth": len(json.dumps(body))
}
req = urllib2.Request(porncheck_url, data=json.dumps(body), headers=headers)
return json.loads(urllib2.urlopen(req).read())
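# Hedged refactoring sketch (not in the original): delete_pic and porncheck
# build the same HMAC-SHA1 signature, and hexdigest()+unhexlify() equals
# digest(), so the shared logic could be expressed as:
def _qcloud_signature(secret_key, plain_text):
    raw = hmac.new(secret_key, plain_text, hashlib.sha1).digest() + plain_text
    return base64.b64encode(raw).rstrip()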
if __name__ == "__main__":
f = open('/Users/yinxing/e857d628a6e213558dd18a67bf9d666a.gif','rb')
print dir(f)
up = UploadImage(f)
print up.push_to_qclude()
| 28.818182
| 109
| 0.624606
|
ced017199ecd3464859f5bc89b3b52389b595aa0
| 335
|
py
|
Python
|
tests/unit/ssg/test_checks.py
|
dhanushkar-wso2/scap-security-guide
|
e4134011d3274f828a0d2119e1fa24396ef73a1b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/ssg/test_checks.py
|
dhanushkar-wso2/scap-security-guide
|
e4134011d3274f828a0d2119e1fa24396ef73a1b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/ssg/test_checks.py
|
dhanushkar-wso2/scap-security-guide
|
e4134011d3274f828a0d2119e1fa24396ef73a1b
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import ssg.checks
def test_is_cce_valid():
icv = ssg.checks.is_cce_valid
assert icv("CCE-27191-6")
assert icv("CCE-7223-7")
assert not icv("not-valid")
assert not icv("1234-5")
assert not icv("12345-6")
assert not icv("TBD")
assert not icv("CCE-TBD")
assert not icv("CCE-abcde-f")
| 19.705882
| 33
| 0.641791
|
9d2a9c8e1690393903392dc689fc40ec06516364
| 900
|
py
|
Python
|
tests/unit/conftest.py
|
sshink/testrail-api
|
18bc28a0b76ae475974e5f9ed6f7f9d9da58f5bc
|
[
"MIT"
] | null | null | null |
tests/unit/conftest.py
|
sshink/testrail-api
|
18bc28a0b76ae475974e5f9ed6f7f9d9da58f5bc
|
[
"MIT"
] | null | null | null |
tests/unit/conftest.py
|
sshink/testrail-api
|
18bc28a0b76ae475974e5f9ed6f7f9d9da58f5bc
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import pytest
import responses
from testrail_api import TestRailAPI
@pytest.fixture(scope='session')
def host():
yield 'https://example.testrail.com/'
@pytest.fixture(scope='session')
def base_path():
path = Path(__file__).absolute().parent
yield str(path)
@pytest.fixture(scope='session')
def auth_data(host):
yield host, 'example@mail.com', 'password'
@pytest.fixture
def mock():
with responses.RequestsMock() as resp:
yield resp
@pytest.fixture
def api(auth_data):
api = TestRailAPI(*auth_data)
yield api
@pytest.fixture
def environ(auth_data):
os.environ['TESTRAIL_URL'] = auth_data[0]
os.environ['TESTRAIL_EMAIL'] = auth_data[1]
os.environ['TESTRAIL_PASSWORD'] = auth_data[2]
yield
del os.environ['TESTRAIL_URL']
del os.environ['TESTRAIL_EMAIL']
del os.environ['TESTRAIL_PASSWORD']
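# Hedged usage sketch (not part of the original conftest): a test module can
# combine these fixtures, e.g.
#
#   def test_client_from_env(environ, mock, host):
#       mock.add(responses.GET, f'{host}index.php?/api/v2/get_case/1', json={})
#       api = TestRailAPI()  # reads the TESTRAIL_* variables set by `environ`
#       api.cases.get_case(1)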
| 19.148936
| 50
| 0.705556
|
5e9f7ff994fc157adc06bfc9e7d69d0c59d2e26f
| 1,663
|
py
|
Python
|
Game23/Game23.py
|
ttkaixin1998/pikachupythongames
|
609a3a5a2be3f5a187c332c7980bb5bb14548f02
|
[
"MIT"
] | 4,013
|
2018-06-16T08:00:02.000Z
|
2022-03-30T11:48:14.000Z
|
Game23/Game23.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 22
|
2018-10-18T00:15:50.000Z
|
2022-01-13T08:16:15.000Z
|
Game23/Game23.py
|
pigbearcat/Games
|
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
|
[
"MIT"
] | 2,172
|
2018-07-20T04:03:14.000Z
|
2022-03-31T14:18:29.000Z
|
'''
Function:
    A mini 2048 game
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import cfg
import sys
import pygame
from modules import *
'''Main program'''
def main(cfg):
    # Initialize the game
pygame.init()
screen = pygame.display.set_mode(cfg.SCREENSIZE)
pygame.display.set_caption('2048 —— Charles的皮卡丘')
    # Play the background music
pygame.mixer.music.load(cfg.BGMPATH)
pygame.mixer.music.play(-1)
    # Instantiate the 2048 game
game_2048 = Game2048(matrix_size=cfg.GAME_MATRIX_SIZE, max_score_filepath=cfg.MAX_SCORE_FILEPATH)
    # Game main loop
clock = pygame.time.Clock()
is_running = True
while is_running:
screen.fill(pygame.Color(cfg.BG_COLOR))
        # --detect key presses
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key in [pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT]:
game_2048.setDirection({pygame.K_UP: 'up', pygame.K_DOWN: 'down', pygame.K_LEFT: 'left', pygame.K_RIGHT: 'right'}[event.key])
        # --update the game state
game_2048.update()
if game_2048.isgameover:
game_2048.saveMaxScore()
is_running = False
        # --draw the required game elements onto the screen
drawGameMatrix(screen, game_2048.game_matrix, cfg)
start_x, start_y = drawScore(screen, game_2048.score, game_2048.max_score, cfg)
drawGameIntro(screen, start_x, start_y, cfg)
        # --refresh the screen
pygame.display.update()
clock.tick(cfg.FPS)
return endInterface(screen, cfg)
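# Illustration only (the real logic lives in the `modules` package): the core
# row merge behind Game2048.update can be sketched as
def merge_row_left(row):
    tiles = [v for v in row if v]                    # drop empty cells
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)              # merge one equal pair
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    return merged + [0] * (len(row) - len(merged))   # pad back to board width
# e.g. merge_row_left([2, 2, 2, 0]) -> [4, 2, 0, 0]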
'''run'''
if __name__ == '__main__':
while True:
if not main(cfg):
break
| 28.672414
| 145
| 0.615755
|
1f8f9e391109c41227336b2bb762cb77a40123c1
| 6,413
|
py
|
Python
|
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | 5
|
2021-02-24T19:10:34.000Z
|
2022-02-24T21:11:24.000Z
|
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | null | null | null |
src/harvester.py
|
bmoxon/azfinsim
|
3e203855410abd6c9636377b93ed5d33ac896c41
|
[
"MIT"
] | 2
|
2021-05-03T11:57:31.000Z
|
2021-12-09T10:24:29.000Z
|
#! /usr/bin/env python3
#-- harvest scheduler that runs on the compute pool nodes
import argparse
import time
import sys
import logging
import os
import psutil
from applicationinsights import TelemetryClient
from applicationinsights.logging import LoggingHandler
from getargs import getargs
import azlog
azlog.color=False
#-- Timeout between polling the harvest #cores api/file
HARVESTPOLLTIMEOUT = 30
#-- Executable to launch per cpu slot
#ENGINE="burn.sh" # (for testing)
ENGINE="/azfinsim/azfinsim.py"
#KVP_MONITOR="/var/lib/hyperv/.kvp_pool_0"
#-- mounted via: sudo docker run -v /var/lib/hyperv:/kvp -it mkharvestazcr.azurecr.io/azfinsim/azfinsimub1804
KVP_MONITOR="/kvp/.kvp_pool_0"
def read_harvest_cores() :
vcores = psutil.cpu_count(logical=True)
pcores = psutil.cpu_count(logical=False)
log.info("Polling Harvester: Physical Cores: %d Logical Cores: %d" % (pcores,vcores))
kvp=KVP_MONITOR
try:
f = open(kvp, "r")
str=f.read()
if (len(str) > 0):
str = str.replace("CurrentCoreCount","")
str = str.replace('\0','')
ncores = int(str.split('.')[0])
log.info("Harvest file %s has current physical core count: %d" % (kvp,ncores))
else:
ncores = vcores
log.warn("Harvest file %s is empty; using static vcore count: %d" % (kvp,ncores))
except OSError:
ncores = vcores
log.warn("Harvest file %s doesn't exist; using static vcore count: %d" % (kvp,ncores))
tc.track_metric('HARVESTCORES', ncores)
tc.flush()
return ncores
def spawn(ncores) :
env = {"PATH":"."}
args = ("null","null")
log.info("spawning %d processes" % ncores)
for i in range(ncores):
pid = os.fork()
if not pid:
try:
os.execvpe("burn.sh", args, env)
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
else:
pid = os.waitpid(pid,0)
def spawn_one(start_trade,trade_window,inputargs):
#path = os.environ['PATH']
argtup = tuple(inputargs)
pid = os.fork()
if not pid:
#-- child process
log.info("spawning new process %s: pid %d: start_trade=%d, ntrades=%d" % (ENGINE,os.getpid(),start_trade,trade_window))
#logging.info(argtup)
try:
os.execve(ENGINE, argtup, os.environ.copy())
except OSError as e:
log.error("Exec failed: %s\n" % (e.strerror))
os._exit(1)
#else:
#pid = os.waitpid(pid,0)
def replace_args(start_trade,trade_window,inputargs):
result = []
skip=False
for i in range(len(inputargs)):
if (skip==True):
skip=False
continue
        # option names in inputargs carry the '--' prefix and '-' separators
        # (see the arg re-formatting in __main__), so match those forms
        if (inputargs[i]=='--start-trade'):
            result.append('--start-trade')
            result.append(str(start_trade))
            skip=True
        elif (inputargs[i]=='--trade-window'):
            result.append('--trade-window')
            result.append(str(trade_window))
            skip=True
else:
result.append(inputargs[i])
skip=False
return(result)
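# e.g. (after the '--'/dash normalization performed in __main__):
#   replace_args(100, 10, [ENGINE, '--start-trade', '0', '--trade-window', '50'])
#   -> [ENGINE, '--start-trade', '100', '--trade-window', '10']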
#-- register the absolute start time
#launch=time.time_ns() #-- python3.8 only
launch=time.time()
log = azlog.getLogger(__name__)
if __name__ == "__main__":
#-- grab cli args: will be passed through to child processes
args = getargs("harvester")
#-- reformat args into a list of strings for execvpe
inputargs = []
inputargs.append(ENGINE) #-- first arg to execvpe() should be progname
for arg in vars(args):
#print(arg, getattr(args,arg))
val = str(getattr(args,arg))
arg=arg.replace("_","-")
inputargs.append(str("--" + arg)) #-- re-add the stripped "--" prefix
inputargs.append(val)
#print(inputargs)
#-- setup azure application insights handle for telemetry
tc = TelemetryClient("%s" % args.appinsights_key)
# set up logging - STDOUT & Azure AppInsights EventLog
#handler = LoggingHandler(args.appinsights_key)
#logging.basicConfig(
# format="%(asctime)s harvester: %(name)s %(threadName)-10.10s %(levelname)-5.5s %(message)s",
# handlers=[
# LoggingHandler(args.appinsights_key), #-- send to AZURE
# logging.StreamHandler(stream=sys.stdout) #-- send to STDOUT
# ],level=args.loglevel)
#-- log start time
log.info("TRADE %10d: LAUNCH : %d" % (args.start_trade,launch))
tc.track_metric('STARTTIME', launch)
tc.flush()
#-- get initial harvest core count
slots = read_harvest_cores()
log.info("%d x Cores available." % slots)
#-- calculate number of trades per process/batch/cpu
max_batch_size = 10
total_trades = args.trade_window
lastbatch = total_trades % max_batch_size
nbatchesfl = total_trades / max_batch_size
nbatches = int(nbatchesfl)
offset = args.start_trade
log.info("%d trades to process in this task (%.2f batches of %d)" % (total_trades,nbatchesfl,max_batch_size))
#-- Main loop: monitor harvest api/file & dispatch processes to available cores
batchesdone=0
trades_processed=0
while (batchesdone <= nbatches):
procs = psutil.Process().children()
gone, alive = psutil.wait_procs(procs,timeout=1,callback=None)
nprocs = len(alive)
freeslots = slots - nprocs
log.info("%d processes running on %d total slots: %d slots available." % (nprocs,slots,freeslots))
if (nprocs < slots):
for i in range(freeslots):
if (batchesdone == nbatches): batch_size = lastbatch
else: batch_size = max_batch_size
inputargs = replace_args(offset,batch_size,inputargs) # substitute the command line args
spawn_one(offset,batch_size,inputargs)
trades_processed += batch_size
offset += batch_size
batchesdone+=1
if (batch_size == lastbatch):
break
time.sleep(HARVESTPOLLTIMEOUT)
#-- re-read the harvest file - check if #slots has changed
slots = read_harvest_cores()
log.info("%d trades processed. No trades left to process; relinquishing cores" % trades_processed)
# flush all un-sent telemetry items
tc.flush()
#logging.shutdown()
#-- when all work done, exit and allow orchestration to recover node.
exit(0)
| 34.478495
| 127
| 0.626072
|
088eb78bf81aa3056a55d7e3bfc584ab417cace2
| 746
|
py
|
Python
|
generated-libraries/python/netapp/ntdtest/ntdtest_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/ntdtest/ntdtest_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/ntdtest/ntdtest_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class NtdtestGetIterKeyTd(NetAppObject):
"""
Key typedef for table ntdtest
"""
_key_0 = None
@property
def key_0(self):
"""
Field group
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
@staticmethod
def get_api_name():
return "ntdtest-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-0',
]
def describe_properties(self):
return {
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
| 21.314286
| 87
| 0.536193
|
1c92be83eaadbc03a9d34c152a5679ffff17e3c7
| 1,140
|
py
|
Python
|
exercises/0780-ReachingPoints/reaching_points_test.py
|
tqa236/leetcode-solutions
|
556147981c43509a6e8a7f59f138d1ab027ebfd1
|
[
"MIT"
] | 1
|
2020-09-26T15:09:25.000Z
|
2020-09-26T15:09:25.000Z
|
exercises/0780-ReachingPoints/reaching_points_test.py
|
tqa236/leetcode-solutions
|
556147981c43509a6e8a7f59f138d1ab027ebfd1
|
[
"MIT"
] | null | null | null |
exercises/0780-ReachingPoints/reaching_points_test.py
|
tqa236/leetcode-solutions
|
556147981c43509a6e8a7f59f138d1ab027ebfd1
|
[
"MIT"
] | null | null | null |
import unittest
import hypothesis.strategies as st
from hypothesis import given
from reaching_points import Solution
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(solution.reachingPoints(1, 1, 3, 5), True)
def test_2(self):
solution = Solution()
self.assertEqual(solution.reachingPoints(1, 1, 2, 2), False)
def test_3(self):
solution = Solution()
self.assertEqual(solution.reachingPoints(1, 1, 1, 1), True)
def test_4(self):
solution = Solution()
self.assertEqual(solution.reachingPoints(1, 1, 10 ** 9, 1), True)
def test_5(self):
solution = Solution()
self.assertEqual(solution.reachingPoints(9, 5, 12, 8), False)
@given(st.lists(st.integers(min_value=1, max_value=100), min_size=4, max_size=4))
def test_random(self, array):
sx, sy, tx, ty = array
solution = Solution()
self.assertEqual(
solution.reachingPoints(sx, sy, tx, ty),
solution.reachingPointsNaive(sx, sy, tx, ty),
)
if __name__ == "__main__":
unittest.main()
| 29.230769
| 85
| 0.636842
|
92dc1d67ee82c4dfcd1c585c9e3788f2ed9aa0cf
| 2,216
|
py
|
Python
|
benchmark/startPyquil1179.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1179.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil1179.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=51
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(0) # number=48
prog += CZ(2,0) # number=49
prog += H(0) # number=50
prog += Z(2) # number=46
prog += CNOT(2,0) # number=47
prog += H(1) # number=4
prog += RX(2.664070570244145,1) # number=39
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(3) # number=40
prog += Y(4) # number=35
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=25
prog += CZ(1,0) # number=26
prog += H(0) # number=27
prog += H(0) # number=36
prog += CZ(1,0) # number=37
prog += H(0) # number=38
prog += CNOT(1,0) # number=41
prog += X(0) # number=42
prog += CNOT(1,0) # number=43
prog += CNOT(1,0) # number=34
prog += CNOT(1,0) # number=24
prog += CNOT(0,1) # number=29
prog += CNOT(2,3) # number=44
prog += X(1) # number=30
prog += CNOT(0,1) # number=31
prog += X(2) # number=11
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
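# Equivalent standard-library one-liner, shown for comparison:
#   from collections import Counter
#   dict(Counter(bitstrings))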
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1179.csv","w")
    print(summarise_results(bitstrings),file=writefile)
writefile.close()
| 25.181818
| 64
| 0.544675
|
d1c7845cd68849fc528d3d6b1d983d996daa50d0
| 819
|
py
|
Python
|
learn/django/tutorial01/first_site/first_site/urls.py
|
zhmz90/Daily
|
25e13f6334c58d3a075b3fc502ecb34832392be7
|
[
"MIT"
] | null | null | null |
learn/django/tutorial01/first_site/first_site/urls.py
|
zhmz90/Daily
|
25e13f6334c58d3a075b3fc502ecb34832392be7
|
[
"MIT"
] | 25
|
2016-01-03T14:23:44.000Z
|
2016-03-05T07:34:40.000Z
|
learn/django/tutorial01/first_site/first_site/urls.py
|
zhmz90/Daily
|
25e13f6334c58d3a075b3fc502ecb34832392be7
|
[
"MIT"
] | null | null | null |
"""first_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
urlpatterns = [
url(r'^polls/', include('polls.urls')),
url(r'^admin/', admin.site.urls),
]
| 35.608696
| 79
| 0.699634
|
9e946d668fbac592a07601f97d6fb5a207ec5d83
| 236
|
py
|
Python
|
src/pycamara/django_camara/management/commands/import_legislatures.py
|
msfernandes/pycamara
|
01648ba95aa5ce780dd1aed32b4347684204e327
|
[
"MIT"
] | null | null | null |
src/pycamara/django_camara/management/commands/import_legislatures.py
|
msfernandes/pycamara
|
01648ba95aa5ce780dd1aed32b4347684204e327
|
[
"MIT"
] | 1
|
2017-07-24T19:35:48.000Z
|
2017-07-25T20:21:13.000Z
|
src/pycamara/django_camara/management/commands/import_legislatures.py
|
msfernandes/pycamara
|
01648ba95aa5ce780dd1aed32b4347684204e327
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from pycamara.django_camara.importers import legislatures
class Command(BaseCommand):
def handle(self, *args, **options):
legislatures.LegislatureImporter().save_data()
| 26.222222
| 57
| 0.779661
|
52a886b58bdc36a565dcc126a4bebd02475f0a3d
| 7,265
|
py
|
Python
|
util/pyclient/test_client.py
|
big-data-lab-umbc/concord-bft
|
7061695406885604471a8a7bd5944e8d16d7280f
|
[
"Apache-2.0"
] | null | null | null |
util/pyclient/test_client.py
|
big-data-lab-umbc/concord-bft
|
7061695406885604471a8a7bd5944e8d16d7280f
|
[
"Apache-2.0"
] | null | null | null |
util/pyclient/test_client.py
|
big-data-lab-umbc/concord-bft
|
7061695406885604471a8a7bd5944e8d16d7280f
|
[
"Apache-2.0"
] | null | null | null |
# Concord
#
# Copyright (c) 2019 VMware, Inc. All Rights Reserved.
#
# This product is licensed to you under the Apache 2.0 license (the "License").
# You may not use this product except in compliance with the Apache 2.0 License.
#
# This product may include a number of subcomponents with separate copyright
# notices and license terms. Your use of these subcomponents is subject to the
# terms and conditions of the subcomponent's license, as noted in the LICENSE
# file.
import unittest
import struct
import tempfile
import shutil
import os
import os.path
import subprocess
import trio
import bft_client
import bft_config
# This requires python 3.5 for subprocess.run
class SimpleTest(unittest.TestCase):
"""
Test a UDP client against simpleTest servers
Use n=4, f=1, c=0
"""
@classmethod
def setUpClass(cls):
cls.origdir = os.getcwd()
cls.testdir = tempfile.mkdtemp()
cls.builddir = os.path.abspath("../../build")
cls.toolsdir = os.path.join(cls.builddir, "tools")
cls.serverbin = os.path.join(cls.builddir,"tests/simpleTest/server")
os.chdir(cls.testdir)
cls.generateKeys()
cls.config = bft_config.Config(4, 1, 0, 4096, 1000, 50, "")
cls.replicas = [bft_config.Replica(id=i,
ip="127.0.0.1",
port=bft_config.bft_msg_port_from_node_id(i),
metrics_port=bft_config.metrics_port_from_node_id(i))
for i in range(0,4)]
print("Running tests in {}".format(cls.testdir))
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.testdir)
        os.chdir(cls.origdir)
@classmethod
def generateKeys(cls):
"""Create keys expected by SimpleTest server for 4 nodes"""
keygen = os.path.join(cls.toolsdir, "GenerateConcordKeys")
args = [keygen, "-n", "4", "-f", "1", "-o", "private_replica_"]
subprocess.run(args, check=True)
def readRequest(self):
"""Serialize a read request"""
return struct.pack("<Q", 100)
def writeRequest(self, val):
"""Serialize a write request"""
return struct.pack("<QQ", 200, val)
def read_val(self, val):
"""Return a deserialized read value"""
return struct.unpack("<Q", val)[0]
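    # e.g. writeRequest(7) == struct.pack("<QQ", 200, 7): opcode 200 followed by
    # the value, both little-endian uint64; read_val(struct.pack("<Q", 7)) == 7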
def testTimeout(self):
"""Client requests will timeout since no servers are running"""
read = self.readRequest()
write = self.writeRequest(1)
trio.run(self._testTimeout, read, True)
trio.run(self._testTimeout, write, False)
async def _testTimeout(self, msg, read_only):
config = self.config._replace(req_timeout_milli=100)
with bft_client.UdpClient(config, self.replicas, None) as udp_client:
with self.assertRaises(trio.TooSlowError):
await udp_client.sendSync(msg, read_only)
def startServers(self):
"""Start all 4 simpleTestServers"""
self.procs = [subprocess.Popen([self.serverbin, str(i)], close_fds=True)
for i in range(0, 4)]
def stopServers(self):
"""Stop all processes in self.procs"""
for p in self.procs:
p.kill()
p.wait()
def testReadWrittenValue(self):
"""Write a value and then read it"""
self.startServers()
try:
trio.run(self._testReadWrittenValue)
except:
raise
finally:
self.stopServers()
async def _testReadWrittenValue(self):
val = 999
with bft_client.UdpClient(self.config, self.replicas, None) as udp_client:
await udp_client.sendSync(self.writeRequest(val), False)
read = await udp_client.sendSync(self.readRequest(), True)
self.assertEqual(val, self.read_val(read))
def testRetry(self):
"""
Start servers after client has already made an attempt to send and
ensure request succeeds.
"""
trio.run(self._testRetry)
async def _testRetry(self):
"""Start servers after a delay in parallel with a write request"""
try:
async with trio.open_nursery() as nursery:
nursery.start_soon(self.startServersWithDelay)
nursery.start_soon(self.writeWithRetryAssert)
except:
raise
finally:
self.stopServers()
async def writeWithRetryAssert(self):
"""Issue a write and ensure that a retry occurs"""
config = self.config._replace(req_timeout_milli=5000)
val = 1
with bft_client.UdpClient(config, self.replicas, None) as udp_client:
self.assertEqual(udp_client.retries, 0)
await udp_client.sendSync(self.writeRequest(val), False)
self.assertTrue(udp_client.retries > 0)
async def startServersWithDelay(self):
# Retry timeout is 50ms
# This guarantees we wait at least one retry with high probability
await trio.sleep(.250)
self.startServers()
def testPrimaryWrite(self):
"""Test that we learn the primary and using it succeeds."""
self.startServers()
try:
trio.run(self._testPrimaryWrite)
except:
raise
finally:
self.stopServers()
async def _testPrimaryWrite(self):
# Try to guarantee we don't retry accidentally
config = self.config._replace(retry_timeout_milli=500)
with bft_client.UdpClient(self.config, self.replicas, None) as udp_client:
self.assertEqual(None, udp_client.primary)
await udp_client.sendSync(self.writeRequest(1), False)
# We know the servers are up once the write completes
self.assertNotEqual(None, udp_client.primary)
sent = udp_client.msgs_sent
read = await udp_client.sendSync(self.readRequest(), True)
sent += 4
self.assertEqual(sent, udp_client.msgs_sent)
self.assertEqual(1, self.read_val(read))
self.assertNotEqual(None, udp_client.primary)
await udp_client.sendSync(self.writeRequest(2), False)
sent += 1 # Only send to the primary
self.assertEqual(sent, udp_client.msgs_sent)
read = await udp_client.sendSync(self.readRequest(), True)
sent += 4
self.assertEqual(sent, udp_client.msgs_sent)
self.assertEqual(2, self.read_val(read))
self.assertNotEqual(None, udp_client.primary)
async def _testMofNQuorum(self):
config = self.config._replace(retry_timeout_milli=500)
with bft_client.UdpClient(self.config, self.replicas, None) as udp_client:
await udp_client.sendSync(self.writeRequest(1), False)
single_read_q = bft_client.MofNQuorum([0], 1)
read = await udp_client.sendSync(self.readRequest(), True, m_of_n_quorum=single_read_q)
self.assertEqual(1, self.read_val(read))
    def testMofNQuorum(self):
self.startServers()
try:
trio.run(self._testMofNQuorum)
except:
raise
finally:
self.stopServers()
if __name__ == '__main__':
unittest.main()
| 35.965347
| 99
| 0.626428
|
881a3235e1cfc4f6d5a5bd15fa74442aa2d8ee8e
| 2,307
|
py
|
Python
|
messaging_script.py
|
fletchapin/camping-bot
|
967cbacdba198b293720994d4a70ee085b296a73
|
[
"MIT"
] | null | null | null |
messaging_script.py
|
fletchapin/camping-bot
|
967cbacdba198b293720994d4a70ee085b296a73
|
[
"MIT"
] | null | null | null |
messaging_script.py
|
fletchapin/camping-bot
|
967cbacdba198b293720994d4a70ee085b296a73
|
[
"MIT"
] | null | null | null |
import time
import smtplib
import argparse
from email.message import EmailMessage
import camping_scraper as cs
def msg_alert(
subject,
body,
to=None,
email_addr=None,
app_pwd=None,
):
"""Send text message alerts to phone.
Check README for configuration instructions.
Parameters
----------
subject : str
body : str
to : str
Phone number appended with the mobile carrier's SMS Gateway Address
email_addr : str
this code only works with a Gmail address
app_pwd : str
"""
if not to or not email_addr or not app_pwd:
raise ValueError("Please check README and overwrite keyword args with personal info")
msg = EmailMessage()
msg.set_content(body)
msg["subject"] = subject
msg["to"] = to
msg["from"] = email_addr
server = smtplib.SMTP("smtp.gmail.com", 587)
server.starttls()
server.login(str(email_addr), str(app_pwd))
server.send_message(msg)
server.quit()
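# Hedged example (number, gateway and credentials are placeholders):
#   msg_alert("Campsite Availability", "Site A12 open 2024-07-04",
#             to="5551234567@tmomail.net", email_addr="me@gmail.com",
#             app_pwd="gmail-app-password")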
parser = argparse.ArgumentParser(description="Find available campsites.")
parser.add_argument("--park", "-p")
parser.add_argument("--campground", "-c")
parser.add_argument("--year", "-y")
parser.add_argument("--months", "-m", nargs='+', default=[])
parser.add_argument("--sleep", "-s", default=86400)
parser.add_argument("--verbose", "-v", action="store_true")
args = parser.parse_args()
while True:
availability = cs.find_availability_by_year(args.park, args.campground, args.year, args.months)
if availability:
msg = "Availability found at " + args.park + " " + args.campground + ":\n"
for available in availability:
msg += available.strftime("%Y-%m-%d") + "\n"
        # split up texts before they go over the carrier's message length limit
if len(msg) > 125:
msg_alert("Campsite Availability", msg)
if args.verbose:
print(msg)
msg = ""
if len(msg) > 0:
msg_alert("Campsite Availability", msg)
else:
msg = ("No available sites found for " + args.park + " " + args.campground +
". Will try to search again in " + str(args.sleep / 3600) + " hours.")
msg_alert("Campsite Availability", msg)
if args.verbose:
print(msg)
time.sleep(args.sleep)
| 28.481481
| 99
| 0.622453
|
7490430ec98adb51cfe07d13f6ff83665c9c4ff4
| 511
|
py
|
Python
|
tests/client/test_helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | 1
|
2019-11-27T14:33:45.000Z
|
2019-11-27T14:33:45.000Z
|
tests/client/test_helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | null | null | null |
tests/client/test_helpers.py
|
geffy/ebonite
|
2d85eeca44ac1799e743bafe333887712e325060
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from ebonite.client.helpers import create_model
from ebonite.ext.sklearn import SklearnModelWrapper
def test_create_model(sklearn_model_obj, pandas_data):
model = create_model(sklearn_model_obj, pandas_data)
assert model is not None
assert isinstance(model.wrapper, SklearnModelWrapper)
assert model.input_meta.columns == list(pandas_data)
assert model.output_meta.real_type == np.ndarray
assert {'numpy', 'sklearn', 'pandas'}.issubset(model.requirements.modules)
| 34.066667
| 78
| 0.786693
|
e93a910728d33f89c7bee2f991a37e99013a46f4
| 9,837
|
py
|
Python
|
depthai_helpers/config_manager.py
|
wirthual/depthai
|
9b3e987796b70fce3d4112f295cf64661d7986a0
|
[
"MIT"
] | null | null | null |
depthai_helpers/config_manager.py
|
wirthual/depthai
|
9b3e987796b70fce3d4112f295cf64661d7986a0
|
[
"MIT"
] | null | null | null |
depthai_helpers/config_manager.py
|
wirthual/depthai
|
9b3e987796b70fce3d4112f295cf64661d7986a0
|
[
"MIT"
] | null | null | null |
import os
import platform
import subprocess
from pathlib import Path
import cv2
import depthai as dai
import numpy as np
from depthai_helpers.cli_utils import cliPrint, PrintColors
from depthai_sdk.previews import Previews
DEPTHAI_ZOO = Path(__file__).parent.parent / Path(f"resources/nn/")
DEPTHAI_VIDEOS = Path(__file__).parent.parent / Path(f"videos/")
DEPTHAI_VIDEOS.mkdir(exist_ok=True)
class ConfigManager:
labels = ""
customFwCommit = ''
def __init__(self, args):
self.args = args
self.args.encode = dict(self.args.encode)
self.args.cameraOrientation = dict(self.args.cameraOrientation)
if self.args.scale is None:
self.args.scale = {"color": 0.37}
else:
self.args.scale = dict(self.args.scale)
if (Previews.left.name in self.args.cameraOrientation or Previews.right.name in self.args.cameraOrientation) and self.useDepth:
print("[WARNING] Changing mono cameras orientation may result in incorrect depth/disparity maps")
@property
def debug(self):
return not self.args.noDebug
@property
def useCamera(self):
return not self.args.video
@property
def useNN(self):
return not self.args.disableNeuralNetwork
@property
def useDepth(self):
return not self.args.disableDepth and self.useCamera
@property
def maxDisparity(self):
maxDisparity = 95
if (self.args.extendedDisparity):
maxDisparity *= 2
if (self.args.subpixel):
maxDisparity *= 32
return maxDisparity
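    # e.g. extended disparity doubles the base range (95 -> 190) and subpixel
    # mode adds 5 fractional bits (x32), so enabling both yields 95*2*32 = 6080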
def getModelSource(self):
if not self.useCamera:
return "host"
if self.args.camera == "left":
if self.useDepth:
return "rectifiedLeft"
return "left"
if self.args.camera == "right":
if self.useDepth:
return "rectifiedRight"
return "right"
if self.args.camera == "color":
return "color"
def getModelName(self):
if self.args.cnnModel:
return self.args.cnnModel
modelDir = self.getModelDir()
if modelDir is not None:
return Path(modelDir).stem
def getModelDir(self):
if self.args.cnnPath:
return self.args.cnnPath
if self.args.cnnModel is not None and (DEPTHAI_ZOO / self.args.cnnModel).exists():
return DEPTHAI_ZOO / self.args.cnnModel
def getAvailableZooModels(self):
def verify(path: Path):
return path.parent.name == path.stem
def convert(path: Path):
return path.stem
return list(map(convert, filter(verify, DEPTHAI_ZOO.rglob("**/*.json"))))
def getColorMap(self):
cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), getattr(cv2, "COLORMAP_{}".format(self.args.colorMap)))
cvColorMap[0] = [0, 0, 0]
return cvColorMap
def getRgbResolution(self):
if self.args.rgbResolution == 2160:
return dai.ColorCameraProperties.SensorResolution.THE_4_K
elif self.args.rgbResolution == 3040:
return dai.ColorCameraProperties.SensorResolution.THE_12_MP
else:
return dai.ColorCameraProperties.SensorResolution.THE_1080_P
def getMonoResolution(self):
if self.args.monoResolution == 720:
return dai.MonoCameraProperties.SensorResolution.THE_720_P
elif self.args.monoResolution == 800:
return dai.MonoCameraProperties.SensorResolution.THE_800_P
else:
return dai.MonoCameraProperties.SensorResolution.THE_400_P
def getMedianFilter(self):
if self.args.stereoMedianSize == 3:
return dai.MedianFilter.KERNEL_3x3
elif self.args.stereoMedianSize == 5:
return dai.MedianFilter.KERNEL_5x5
elif self.args.stereoMedianSize == 7:
return dai.MedianFilter.KERNEL_7x7
else:
return dai.MedianFilter.MEDIAN_OFF
def getUsb2Mode(self):
        if self.args.forceUsb2:
cliPrint("FORCE USB2 MODE", PrintColors.WARNING)
usb2Mode = True
else:
usb2Mode = False
return usb2Mode
def adjustPreviewToOptions(self):
if len(self.args.show) != 0:
return
self.args.show.append(Previews.color.name)
if self.useDepth:
if self.lowBandwidth:
self.args.show.append(Previews.disparityColor.name)
else:
self.args.show.append(Previews.depth.name)
if self.args.guiType == "qt":
if self.useNN:
self.args.show.append(Previews.nnInput.name)
if self.useDepth:
if self.lowBandwidth:
self.args.show.append(Previews.disparityColor.name)
else:
self.args.show.append(Previews.depthRaw.name)
self.args.show.append(Previews.rectifiedLeft.name)
self.args.show.append(Previews.rectifiedRight.name)
else:
self.args.show.append(Previews.left.name)
self.args.show.append(Previews.right.name)
def adjustParamsToDevice(self, device):
deviceInfo = device.getDeviceInfo()
cams = device.getConnectedCameras()
depthEnabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
if depthEnabled:
self.args.disableDepth = False
else:
if not self.args.disableDepth:
print("Disabling depth...")
self.args.disableDepth = True
if self.args.spatialBoundingBox:
print("Disabling spatial bounding boxes...")
self.args.spatialBoundingBox = False
if self.args.camera != 'color':
print("Switching source to RGB camera...")
self.args.camera = 'color'
updatedShowArg = []
for name in self.args.show:
if name in ("nnInput", "color"):
updatedShowArg.append(name)
else:
print("Disabling {} preview...".format(name))
if len(updatedShowArg) == 0:
print("No previews available, adding color and nnInput...")
updatedShowArg.append("color")
if self.useNN:
updatedShowArg.append("nnInput")
self.args.show = updatedShowArg
if self.args.bandwidth == "auto":
if deviceInfo.desc.protocol != dai.XLinkProtocol.X_LINK_USB_VSC:
print("Enabling low-bandwidth mode due to connection mode... (protocol: {})".format(deviceInfo.desc.protocol))
self.args.bandwidth = "low"
print("Setting PoE video quality to 50 to reduce latency...")
self.args.poeQuality = 50
elif device.getUsbSpeed() not in [dai.UsbSpeed.SUPER, dai.UsbSpeed.SUPER_PLUS]:
print("Enabling low-bandwidth mode due to low USB speed... (speed: {})".format(device.getUsbSpeed()))
self.args.bandwidth = "low"
else:
self.args.bandwidth = "high"
def linuxCheckApplyUsbRules(self):
if platform.system() == 'Linux':
ret = subprocess.call(['grep', '-irn', 'ATTRS{idVendor}=="03e7"', '/etc/udev/rules.d'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
if(ret != 0):
cliPrint("\nWARNING: Usb rules not found", PrintColors.WARNING)
cliPrint("\nSet rules: \n"
"""echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666"' | sudo tee /etc/udev/rules.d/80-movidius.rules \n"""
"sudo udevadm control --reload-rules && sudo udevadm trigger \n"
"Disconnect/connect usb cable on host! \n", PrintColors.RED)
os._exit(1)
def getCountLabel(self, nnetManager):
if self.args.countLabel is None:
return None
if self.args.countLabel.isdigit():
obj = nnetManager.getLabelText(int(self.args.countLabel)).lower()
print(f"Counting number of {obj} in the frame")
return obj
else: return self.args.countLabel.lower()
@property
def leftCameraEnabled(self):
return (self.args.camera == Previews.left.name and self.useNN) or \
Previews.left.name in self.args.show or \
Previews.rectifiedLeft.name in self.args.show or \
self.useDepth
@property
def rightCameraEnabled(self):
return (self.args.camera == Previews.right.name and self.useNN) or \
Previews.right.name in self.args.show or \
Previews.rectifiedRight.name in self.args.show or \
self.useDepth
@property
def rgbCameraEnabled(self):
return (self.args.camera == Previews.color.name and self.useNN) or \
Previews.color.name in self.args.show
@property
def inputSize(self):
return tuple(map(int, self.args.cnnInputSize.split('x'))) if self.args.cnnInputSize else None
@property
def previewSize(self):
return (576, 320)
@property
def lowBandwidth(self):
return self.args.bandwidth == "low"
@property
def lowCapabilities(self):
return platform.machine().startswith("arm") or platform.machine().startswith("aarch")
@property
def shaves(self):
if self.args.shaves is not None:
return self.args.shaves
if not self.useCamera:
return 8
if self.args.rgbResolution > 1080:
return 5
return 6
@property
def dispMultiplier(self):
val = 255 / self.maxDisparity
return val
| 35.90146
| 153
| 0.605063
|
699043b73562bfe2fc09b8729625b24696260628
| 7,215
|
py
|
Python
|
beatbrain/models_old/cvae_keras.py
|
tasercake/BeatBrain
|
2d84e1021c509f6564223858c051394c6c8504bb
|
[
"MIT"
] | 5
|
2019-09-10T22:34:34.000Z
|
2019-11-19T07:07:03.000Z
|
beatbrain/models_old/cvae_keras.py
|
tasercake/BeatBrain
|
2d84e1021c509f6564223858c051394c6c8504bb
|
[
"MIT"
] | 35
|
2019-11-12T03:18:43.000Z
|
2019-12-16T14:03:24.000Z
|
beatbrain/models_old/cvae_keras.py
|
tasercake/Beatbrain
|
2d84e1021c509f6564223858c051394c6c8504bb
|
[
"MIT"
] | 3
|
2019-09-04T10:07:57.000Z
|
2019-11-17T10:14:28.000Z
|
import os
import time
import numpy as np
import tensorflow as tf
from operator import mul
from functools import reduce
from pathlib import Path
from datetime import datetime
from tqdm import tqdm
from PIL import Image
from beatbrain.generator import data_utils
from beatbrain import default_config
tf.compat.v1.enable_eager_execution()
def log_normal_pdf(sample, mean, logvar, raxis=1):
log2pi = tf.math.log(2.0 * np.pi)
return tf.reduce_sum(
-0.5 * ((sample - mean) ** 2.0 * tf.exp(-logvar) + logvar + log2pi), axis=raxis
)
@tf.function
def compute_loss(model, x):
mean, logvar = model.encode(x)
z = model.reparameterize(mean, logvar)
x_logit = model.decode(z)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)
logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz = log_normal_pdf(z, 0.0, 0.0)
logqz_x = log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
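# Added commentary: compute_loss returns the negative of a single-sample
# Monte Carlo estimate of the ELBO,
#     log p(x) >= E_q[ log p(x|z) + log p(z) - log q(z|x) ],
# where the sigmoid cross-entropy term gives log p(x|z), and the two
# log_normal_pdf calls evaluate the diagonal Gaussians for the prior
# p(z) = N(0, I) and the encoder posterior q(z|x) = N(mean, exp(logvar)).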
@tf.function
def compute_apply_gradients(mdl, x, opt):
with tf.GradientTape() as tape:
loss = compute_loss(mdl, x)
gradients = tape.gradient(loss, mdl.trainable_variables)
opt.apply_gradients(zip(gradients, mdl.trainable_variables))
def visualize_model_outputs(mdl, epoch, test_input, output):
output = Path(output)
predictions = mdl.sample(eps=test_input)
print(f"Saving Samples Images to {output}")
for i, pred in enumerate(predictions):
progress_dir = (
Path(
default_config.MODEL_WEIGHTS or datetime.now().strftime("%Y%m%d-%H%M%S")
)
.resolve()
.stem
)
out_dir = output.joinpath(progress_dir).joinpath(str(i + 1))
os.makedirs(out_dir, exist_ok=True)
image = Image.fromarray(pred[:, :, 0].numpy(), mode="F")
image.save(os.path.join(out_dir, f"epoch_{epoch}.tiff"))
def reparameterize(args):
mean, logvar = args
batch = tf.keras.backend.shape(mean)[0]
dim = tf.keras.backend.int_shape(mean)[1]
eps = tf.random.normal(shape=(batch, dim))
return eps * tf.keras.backend.exp(logvar * 0.5) + mean
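# Added commentary: this implements the standard reparameterization trick.
# Sampling z ~ N(mean, exp(logvar)) directly is not differentiable, so we
# sample eps ~ N(0, I) and compute z = mean + exp(logvar / 2) * eps, which
# lets gradients flow back through mean and logvar.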
def vae_loss(mean, logvar, img_dims):
    # Keras calls loss functions as loss_fn(y_true, y_pred); name the
    # parameters in that order instead of relying on a confusing double swap.
    def loss_fn(y_true, y_pred):
        reconstruction_loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
        reconstruction_loss *= reduce(mul, img_dims)
        # Closed-form KL divergence between N(mean, exp(logvar)) and N(0, I)
        kl_loss = (
            1 + logvar - tf.keras.backend.square(mean) - tf.keras.backend.exp(logvar)
        )
        kl_loss = -0.5 * tf.keras.backend.sum(kl_loss, axis=-1)
        return tf.keras.backend.mean(reconstruction_loss + kl_loss)
    return loss_fn
# region Model hyperparameters
window_size = default_config.WINDOW_SIZE
image_dims = [default_config.CHUNK_SIZE, default_config.N_MELS]
input_shape = [*image_dims, window_size]
latent_dims = default_config.LATENT_DIMS
num_conv = 2
num_filters = 32
max_filters = 64
kernel_size = 3
# endregion
# region Training hyperparameters
num_epochs = default_config.EPOCHS
batch_size = default_config.BATCH_SIZE
# endregion
# region Model definition
inputs = tf.keras.layers.Input(shape=input_shape, name="encoder_input")
x = inputs
for i in range(num_conv):
x = tf.keras.layers.Conv2D(
filters=min(num_filters * (i + 1), max_filters),
kernel_size=kernel_size,
activation="relu",
strides=2,
padding="same",
activity_regularizer=tf.keras.regularizers.l1(0.01),
)(x)
latent_shape = x.shape
x = tf.keras.layers.Flatten()(x)
z_mean = tf.keras.layers.Dense(latent_dims, name="z_mean")(x)
z_log_var = tf.keras.layers.Dense(latent_dims, name="z_log_var")(x)
z = tf.keras.layers.Lambda(reparameterize, output_shape=[latent_dims], name="z")(
[z_mean, z_log_var]
)
encoder = tf.keras.Model(inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
latent_inputs = tf.keras.layers.Input(shape=(latent_dims,), name="z_sampled")
x = tf.keras.layers.Dense(reduce(mul, latent_shape[1:]), activation="relu")(
latent_inputs
)
x = tf.keras.layers.Reshape(latent_shape[1:])(x)
for i in range(num_conv):
x = tf.keras.layers.Conv2DTranspose(
filters=min(num_filters * (num_conv - i), max_filters),
kernel_size=kernel_size,
strides=2,
activation="relu",
padding="same",
activity_regularizer=tf.keras.regularizers.l1(0.01),
)(x)
reconstructed = tf.keras.layers.Conv2DTranspose(
    filters=window_size, kernel_size=3, strides=1, padding="same", activation="sigmoid"
)(x)
decoder = tf.keras.Model(latent_inputs, reconstructed, name="decoder")
decoder.summary()
outputs = decoder(encoder(inputs)[2])
vae = tf.keras.Model(inputs, outputs, name="vae")
vae.compile(
optimizer=tf.keras.optimizers.Adam(1e-4),
loss=vae_loss(z_mean, z_log_var, image_dims),
experimental_run_tf_function=False,
)
vae.summary()
# endregion
# region Train and evaluate
train_dataset, test_dataset = data_utils.load_numpy_dataset(
default_config.TRAIN_DATA_DIR, return_tuples=True
)
start = time.time()
num_samples = 2000
with tqdm(train_dataset.take(num_samples), total=num_samples) as pbar:
for i, element in enumerate(pbar):
# pbar.write(f"{i + 1}: {element[0].shape}")
pass
print("----------------FINISHED----------------")
print(time.time() - start)
# if Path(settings.MODEL_WEIGHTS).is_file():
# vae.load_weights(settings.MODEL_WEIGHTS)
# vae.fit(train_dataset, epochs=num_epochs, validation_data=(test_dataset, None))
# endregion
# optimizer = tf.keras.optimizers.Adam(1e-4)
# model = CVAE(num_conv=4)
# model.compile(optimizer=optimizer)
# if os.path.exists(settings.MODEL_WEIGHTS):
# print(f"Loading weights from '{settings.MODEL_WEIGHTS}'")
# model.load_weights(settings.MODEL_WEIGHTS)
# num_train = num_test = 0
# generation_vector = tf.random.normal(shape=[settings.EXAMPLES_TO_GENERATE, model.latent_dims])
# visualization_output_dir = os.path.join(settings.OUTPUT_DIR, 'progress')
# visualize_model_outputs(model, 0, generation_vector, visualization_output_dir)
#
# for epoch in range(1, settings.EPOCHS + 1):
# start = time.time()
# print(f"Training | Epoch {epoch} / {settings.EPOCHS}...")
# for train_x in tqdm(train_dataset, total=num_train or None):
# compute_apply_gradients(model, train_x, optimizer)
# if epoch == 1:
# num_train += 1
# print(f"Finished Train Step | Epoch {epoch} Train Step took {time.time() - start:.2f} seconds")
#
# if epoch % 1 == 0:
# # Evaluate Model
# print(f"Evaluation | Epoch {epoch}...")
# loss = tf.keras.metrics.Mean()
# for test_x in tqdm(test_dataset, total=num_test):
# loss(compute_loss(model, test_x))
# if epoch == 1:
# num_test += 1
# elbo = -loss.result()
# print(f"Epoch {epoch} took {time.time() - start:.2f} seconds | Test Set ELBO: {elbo}")
# # Save Model Weights
# os.makedirs(os.path.dirname(settings.MODEL_WEIGHTS), exist_ok=True) # Create dir if it doesn't exist
# model.save_weights(settings.MODEL_WEIGHTS)
# # Save Generated Samples
#         visualize_model_outputs(model, epoch, generation_vector, visualization_output_dir)
| 34.6875
| 111
| 0.685793
|
60448573d04556a655c31902f3df64a63b4700ab
| 692
|
py
|
Python
|
uwsgi_sloth/tests/test_structures.py
|
365moods/uwsgi
|
8097c08b1090aa08a0a241cb8772a803486e0759
|
[
"Apache-2.0"
] | 127
|
2015-01-02T11:57:22.000Z
|
2022-03-03T02:23:54.000Z
|
uwsgi_sloth/tests/test_structures.py
|
365moods/uwsgi
|
8097c08b1090aa08a0a241cb8772a803486e0759
|
[
"Apache-2.0"
] | 8
|
2015-06-15T12:10:13.000Z
|
2019-07-21T23:01:18.000Z
|
uwsgi_sloth/tests/test_structures.py
|
365moods/uwsgi
|
8097c08b1090aa08a0a241cb8772a803486e0759
|
[
"Apache-2.0"
] | 20
|
2015-01-06T03:27:25.000Z
|
2020-09-04T03:53:46.000Z
|
# -*- coding: utf-8 -*-
from uwsgi_sloth.structures import ValuesAggregation
def test_ValuesAggregation():
agr = ValuesAggregation()
agr.add_values(range(1, 101))
assert agr.get_result() == {'max': 100, 'avg': 50.5, 'min': 1}
assert agr.avg == 50.5
    # Test merging two aggregations
agr1 = ValuesAggregation(values=range(1, 11))
agr2 = ValuesAggregation(values=range(-10, 0))
agr3 = ValuesAggregation(values=range(100, 201))
assert agr1.merge_with(agr2).get_result() == {'max': 10, 'avg': 0.0, 'min': -10}
assert agr1.merge_with(agr3).get_result() == {
'max': 200,
'avg': (sum(range(1, 11)) + sum(range(100, 201))) / 111.0,
'min': 1
}
| 32.952381
| 84
| 0.608382
|
32965b39fcb4b17b66cabd633652335a19b431f6
| 30,226
|
py
|
Python
|
dumpflash/dumpjffs2.py
|
dmikushin/nandflasher
|
f5d99449c488d6a37955d84e40b1226ae90e3ac1
|
[
"Unlicense"
] | null | null | null |
dumpflash/dumpjffs2.py
|
dmikushin/nandflasher
|
f5d99449c488d6a37955d84e40b1226ae90e3ac1
|
[
"Unlicense"
] | null | null | null |
dumpflash/dumpjffs2.py
|
dmikushin/nandflasher
|
f5d99449c488d6a37955d84e40b1226ae90e3ac1
|
[
"Unlicense"
] | null | null | null |
# pylint: disable=invalid-name
# pylint: disable=line-too-long
import struct
import pprint
import os
import sys
import zlib
import shutil
from . import crc32
def main():
JFFS2_COMPR_NONE = 0x00
JFFS2_COMPR_ZERO = 0x01
JFFS2_COMPR_RTIME = 0x02
JFFS2_COMPR_RUBINMIPS = 0x03
JFFS2_COMPR_COPY = 0x04
JFFS2_COMPR_DYNRUBIN = 0x05
JFFS2_COMPR_ZLIB = 0x06
JFFS2_COMPR_LZO = 0x07
# Compatibility flags.
JFFS2_COMPAT_MASK = 0xc000
JFFS2_NODE_ACCURATE = 0x2000
# INCOMPAT: Fail to mount the filesystem
JFFS2_FEATURE_INCOMPAT = 0xc000
# ROCOMPAT: Mount read-only
JFFS2_FEATURE_ROCOMPAT = 0x8000
# RWCOMPAT_COPY: Mount read/write, and copy the node when it's GC'd
JFFS2_FEATURE_RWCOMPAT_COPY = 0x4000
# RWCOMPAT_DELETE: Mount read/write, and delete the node when it's GC'd
JFFS2_FEATURE_RWCOMPAT_DELETE = 0x0000
JFFS2_NODETYPE_DIRENT = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 1)
JFFS2_NODETYPE_INODE = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 2)
JFFS2_NODETYPE_CLEANMARKER = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
JFFS2_NODETYPE_PADDING = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 4)
JFFS2_NODETYPE_SUMMARY = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6)
JFFS2_NODETYPE_XATTR = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8)
JFFS2_NODETYPE_XREF = (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9)
# XATTR Related
JFFS2_XPREFIX_USER = 1 # for 'user.'
JFFS2_XPREFIX_SECURITY = 2 # for 'security.'
JFFS2_XPREFIX_ACL_ACCESS = 3 # for 'system.posix_acl_access'
JFFS2_XPREFIX_ACL_DEFAULT = 4 # for 'system.posix_acl_default'
JFFS2_XPREFIX_TRUSTED = 5 # for 'trusted.*'
JFFS2_ACL_VERSION = 0x0001
JFFS2_NODETYPE_CHECKPOINT = (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
JFFS2_NODETYPE_OPTIONS = (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
JFFS2_INO_FLAG_PREREAD = 1 # Do read_inode() for this one at
JFFS2_INO_FLAG_USERCOMPR = 2 # User has requested a specific
header_unpack_fmt = '<HHL'
header_struct_size = struct.calcsize(header_unpack_fmt)
inode_unpack_fmt = '<LLLLHHLLLLLLLBBHLL'
inode_struct_size = struct.calcsize(inode_unpack_fmt)
dirent_unpack_fmt = '<LLLLLBBBLL'
dirent_struct_size = struct.calcsize(dirent_unpack_fmt)
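    # Added commentary: the formats above follow the little-endian JFFS2
    # on-disk layout as it is unpacked later in this file. header_unpack_fmt
    # is the common node header (magic: u16, nodetype: u16, totlen: u32);
    # inode_unpack_fmt is the raw-inode record that follows it (hdr_crc, ino,
    # version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize,
    # dsize, compr, usercompr, flags, data_crc, node_crc); dirent_unpack_fmt
    # is the raw-dirent record (hdr_crc, pino, version, ino, mctime, nsize,
    # type, unused, node_crc, name_crc), with the entry name stored as raw
    # bytes immediately after it.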
class JFFS:
DebugLevel = 0
DumpMagicError = False
def __init__(self):
self.INodeMap = {}
self.DirentMap = {}
self.OrigFilename = None
def parse(self, filename, target_filename = ''):
self.OrigFilename = filename
fd = open(filename, 'rb')
data = fd.read()
fd.close()
data_offset = 0
total_count = 0
last_magic = 0
last_nodetype = 0
last_totlen = 0
last_data_offset = 0
while 1:
error = False
hdr = data[data_offset:data_offset+header_struct_size]
try:
(magic, nodetype, totlen) = struct.unpack(header_unpack_fmt, hdr)
except:
break
            # magic_header_offset = data_offset
if magic != 0x1985:
if self.DumpMagicError:
print('* Magic Error:', hex(data_offset), '(', hex(magic), ', ', hex(nodetype), ')')
print('\tLast record:', hex(last_data_offset), '(', hex(last_magic), ', ', hex(last_nodetype), ', ', hex(last_totlen), ')')
while data_offset < len(data):
tag = data[data_offset:data_offset+4]
if tag == b'\x85\x19\x02\xe0':
if self.DumpMagicError:
print('\tFound next inode at 0x%x' % data_offset)
print('')
break
data_offset += 0x4
if data_offset < len(data):
(magic, nodetype, totlen) = struct.unpack(header_unpack_fmt, data[data_offset:data_offset+header_struct_size])
if magic != 0x1985:
break
if nodetype == JFFS2_NODETYPE_INODE:
node_data = data[data_offset+header_struct_size:data_offset+header_struct_size+inode_struct_size]
(hdr_crc, ino, version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize, dsize, compr, usercompr, flags, data_crc, node_crc) = struct.unpack(inode_unpack_fmt, node_data)
payload = data[data_offset+0x44: data_offset+0x44+csize]
if compr == 0x6:
try:
                        payload = zlib.decompress(payload)
                    except zlib.error:
if self.DebugLevel > 0:
print('* Uncompress error')
error = True
if self.DebugLevel > 0:
print('payload length:', len(payload))
if self.DebugLevel > 1:
pprint.pprint(payload)
if ino not in self.INodeMap:
self.INodeMap[ino] = []
self.INodeMap[ino].append({
'data_offset': data_offset,
'ino': ino,
'hdr_crc': hdr_crc,
'version': version,
'mode': mode,
'uid': uid,
'gid': gid,
'isize': isize,
'atime': atime,
'mtime': mtime,
'ctime': ctime,
'offset': offset,
'csize': csize,
'dsize': dsize,
'compr': compr,
'usercompr': usercompr,
'flags': flags,
'data_crc': data_crc,
'node_crc': node_crc,
'totlen': totlen,
'payload': payload
})
if error or (target_filename != '' and ino in self.DirentMap and self.DirentMap[ino]['payload'].find(target_filename) >= 0):
#if self.DebugLevel>0:
if True:
print(' = '*79)
print('* JFFS2_NODETYPE_INODE:')
print('magic: %x nodetype: %x totlen: %x' % (magic, nodetype, totlen))
print('data_offset: %x offset: %x csize: %x dsize: %x next_offset: %x' % (data_offset, offset, csize, dsize, data_offset + 44 + csize))
print('ino: %x version: %x mode: %x' % (ino, version, mode))
print('uid: %x gid: %x' % (uid, gid))
print('atime: %x mtime: %x ctime: %x' % (atime, mtime, ctime))
print('compr: %x usercompr: %x' % (compr, usercompr))
print('flags: %x isize: %x' % (flags, isize))
print('hdr_crc: %x data_crc: %x node_crc: %x' % (hdr_crc, data_crc, node_crc))
print('')
elif nodetype == JFFS2_NODETYPE_DIRENT:
(hdr_crc, pino, version, ino, mctime, nsize, ent_type, _, node_crc, name_crc) = struct.unpack(dirent_unpack_fmt, data[data_offset+header_struct_size:data_offset+header_struct_size+dirent_struct_size])
payload = data[data_offset+header_struct_size+dirent_struct_size+1: data_offset+header_struct_size+dirent_struct_size+1+nsize]
if ino not in self.DirentMap or self.DirentMap[ino]['version'] < version:
self.DirentMap[ino] = {
'hdr_crc': hdr_crc,
'pino': pino,
'version': version,
'mctime': mctime,
'nsize': nsize,
'ent_type': ent_type,
'node_crc': node_crc,
'name_crc': name_crc,
'payload': payload
}
if target_filename != '' and payload.find(target_filename) >= 0:
print(' = '*79)
print('* JFFS2_NODETYPE_DIRENT:')
print('data_offset:\t', hex(data_offset))
print('magic:\t\t%x' % magic)
print('nodetype:\t%x' % nodetype)
print('totlen:\t\t%x' % totlen)
print('hdr_crc:\t%x' % hdr_crc)
print('pino:\t\t%x' % pino)
print('version:\t%x' % version)
print('ino:\t\t%x' % ino)
print('node_crc:\t%x' % node_crc)
parent_node = ''
if pino in self.DirentMap:
parent_node = self.DirentMap[pino]['payload']
print('Payload:\t%s' % (parent_node + '\\' + payload))
print('')
elif nodetype == 0x2004:
pass
else:
print(' = '*79)
print('data_offset:\t', hex(data_offset))
print('magic:\t\t%x' % magic)
print('nodetype:\t%x' % nodetype)
print('totlen:\t\t%x' % totlen)
(last_magic, last_nodetype, last_totlen) = (magic, nodetype, totlen)
last_data_offset = data_offset
if totlen%4 != 0:
totlen += 4-(totlen%4)
data_offset += totlen
current_page_data_len = data_offset % 0x200
if (0x200-current_page_data_len) < 0x8:
data_offset += 0x200-current_page_data_len
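                # Added commentary: JFFS2 nodes are 4-byte aligned (handled
                # just above), and this image never lets a node header
                # straddle the 0x200-byte page boundary used here: if fewer
                # than 8 bytes remain in the current page, parsing skips
                # ahead to the next page.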
if self.DebugLevel > 0:
print('* Record (@%x):\tMagic: %x\tType: %x\tTotlen %x\tPadded Totlen: %x' % (last_data_offset, last_magic, last_nodetype, last_totlen, totlen))
total_count += 1
print('Total Count:', total_count)
if self.DebugLevel > 0:
pprint.pprint(self.DirentMap)
def get_path(self, ino):
path = ''
while ino != 0 and ino in self.DirentMap:
path = '/' + self.DirentMap[ino]['payload'] + path
ino = self.DirentMap[ino]['pino']
return path
    def read_file_data(self, inode_map_record, dump = False):
        # Use a bytearray so sparse regions can be zero-filled and payload
        # bytes spliced in without mixing str and bytes (the previous list +
        # ''.join approach fails on Python 3).
        data = bytearray()
        for record in inode_map_record:
            if dump:
                print('offset: %x dsize: %x data offset: %x length: %x (ver: %x) totlen: %x' % (record['offset'], record['dsize'], record['data_offset'], len(record['payload']), record['version'], record['totlen']))
            offset = record['offset']
            dsize = record['dsize']
            new_data_len = offset+dsize-len(data)
            if new_data_len > 0:
                data += b'\x00' * new_data_len
            data[offset:offset+dsize] = record['payload']
        return bytes(data)
def read_file_seq_data(self, inode_map_record, dump = False):
next_offset = 0
        data = b''
for record in inode_map_record:
if dump:
print(len(inode_map_record))
print('Version: %x Offset: %x DSize: %x Data Offset: %x Payload Length: %x' % (record['version'], record['offset'], record['dsize'], record['data_offset'], len(record['payload'])))
offset = record['offset']
if offset == next_offset:
next_offset = offset + record['dsize']
# found_record = True
data += record['payload']
return data
def write_data(self, output_filename, inode_map_record, data):
shutil.copy(self.OrigFilename, output_filename)
next_offset = 0
while 1:
found_record = False
for record in inode_map_record:
offset = record['offset']
if offset == next_offset:
orig_data = data
if record['compr'] == 0x6:
try:
                        data = zlib.compress(data)
except:
print('* Compress error')
print('data_offset: %x offset: %x dsize: %x csize: %x' % (record['data_offset'], record['offset'], record['dsize'], record['csize']))
print('Trying to write: %x' % len(data))
if record['csize'] > len(data):
                    fd = open(output_filename, 'r+b')
fd.seek(record['data_offset'])
record['csize'] = len(data)
record['dsize'] = len(orig_data)
fd.write(struct.pack(inode_unpack_fmt,
record['hdr_crc'],
record['ino'],
record['version'],
record['mode'],
record['uid'],
record['gid'],
record['isize'],
record['atime'],
record['mtime'],
record['ctime'],
record['offset'],
record['csize'],
record['dsize'],
record['compr'],
record['usercompr'],
record['flags'],
record['data_crc'],
record['node_crc']
) + data + (record['csize'] - len(data)) * b'\xff')
fd.close()
next_offset = offset + record['dsize']
if next_offset != offset:
found_record = True
break
if not found_record:
break
return data
def dump_file(self, filename, mod = '', out = ''):
print('dump_file')
for ino in list(self.DirentMap.keys()):
if ino in self.INodeMap:
path = self.get_path(ino)
if path == filename:
print('')
print(' = '*80)
print(ino, self.get_path(ino), len(self.DirentMap[ino]['payload']))
pprint.pprint(self.DirentMap[ino])
data = self.read_file_data(self.INodeMap[ino])
print(data)
if mod != '':
fd = open(mod, 'rb')
self.write_data(out, self.INodeMap[ino], fd.read())
fd.close()
def dump_info(self, output_dir, ino, target_filename = ''):
path = self.get_path(ino)
directory = os.path.dirname(path)
basename = os.path.basename(path)
local_dir = os.path.join(output_dir, directory[1:])
local_path = os.path.join(local_dir, basename)
write_file = True
dump = False
if target_filename != '':
write_file = False
if path.find(target_filename) >= 0:
dump = True
write_file = True
else:
write_file = True
if dump:
print('File %s (ino: %d)' % (path, ino))
data = self.read_file_data(self.INodeMap[ino], dump = dump)
if dump:
print('\tFile length: %d' % (len(data)))
print('')
if len(data) == 0:
return
if write_file:
if not os.path.isdir(local_dir):
os.makedirs(local_dir)
try:
fd = open(local_path, 'wb')
fd.write(data)
fd.close()
except:
print('Failed to create file: %s' % (local_path))
def dump(self, output_dir, target_filename = ''):
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
processed_ino = {}
for ino in list(self.DirentMap.keys()):
if ino in self.INodeMap:
processed_ino[ino] = True
self.dump_info(output_dir, ino, target_filename)
for ino in list(self.INodeMap.keys()):
if ino not in processed_ino:
self.dump_info(output_dir, ino, target_filename)
def list_data(self, inode_map_record):
for record in inode_map_record:
print('version: 0x%x' % record['version'])
print('\toffset: 0x%x' % record['offset'])
print('\tpayload: 0x%x' % len(record['payload']))
print('\tdata_offset: 0x%x' % record['data_offset'])
print('\tctime: 0x%x' % record['ctime'])
print('\tmtime: 0x%x' % record['mtime'])
print('\tatime: 0x%x' % record['atime'])
def list_file(self, filename):
print('Path\tInode\tNumber of records')
for ino in list(self.DirentMap.keys()):
if ino in self.INodeMap:
if filename == '':
print(self.get_path(ino))
print('\tInode:', ino)
print('\tRecords:', len(self.INodeMap[ino]))
else:
path = self.get_path(ino)
if path == filename:
print(self.get_path(ino))
print('\tInode:', ino)
print('\tRecords:', len(self.INodeMap[ino]))
self.list_data(self.INodeMap[ino])
def make_inode(
self,
ino = 0x683,
version = 0x1da,
mode = 0x81ed,
uid = 0x0,
gid = 0x0,
isize = 0x1bcb8,
atime = 0x498351be,
mtime = 0x498351be,
ctime = 0x31,
offset = 0,
dsize = 0x1000,
compr = 6,
usercompr = 0,
flags = 0,
payload = ''
):
crc32_inst = crc32.CRC32()
crc32_inst.set_sarwate()
magic = 0x1985
nodetype = JFFS2_NODETYPE_INODE
totlen = len(payload)+0x44
header = struct.pack(header_unpack_fmt, magic, nodetype, totlen)
csize = len(payload)
hdr_crc = 0
data_crc = crc32_inst.calc(payload) #0x4d8bd458
node_crc = 0x0 #0x6d423d5a
inode = struct.pack(inode_unpack_fmt, hdr_crc, ino, version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize, dsize, compr, usercompr, flags, data_crc, node_crc)
hdr_crc = crc32_inst.calc(header) #0xca1c1cba
inode = struct.pack(inode_unpack_fmt, hdr_crc, ino, version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize, dsize, compr, usercompr, flags, data_crc, node_crc)
ri = header+inode
ri = ri[0:header_struct_size+inode_struct_size-8]
node_crc = crc32_inst.calc(ri)
inode = struct.pack(inode_unpack_fmt, hdr_crc, ino, version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize, dsize, compr, usercompr, flags, data_crc, node_crc)
data = header
data += inode
data += payload
debug = 0
if debug > 0:
print('')
print('header: %08X' % ((crc32_inst.calc(header)) & 0xFFFFFFFF))
print('inode: %08X' % ((crc32_inst.calc(inode)) & 0xFFFFFFFF))
print('header+inode: %08X' % ((crc32_inst.calc(ri)) & 0xFFFFFFFF))
print('payload: %08X' % ((crc32_inst.calc(payload)) & 0xFFFFFFFF))
print('data: %08X' % ((crc32_inst.calc(data)) & 0xFFFFFFFF))
return data
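    # Added commentary: make_inode packs the record three times because the
    # CRCs are interdependent: data_crc covers the payload, hdr_crc covers the
    # packed common header, and node_crc covers header+inode up to (but not
    # including) the trailing data_crc/node_crc fields, so each newly computed
    # CRC requires re-packing the inode before the next one can be taken.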
def make_inode_with_header(self, header, payload):
(magic, nodetype, totlen) = struct.unpack(header_unpack_fmt, header[0:header_struct_size])
print('magic: %X' % (magic))
print('nodetype: %X' % (nodetype))
print('totlen: %X' % (totlen))
(hdr_crc, ino, version, mode, uid, gid, isize, atime, mtime, ctime, offset, csize, dsize, compr, usercompr, flags, data_crc, node_crc) = struct.unpack(inode_unpack_fmt, header[header_struct_size:header_struct_size+inode_struct_size])
print('hdr_crc: %X' % (hdr_crc))
print('ino: %X' % (ino))
print('version: %X' % (version))
print('mode: %X' % (mode))
print('uid: %X' % (uid))
print('gid: %X' % (gid))
print('isize: %X' % (isize))
print('atime: %X' % (atime))
print('mtime: %X' % (mtime))
print('ctime: %X' % (ctime))
print('offset: %X' % (offset))
print('csize: %X' % (csize))
print('dsize: %X' % (dsize))
print('compr: %X' % (compr))
print('usercompr: %X' % (usercompr))
print('flags: %X' % (flags))
print('data_crc: %X' % (data_crc))
print('node_crc: %X' % (node_crc))
return self.make_inode(
ino = ino,
version = version,
mode = mode,
uid = uid,
gid = gid,
isize = isize,
atime = atime,
mtime = mtime,
ctime = ctime,
offset = offset,
dsize = dsize,
compr = compr,
usercompr = usercompr,
flags = flags,
payload = payload
)
def make_inode_with_header_file(self, header_file, payload_file):
fd = open(header_file, 'rb')
header = fd.read()[0:header_struct_size+inode_struct_size]
fd.close()
fd = open(payload_file, 'rb')
payload = fd.read()
fd.close()
return self.make_inode_with_header(header, payload)
def write_ino(self, ino, target_filename, offset, size, new_data_filename, output_filename):
path = self.get_path(ino)
# directory = os.path.dirname(path)
# basename = os.path.basename(path)
if path == target_filename:
print('File %s (ino: %d)' % (path, ino))
print('%x %x' % (offset, size))
data = []
for record in self.INodeMap[ino]:
record_offset = record['offset']
record_dsize = record['dsize']
if record_offset <= offset and offset <= record_offset+record_dsize:
record_data_offset = record['data_offset']
totlen = record['totlen']
print('%x (%x) -> file offset: %x (%x) totlen = %x' % (record_offset, record_dsize, record_data_offset, record['csize'], totlen))
fd = open(new_data_filename, 'rb')
fd.seek(record_offset)
data = fd.read(record_dsize)
fd.close()
new_data = zlib.compress(data)
new_inode = self.make_inode(
ino = record['ino'],
version = record['version'],
mode = record['mode'],
uid = record['uid'],
gid = record['gid'],
isize = record['isize'],
atime = record['atime'],
mtime = record['mtime'],
ctime = record['ctime'],
offset = record['offset'],
dsize = record['dsize'],
compr = record['compr'],
usercompr = record['usercompr'],
flags = record['flags'],
payload = new_data
)
new_inode_len = len(new_inode)
print(' new_inode: %x' % (len(new_inode)))
if totlen > new_inode_len:
new_inode += (totlen-new_inode_len) * b'\xff'
if output_filename != '':
#print 'Writing to %s at 0x%x (0x%x)' % (output_filename, record_data_offset, len(new_inode))
fd = open(output_filename, 'wb')
orig_fd = open(self.OrigFilename, 'rb')
old_data = orig_fd.read()
orig_fd.close()
#Save
ofd = open('old.bin', 'wb')
ofd.write(old_data[record_data_offset:record_data_offset+len(new_inode)])
ofd.close()
nfd = open('new.bin', 'wb')
nfd.write(new_inode)
nfd.close()
fd.write(old_data)
fd.seek(record_data_offset)
fd.write(new_inode)
fd.close()
def write_file(self, target_filename, new_data_filename, offset, size, output_filename):
processed_ino = {}
for ino in list(self.DirentMap.keys()):
if ino in self.INodeMap:
processed_ino[ino] = True
self.write_ino(ino, target_filename, offset, size, new_data_filename, output_filename)
for ino in list(self.INodeMap.keys()):
if ino not in processed_ino:
self.write_ino(ino, target_filename, offset, size, new_data_filename, output_filename)
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
'-o', '--output_dir', dest = 'output_dir',
help = 'Set output directory name', default = '', metavar = 'OUTPUT_DIR')
parser.add_option(
'-O', '--output_filename', dest = 'output_filename',
help = 'Set output filename', default = '', metavar = 'OUTPUT_FILENAME')
parser.add_option(
'-f', '--target_filename', dest = 'target_filename',
help = 'Set target filename', default = '', metavar = 'TARGET_FILENAME')
parser.add_option(
'-n', '--new_data_filename', dest = 'new_data_filename',
help = 'Set new data file name', default = '', metavar = 'NEW_DATA_FILENAME')
parser.add_option('-l', action = 'store_true', dest = 'list')
parser.add_option('-d', type = 'int', default = 0, dest = 'debug')
parser.add_option('-t', type = 'int', default = 0, dest = 'offset')
parser.add_option('-s', type = 'int', default = 0, dest = 'size')
(options, args) = parser.parse_args()
jffs2_filename = args[0]
jffs = JFFS()
jffs.parse(jffs2_filename, target_filename = options.target_filename)
jffs.DebugLevel = options.debug
if options.list:
        jffs.list_file(options.target_filename)
elif options.new_data_filename != '':
jffs.write_file(options.target_filename, options.new_data_filename, options.offset, options.size, options.output_filename)
elif options.output_dir != '':
print('Dumping files to a folder: %s' % (options.output_dir))
jffs.dump(options.output_dir, target_filename = options.target_filename)
    elif options.target_filename != '' and options.output_filename != '':
jffs.dump_file(options.target_filename, options.new_data_filename, options.output_filename)
if __name__ == "__main__":
sys.exit(main())
| 42.873759
| 245
| 0.464071
|
d0332e9ed9f67052f52254b7869af388df1c5dfa
| 372
|
py
|
Python
|
orchestration/models/integration_type.py
|
dave-read/vdc
|
0a331c0d4cde2a87df11e9ff4304a539f0a56692
|
[
"MIT"
] | 1
|
2019-06-12T23:48:30.000Z
|
2019-06-12T23:48:30.000Z
|
orchestration/models/integration_type.py
|
dave-read/vdc
|
0a331c0d4cde2a87df11e9ff4304a539f0a56692
|
[
"MIT"
] | 1
|
2019-04-17T00:11:59.000Z
|
2019-04-17T00:11:59.000Z
|
orchestration/models/integration_type.py
|
dave-read/vdc
|
0a331c0d4cde2a87df11e9ff4304a539f0a56692
|
[
"MIT"
] | 2
|
2019-04-16T16:09:44.000Z
|
2019-05-31T19:54:12.000Z
|
from enum import Enum
class IntegrationType(Enum):
    BILLING_CLIENT_SDK = 1
    MANAGEMENT_GROUP_CLIENT_SDK = 2
    MANAGEMENT_LOCK_CLIENT_SDK = 3
    POLICY_CLIENT_SDK = 4
    RESOURCE_MANAGEMENT_CLIENT_SDK = 5
    SUBSCRIPTION_CLIENT_SDK = 6
    AAD_CLIENT_CLI = 7
    KEYVAULT_CLIENT_CLI = 8
    RBAC_CLIENT_CLI = 9
    RESOURCE_MANAGEMENT_CLIENT_CLI = 10
    SUBSCRIPTION_CLIENT_CLI = 11
| 26.571429
| 37
| 0.798387
|
ce29b5e528b41c3d63927feb10d0d426bb2b5600
| 8,071
|
py
|
Python
|
dss/video.py
|
terabit-software/dynamic-stream-server
|
55988f8af7b49b28f446c61aeae8ecae40c7ade2
|
[
"BSD-3-Clause"
] | 9
|
2015-08-06T20:36:21.000Z
|
2021-09-08T19:49:46.000Z
|
dss/video.py
|
terabit-software/dynamic-stream-server
|
55988f8af7b49b28f446c61aeae8ecae40c7ade2
|
[
"BSD-3-Clause"
] | null | null | null |
dss/video.py
|
terabit-software/dynamic-stream-server
|
55988f8af7b49b28f446c61aeae8ecae40c7ade2
|
[
"BSD-3-Clause"
] | 4
|
2015-08-21T22:00:11.000Z
|
2019-10-26T15:31:15.000Z
|
from __future__ import division
import time
import warnings
try:
# Python 3
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from .config import config
from .providers import Providers
from .tools import process, thread, noxml
from .tools.show import Show
from .stats import StreamStats
show = Show('Video')
class StreamHTTPClient(object):
""" Emulate the behaviour of a RTMP client when there's an HTTP access
for a certain Stream. If no other HTTP access is made within the
timeout period, the `Stream` instance will be decremented.
"""
def __init__(self, parent):
self.lock = thread.Condition()
self.timeout = None
self.parent = parent
self._stop()
def wait(self, timeout):
self.timeout = timeout
if not self._stopped:
with self.lock:
self._stop(restart=True)
self.lock.notify_all()
else:
self.thread = thread.Thread(self._wait_worker).start()
return self
def _wait_worker(self):
with self.lock:
while self._stopped:
self._start()
self.lock.wait(self.timeout)
self._stop()
self.parent.dec(http=True)
def _stop(self, restart=False, data=True):
self._stopped = data
if not restart:
self._stopped_info = data
def _start(self):
self._stop(data=False)
def __bool__(self):
return not self._stopped_info
__nonzero__ = __bool__
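# Added commentary (walk-through, not part of the original file): a Stream
# calls http_client.wait(timeout) on every HTTP hit. The first call spawns
# _wait_worker, which marks the client active (so Stream.clients counts one
# extra user via __bool__) and sleeps on the condition; later hits only
# notify the condition so the wait restarts. Once the timeout elapses with
# no new hit, the worker stops and calls parent.dec(http=True).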
class Stream(object):
_ffmpeg = config['ffmpeg']
run_timeout = _ffmpeg.getint('timeout')
reload_timeout = _ffmpeg.getint('reload')
def __init__(self, id, timeout=run_timeout):
self.lock = thread.Lock()
self.id = id
provider = Providers.select(id)
try:
provider.get_stream(id)
except Exception:
# The prefix match but the id is not real
raise KeyError('Invalid id for {0.identifier!r} ({0.name}) provider'.format(provider))
self.fn = lambda self=self: process.run_proc(
self.id,
provider.make_cmd(self.id),
'fetch'
)
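        # Added note: `self=self` binds the instance at definition time so the
        # lambda can later be invoked with no arguments as a process factory.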
self.cnt = 0
self._proc_run = False
self.proc = None
self.thread = None
self.timeout = timeout
self.http_client = StreamHTTPClient(self)
self.stats = StreamStats()
def __repr__(self):
pid = self.proc.pid if self.proc else None
return '<{0}: Users={1} Pid={2}>'.format(self.id, self.clients, pid)
@property
def clients(self):
return self.cnt + bool(self.http_client)
@property
def alive(self):
return self.proc or self.proc_run
@property
def proc_run(self):
return self._proc_run
@proc_run.setter
def proc_run(self, value):
with self.lock:
self._proc_run = value
def inc(self, k=1, http_wait=None):
""" Increment user count unless it is a http user (then http_wait
must be set). If so, it should wait a period of time on another
thread and the clients property will be indirectly incremented.
If there is no process running and it should be, a new process
will be started.
"""
if http_wait:
self.http_client.wait(http_wait)
else:
self.cnt += k
if not self.proc and not self.proc_run:
self.proc_start()
show(self)
return self
def dec(self, http=False):
""" Decrement the user count unless it is a http user. If there are no
new clients, the process is scheduled to shutdown.
"""
if not http:
if self.cnt:
self.cnt -= 1
if not self.clients:
self.proc_stop()
show(self)
return self
def _proc_msg(self, pid, msg):
return '{0} - FFmpeg[{1}] {2}'.format(self.id, pid, msg)
def proc_start(self):
""" Process starter on another thread.
"""
def worker():
self.proc_run = True
start_msg = 'started'
while True:
with self.fn() as self.proc:
self.stats.timed.started()
pid = self.proc and self.proc.pid
show(self._proc_msg(pid, start_msg))
self.proc.wait()
self.proc = None
if self.proc_run: # Should be running, but isn't
self.stats.timed.died()
show.warn(self._proc_msg(pid, 'died'))
time.sleep(self.reload_timeout)
if self.proc_run: # It might have been stopped after waiting
start_msg = 'restarted'
continue
show(self._proc_msg(pid, 'stopped'))
break
self.thread = thread.Thread(worker).start()
def _kill(self):
""" Kill the FFmpeg process. Don't call this function directly,
otherwise the process may be restarted. Call `proc_stop` instead.
"""
try:
self.proc.kill()
self.proc.wait()
except (OSError, AttributeError):
pass
finally:
self.proc = None
def proc_stop(self, now=False):
if now:
self.proc_run = False
self._kill()
return
if not self.proc_run:
return
self.proc_run = False
def stop_worker():
time.sleep(self.timeout)
if not self.clients:
self._kill()
else:
self.proc_run = True
thread.Thread(stop_worker).start()
class Video(object):
_data = {}
_data_lock = thread.Lock()
run = True
@classmethod
def start(cls, id, increment=1, http_wait=None):
if cls.run:
cls.get_stream(id).inc(increment, http_wait=http_wait)
@classmethod
def stop(cls, id):
cls.get_stream(id).dec()
@classmethod
def get_stream(cls, id):
with cls._data_lock:
stream = cls._data.get(id)
if stream is None:
stream = Stream(id)
cls._data[id] = stream
return stream
@classmethod
def get_stats(cls):
http = config['http-server']
addr = http['addr']
stat = http['stat_url']
data = urlopen(addr + stat).read()
return noxml.load(data, ('stream', 'application'))
@classmethod
def initialize_from_stats(cls):
try:
stats = cls.get_stats()['server']['application']
except IOError:
return
app = config['rtmp-server']['app']
try:
app = next(x['live'] for x in stats if x['name'] == app)
except StopIteration:
raise RuntimeError('No app named %r' % app)
# App clients
stream_list = app.get('stream')
if stream_list is None:
return
for stream in stream_list:
# Stream clients
nclients = int(stream['nclients'])
if 'publishing' in stream:
nclients -= 1
if nclients <= 0:
continue
try:
cls.start(stream['name'], nclients)
except KeyError:
warnings.warn('Invalid stream name: %r' % stream['name'])
@classmethod
def auto_start(cls):
for id in config['video_start'].get_list('auto_start'):
cls.start(id)
for p in config['video_start'].get_list('auto_start_provider'):
streams = Providers.select(p).streams()
for id in streams:
cls.start(id)
@classmethod
def terminate_streams(cls):
with cls._data_lock:
cls.run = False
for strm in cls._data.values():
strm.proc_stop(now=True)
| 28.72242
| 98
| 0.548755
|
0727a525625498b9ae4721d04aff0d2cf52d1505
| 9,104
|
py
|
Python
|
serial/serialcli.py
|
jabdoa2/pyserial
|
92d101613be41ecb2f2054c3f43a006fbe6f9966
|
[
"BSD-3-Clause"
] | 1,118
|
2015-01-02T04:31:39.000Z
|
2022-03-23T18:46:48.000Z
|
Thonny/Lib/site-packages/serial/serialcli.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 330
|
2015-01-03T04:38:18.000Z
|
2022-02-14T12:47:51.000Z
|
Thonny/Lib/site-packages/serial/serialcli.py
|
Pydiderot/pydiderotIDE
|
a42fcde3ea837ae40c957469f5d87427e8ce46d3
|
[
"MIT"
] | 296
|
2015-05-13T15:03:32.000Z
|
2022-03-22T20:51:25.000Z
|
#! python
#
# Backend for .NET/Mono (IronPython), .NET >= 2
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2008-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import System
import System.IO.Ports
from serial.serialutil import *
# must invoke function with byte array, make a helper to convert strings
# to byte arrays
sab = System.Array[System.Byte]
def as_byte_array(string):
    return sab([ord(x) for x in string])  # XXX will require adaptation when run with a 3.x compatible IronPython
class Serial(SerialBase):
"""Serial port implementation for .NET/Mono."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
try:
self._port_handle = System.IO.Ports.SerialPort(self.portstr)
except Exception as msg:
self._port_handle = None
raise SerialException("could not open port %s: %s" % (self.portstr, msg))
# if RTS and/or DTR are not set before open, they default to True
if self._rts_state is None:
self._rts_state = True
if self._dtr_state is None:
self._dtr_state = True
self._reconfigure_port()
self._port_handle.Open()
self.is_open = True
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
self.reset_input_buffer()
def _reconfigure_port(self):
"""Set communication parameters on opened port."""
if not self._port_handle:
raise SerialException("Can only operate on a valid port handle")
#~ self._port_handle.ReceivedBytesThreshold = 1
if self._timeout is None:
self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
else:
self._port_handle.ReadTimeout = int(self._timeout * 1000)
# if self._timeout != 0 and self._interCharTimeout is not None:
# timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
if self._write_timeout is None:
self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
else:
self._port_handle.WriteTimeout = int(self._write_timeout * 1000)
# Setup the connection info.
try:
self._port_handle.BaudRate = self._baudrate
except IOError as e:
# catch errors from illegal baudrate settings
raise ValueError(str(e))
if self._bytesize == FIVEBITS:
self._port_handle.DataBits = 5
elif self._bytesize == SIXBITS:
self._port_handle.DataBits = 6
elif self._bytesize == SEVENBITS:
self._port_handle.DataBits = 7
elif self._bytesize == EIGHTBITS:
self._port_handle.DataBits = 8
else:
raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
if self._parity == PARITY_NONE:
self._port_handle.Parity = getattr(System.IO.Ports.Parity, 'None') # reserved keyword in Py3k
elif self._parity == PARITY_EVEN:
self._port_handle.Parity = System.IO.Ports.Parity.Even
elif self._parity == PARITY_ODD:
self._port_handle.Parity = System.IO.Ports.Parity.Odd
elif self._parity == PARITY_MARK:
self._port_handle.Parity = System.IO.Ports.Parity.Mark
elif self._parity == PARITY_SPACE:
self._port_handle.Parity = System.IO.Ports.Parity.Space
else:
raise ValueError("Unsupported parity mode: %r" % self._parity)
if self._stopbits == STOPBITS_ONE:
self._port_handle.StopBits = System.IO.Ports.StopBits.One
elif self._stopbits == STOPBITS_ONE_POINT_FIVE:
self._port_handle.StopBits = System.IO.Ports.StopBits.OnePointFive
elif self._stopbits == STOPBITS_TWO:
self._port_handle.StopBits = System.IO.Ports.StopBits.Two
else:
raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
if self._rtscts and self._xonxoff:
self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSendXOnXOff
elif self._rtscts:
self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSend
elif self._xonxoff:
self._port_handle.Handshake = System.IO.Ports.Handshake.XOnXOff
else:
self._port_handle.Handshake = getattr(System.IO.Ports.Handshake, 'None') # reserved keyword in Py3k
#~ def __del__(self):
#~ self.close()
def close(self):
"""Close port"""
if self.is_open:
if self._port_handle:
try:
self._port_handle.Close()
except System.IO.Ports.InvalidOperationException:
# ignore errors. can happen for unplugged USB serial devices
pass
self._port_handle = None
self.is_open = False
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of characters currently in the input buffer."""
if not self.is_open:
raise portNotOpenError
return self._port_handle.BytesToRead
def read(self, size=1):
"""\
        Read size bytes from the serial port. If a timeout is set, it may
        return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
# must use single byte reads as this is the only way to read
# without applying encodings
data = bytearray()
while size:
try:
data.append(self._port_handle.ReadByte())
except System.TimeoutException:
break
else:
size -= 1
return bytes(data)
def write(self, data):
"""Output the given string over the serial port."""
if not self.is_open:
raise portNotOpenError
#~ if not isinstance(data, (bytes, bytearray)):
#~ raise TypeError('expected %s or bytearray, got %s' % (bytes, type(data)))
try:
# must call overloaded method with byte array argument
# as this is the only one not applying encodings
self._port_handle.Write(as_byte_array(data), 0, len(data))
except System.TimeoutException:
raise writeTimeoutError
return len(data)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
self._port_handle.DiscardInBuffer()
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
self._port_handle.DiscardOutBuffer()
def _update_break_state(self):
"""
        Set break: Controls TXD. When active, no transmitting is possible.
"""
if not self.is_open:
raise portNotOpenError
self._port_handle.BreakState = bool(self._break_state)
def _update_rts_state(self):
"""Set terminal status line: Request To Send"""
if not self.is_open:
raise portNotOpenError
self._port_handle.RtsEnable = bool(self._rts_state)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready"""
if not self.is_open:
raise portNotOpenError
self._port_handle.DtrEnable = bool(self._dtr_state)
@property
def cts(self):
"""Read terminal status line: Clear To Send"""
if not self.is_open:
raise portNotOpenError
return self._port_handle.CtsHolding
@property
def dsr(self):
"""Read terminal status line: Data Set Ready"""
if not self.is_open:
raise portNotOpenError
return self._port_handle.DsrHolding
@property
def ri(self):
"""Read terminal status line: Ring Indicator"""
if not self.is_open:
raise portNotOpenError
#~ return self._port_handle.XXX
return False # XXX an error would be better
@property
def cd(self):
"""Read terminal status line: Carrier Detect"""
if not self.is_open:
raise portNotOpenError
return self._port_handle.CDHolding
# - - platform specific - - - -
# none
| 36.126984
| 113
| 0.614455
|
45841e82593effcfc8fffbae5192ae95e20c3ae4
| 3,743
|
py
|
Python
|
custom_components/wyzeapi/binary_sensor.py
|
morozsm/ha-wyzeapi
|
9973c2bf81405bef097653542dfebf1cc2974de2
|
[
"Apache-2.0"
] | null | null | null |
custom_components/wyzeapi/binary_sensor.py
|
morozsm/ha-wyzeapi
|
9973c2bf81405bef097653542dfebf1cc2974de2
|
[
"Apache-2.0"
] | null | null | null |
custom_components/wyzeapi/binary_sensor.py
|
morozsm/ha-wyzeapi
|
9973c2bf81405bef097653542dfebf1cc2974de2
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from datetime import timedelta
from typing import List
from homeassistant.const import ATTR_ATTRIBUTION
from wyzeapy.base_client import DeviceTypes, Device, AccessTokenError, PropertyIDs
from wyzeapy.client import Client
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
DEVICE_CLASS_MOTION
)
from .const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities):
_LOGGER.debug("""Creating new WyzeApi binary sensor component""")
client = hass.data[DOMAIN][config_entry.entry_id]
def get_devices() -> List[Device]:
try:
devices = client.get_devices()
except AccessTokenError as e:
_LOGGER.warning(e)
client.reauthenticate()
devices = client.get_devices()
return devices
devices = await hass.async_add_executor_job(get_devices)
cameras = []
for device in devices:
try:
device_type = DeviceTypes(device.product_type)
if device_type == DeviceTypes.CAMERA:
cameras.append(WyzeCameraMotion(client, device))
except ValueError as e:
_LOGGER.warning("{}: Please report this error to https://github.com/JoshuaMulliken/ha-wyzeapi".format(e))
async_add_entities(cameras, True)
class WyzeCameraMotion(BinarySensorEntity):
_on: bool
_available: bool
def __init__(self, wyzeapi_client: Client, device: Device):
self._client = wyzeapi_client
self._device = device
        # Wyze event timestamps are millisecond epochs; seed with "now" in ms
        self._last_event = int(time.time()) * 1000
@property
def device_info(self):
return {
"identifiers": {
(DOMAIN, self._device.mac)
},
"name": self.name,
"manufacturer": "WyzeLabs",
"model": self._device.product_model
}
@property
def available(self) -> bool:
return self._available
@property
def name(self):
"""Return the display name of this switch."""
return self._device.nickname
@property
def is_on(self):
"""Return true if switch is on."""
return self._on
@property
def unique_id(self):
return "{}-motion".format(self._device.mac)
@property
def device_state_attributes(self):
"""Return device attributes of the entity."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"state": self.is_on,
"available": self.available,
"device model": self._device.product_model,
"mac": self.unique_id
}
@property
def device_class(self):
return DEVICE_CLASS_MOTION
def update(self):
try:
device_info = self._client.get_info(self._device)
except AccessTokenError:
self._client.reauthenticate()
device_info = self._client.get_info(self._device)
for property_id, value in device_info:
if property_id == PropertyIDs.AVAILABLE:
                self._available = value == "1"
latest_event = self._client.get_latest_event(self._device)
        if latest_event is not None:
            # Motion is "on" only when a new event arrived since the last poll
            self._on = latest_event.event_ts > self._last_event
            self._last_event = latest_event.event_ts
        else:
            self._on = False
| 29.472441
| 117
| 0.638792
|
71d7ae9081343c0ce11418130c73838282a12812
| 753
|
py
|
Python
|
test/feature_extraction/character_length_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | null | null | null |
test/feature_extraction/character_length_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | 1
|
2021-10-19T08:09:44.000Z
|
2021-10-19T08:09:44.000Z
|
test/feature_extraction/character_length_test.py
|
tmhatton/MLinPractice
|
759706e13181cec864d6aa8ece9ae7042f083e4c
|
[
"MIT"
] | null | null | null |
import unittest
import pandas as pd
from code.util import COLUMN_TWEET
from code.feature_extraction.character_length import CharacterLength
class MyTestCase(unittest.TestCase):
    def setUp(self) -> None:
        self.INPUT_COLUMN = COLUMN_TWEET
        self.extractor = CharacterLength(self.INPUT_COLUMN)
    def test_character_length(self):
        input_text = "Hallo, das ist ein Text mit 40 Zeichen."
        output = [[40]]
        input_df = pd.DataFrame()
        input_df[self.INPUT_COLUMN] = [input_text]
        char_length = self.extractor.fit_transform(input_df)
        self.assertEqual(char_length, output)
if __name__ == '__main__':
unittest.main()
| 26.892857
| 68
| 0.703851
|
f7c5a0f69494c901201d1d8cda9e65d10ffe2215
| 342
|
py
|
Python
|
ycash_cli.py
|
nultinator/python_ycash
|
d0cd4753e2e00dc734896e82aeb31ca02c5cb543
|
[
"Unlicense"
] | null | null | null |
ycash_cli.py
|
nultinator/python_ycash
|
d0cd4753e2e00dc734896e82aeb31ca02c5cb543
|
[
"Unlicense"
] | null | null | null |
ycash_cli.py
|
nultinator/python_ycash
|
d0cd4753e2e00dc734896e82aeb31ca02c5cb543
|
[
"Unlicense"
] | null | null | null |
from bitcoinrpc.authproxy import AuthServiceProxy
user = input("user: ")
password = input("password: ")
port = input("port: ")
access = AuthServiceProxy("http://" + user + ":" + password + "@127.0.0.1:" + port)
getblockchaininfo = access.getblockchaininfo()
getnewaddress = access.getnewaddress()
getbestblockhash = access.getbestblockhash()
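# Added example (not in the original script, which discarded these results):
# print the fetched values so the CLI actually shows something. The exact
# fields depend on the node's RPC responses.
print(getblockchaininfo)
print(getnewaddress)
print(getbestblockhash)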
| 38
| 83
| 0.730994
|
5cf20d57761f03df8fc82b22670e7c3211b380b8
| 652
|
py
|
Python
|
model/config.py
|
mateusap1/athenas
|
f48df6e05452be39c87479279be7eb378291c404
|
[
"MIT"
] | null | null | null |
model/config.py
|
mateusap1/athenas
|
f48df6e05452be39c87479279be7eb378291c404
|
[
"MIT"
] | null | null | null |
model/config.py
|
mateusap1/athenas
|
f48df6e05452be39c87479279be7eb378291c404
|
[
"MIT"
] | null | null | null |
path_files = {
"node_path": "/home/mateusap1/Documents/athena_core/node",
"account_path": "/home/mateusap1/Documents/athena_core/account"
}
id_config = {
"username_char_limit": 64,
"nonce_limit": 10**6,
"date_format": "%Y-%m-%d %H:%M:%S.%f",
"hash_difficulty": 2
}
contract_config = {
"minimum_judges": 1,
"maximum_judges": 256,
"minimum_rules": 1,
"maximum_rules": 1024,
"allow_sender_to_judge": False
}
verdict_config = {
"sentence_char_limit": 4096,
"description_char_limit": 8192
}
node_config = {
"transactions_limit": 10,
"transactions_expire_days": 2,
"max_transactions": 100
}
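# Hypothetical usage sketch (added; not part of the original module):
#
#     from model.config import id_config
#
#     def username_is_valid(username: str) -> bool:
#         return len(username) <= id_config["username_char_limit"]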
| 21.733333
| 67
| 0.656442
|
5a7f5fef9b23e069982151a367ca2c9ca5402fb2
| 3,619
|
py
|
Python
|
viewer/settings.py
|
ThinkDone/viewer
|
24afd5b7d246ca779f1a4a49ce5a0ef2021e85d2
|
[
"Apache-2.0"
] | null | null | null |
viewer/settings.py
|
ThinkDone/viewer
|
24afd5b7d246ca779f1a4a49ce5a0ef2021e85d2
|
[
"Apache-2.0"
] | null | null | null |
viewer/settings.py
|
ThinkDone/viewer
|
24afd5b7d246ca779f1a4a49ce5a0ef2021e85d2
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for viewer project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'viewer'
SPIDER_MODULES = ['viewer.spiders']
NEWSPIDER_MODULE = 'viewer.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Mozilla/5.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/600.1.3 (KHTML, like Gecko) Version/8.0 Mobile/12A4345d Safari/600.1.4'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'viewer.middlewares.ViewerSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'viewer.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
#'scrapy.pipelines.images.ImagesPipeline': 1,
'viewer.pipelines.MyImagesPipeline': 1,
}
# -- Attention: this is expected to be an absolute path
IMAGES_STORE = '..'
IMAGES_URLS_FIELD = 'image_urls'
IMAGES_RESULT_FIELD = 'images'
IMAGES_THUMBS = {
'small': (128, 128),
}
IMAGES_MIN_HEIGHT = 513
IMAGES_MIN_WIDTH = 513
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 60
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.141509
| 152
| 0.770655
|
0ed0ce6532629e75d27261e5eb3a6c278a37a737
| 3,559
|
py
|
Python
|
model-optimizer/mo/utils/import_extensions.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 1
|
2021-02-20T21:48:36.000Z
|
2021-02-20T21:48:36.000Z
|
model-optimizer/mo/utils/import_extensions.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/mo/utils/import_extensions.py
|
zhoub/dldt
|
e42c01cf6e1d3aefa55e2c5df91f1054daddc575
|
[
"Apache-2.0"
] | 1
|
2021-02-19T01:06:12.000Z
|
2021-02-19T01:06:12.000Z
|
"""
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import logging as log
import os
import pkgutil
import sys
from mo.back.replacement import BackReplacementPattern
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.op import Op
from mo.utils.class_registration import _check_unique_ids, update_registration, get_enabled_and_disabled_transforms
def import_by_path(path: str, middle_names: list = ()):
for module_loader, name, ispkg in pkgutil.iter_modules([path]):
importlib.import_module('{}.{}'.format('.'.join(middle_names), name))
def default_path():
EXT_DIR_NAME = 'extensions'
return os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, EXT_DIR_NAME))
def load_dir(framework: str, path: str, get_front_classes: callable):
"""
Assuming the following sub-directory structure for path:
front/
<framework>/
<other_files>.py
<other_directories>/
<other_files>.py
ops/
<ops_files>.py
middle/
<other_files>.py
back/
<other_files>.py
This function loads modules in the following order:
1. ops/<ops_files>.py
2. front/<other_files>.py
3. front/<framework>/<other_files>.py
4. middle/<other_files>.py
5. back/<other_files>.py
    Handlers loaded later override handlers registered earlier for an op.
    Stages 1, 2 and 3 may all register handlers for the same op, while stage 4
    registers a transformation pass and should not conflict with anything
    loaded by 1, 2 or 3.
    Files from front/<other_directories> are not loaded.
"""
log.info("Importing extensions from: {}".format(path))
root_dir, ext = os.path.split(path)
sys.path.insert(0, root_dir)
enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms()
front_classes = get_front_classes()
internal_dirs = {
('ops', ): [Op],
('front', ): front_classes,
('front', framework): front_classes,
('middle', ): [MiddleReplacementPattern],
('back', ): [BackReplacementPattern]}
if ext == 'mo':
internal_dirs[('front', framework, 'extractors')] = front_classes
for p in internal_dirs.keys():
import_by_path(os.path.join(path, *p), [ext, *p])
update_registration(internal_dirs[p], enabled_transforms, disabled_transforms)
sys.path.remove(root_dir)
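# Added note: the loading order promised in the docstring relies on
# internal_dirs preserving insertion order, which plain dicts guarantee only
# on Python 3.7+ (and CPython 3.6); on older interpreters an ordered mapping
# would be needed to keep the ops -> front -> middle -> back sequence
# deterministic.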
def load_dirs(framework: str, dirs: list, get_front_classes: callable):
if dirs is None:
return
mo_inner_extensions = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'mo'))
dirs.insert(0, mo_inner_extensions)
dirs = [os.path.abspath(e) for e in dirs]
if default_path() not in dirs:
dirs.insert(0, default_path())
for path in dirs:
load_dir(framework, path, get_front_classes)
_check_unique_ids()
| 34.553398
| 115
| 0.671818
|
82e26ba01763c708ef98f3b5588267759807ac19
| 22,886
|
py
|
Python
|
src/ip-group/azext_ip_group/vendored_sdks/v2019_09_01/operations/_load_balancers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/ip-group/azext_ip_group/vendored_sdks/v2019_09_01/operations/_load_balancers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/ip-group/azext_ip_group/vendored_sdks/v2019_09_01/operations/_load_balancers_operations.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class LoadBalancersOperations(object):
"""LoadBalancersOperations operations.
    You should not instantiate this class directly. Instead, create a Client instance, which will create this class for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-09-01"
self.config = config
def _delete_initial(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def get(
self, resource_group_name, load_balancer_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LoadBalancer or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2019_09_01.models.LoadBalancer or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def _create_or_update_initial(
self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'LoadBalancer')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load
balancer operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.LoadBalancer
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns LoadBalancer or
ClientRawResponse<LoadBalancer> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_09_01.models.LoadBalancer]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_09_01.models.LoadBalancer]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def update_tags(
self, resource_group_name, load_balancer_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates a load balancer tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: LoadBalancer or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2019_09_01.models.LoadBalancer or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancers in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2019_09_01.models.LoadBalancerPaged[~azure.mgmt.network.v2019_09_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2019_09_01.models.LoadBalancerPaged[~azure.mgmt.network.v2019_09_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'}
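# --- Hedged usage sketch (not part of the generated file) ---
# Illustrates the long-running-operation and paging patterns above. The
# `network_client` object and the 'my-rg'/'my-lb' names are assumptions.
def _example_usage(network_client):
    # delete() returns an LROPoller; wait() blocks until ARM reports completion
    poller = network_client.load_balancers.delete('my-rg', 'my-lb')
    poller.wait()
    # list_all() returns a paged iterator that fetches pages lazily
    for lb in network_client.load_balancers.list_all():
        print(lb.name)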
| 46.327935
| 170
| 0.666914
|
090a1b1a7063321309c06ecbcf28a1f3b1fa5183
| 6,442
|
py
|
Python
|
tests/unit/test_anonymize_files.py
|
colgate-cs-research/netconan
|
310a9afcb0a3e6fdec39a3bd62cccdcb2069de49
|
[
"Apache-2.0"
] | 1
|
2021-11-13T10:43:32.000Z
|
2021-11-13T10:43:32.000Z
|
tests/unit/test_anonymize_files.py
|
colgate-cs-research/netconan
|
310a9afcb0a3e6fdec39a3bd62cccdcb2069de49
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_anonymize_files.py
|
colgate-cs-research/netconan
|
310a9afcb0a3e6fdec39a3bd62cccdcb2069de49
|
[
"Apache-2.0"
] | null | null | null |
"""Test file anonymization."""
# Copyright 2018 Intentionet
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from testfixtures import LogCapture
from netconan.anonymize_files import anonymize_file, anonymize_files
_INPUT_CONTENTS = """
# Intentionet's sensitive test file
ip address 192.168.2.1 255.255.255.255
my hash is $1$salt$ABCDEFGHIJKLMNOPQRS
password foobar
"""
_REF_CONTENTS = """
# 1cbbc2's fd8607 test file
ip address 192.168.139.13 255.255.255.255
my hash is $1$0000$CxUUGIrqPb7GaB5midrQZ.
password netconanRemoved1
"""
_SALT = "TESTSALT"
_SENSITIVE_WORDS = [
"intentionet",
"sensitive",
]
def test_anonymize_files_bad_input_empty(tmpdir):
"""Test anonymize_files with empty input dir."""
input_dir = tmpdir.mkdir("input")
output_dir = tmpdir.mkdir("output")
with pytest.raises(ValueError, match='Input directory is empty'):
anonymize_files(str(input_dir), str(output_dir), True, True, salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
def test_anonymize_files_bad_input_missing(tmpdir):
"""Test anonymize_files with non-existent input."""
filename = "test.txt"
input_file = tmpdir.join(filename)
output_file = tmpdir.mkdir("out").join(filename)
with pytest.raises(ValueError, match='Input does not exist'):
anonymize_files(str(input_file), str(output_file), True, True,
salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
def test_anonymize_files_bad_output_file(tmpdir):
"""Test anonymize_files when output 'file' already exists but is a dir."""
filename = "test.txt"
input_file = tmpdir.join(filename)
input_file.write(_INPUT_CONTENTS)
output_file = tmpdir.mkdir("out").mkdir(filename)
with pytest.raises(ValueError, match='Cannot write output file.*'):
anonymize_file(str(input_file), str(output_file))
# Anonymizing files should complete okay, because it skips the errored file
with LogCapture() as log_capture:
anonymize_files(str(input_file), str(output_file), True, True,
salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
# Confirm the correct message is logged
log_capture.check_present(
('root', 'ERROR', 'Failed to anonymize file {}'.format(str(input_file)))
)
# Confirm the exception info was also logged
assert ('Cannot write output file; output file is a directory'
in str(log_capture.records[-1].exc_info[1]))
def test_anonymize_files_bad_output_dir(tmpdir):
"""Test anonymize_files when output 'dir' already exists but is a file."""
filename = "test.txt"
input_dir = tmpdir.mkdir("input")
input_dir.join(filename).write(_INPUT_CONTENTS)
output_file = tmpdir.join("out")
output_file.write('blah')
with pytest.raises(ValueError, match='Output path must be a directory.*'):
anonymize_files(str(input_dir), str(output_file), True, True,
salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
def test_anonymize_files_dir(tmpdir):
"""Test anonymize_files with a file in root of input dir."""
filename = "test.txt"
input_dir = tmpdir.mkdir("input")
input_dir.join(filename).write(_INPUT_CONTENTS)
output_dir = tmpdir.mkdir("output")
output_file = output_dir.join(filename)
anonymize_files(str(input_dir), str(output_dir), True, True, salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
# Make sure output file exists and matches the ref
assert(os.path.isfile(str(output_file)))
assert(read_file(str(output_file)) == _REF_CONTENTS)
def test_anonymize_files_dir_skip_hidden(tmpdir):
"""Test that file starting with '.' is skipped."""
filename = ".test.txt"
input_dir = tmpdir.mkdir("input")
input_file = input_dir.join(filename)
input_file.write(_INPUT_CONTENTS)
output_dir = tmpdir.mkdir("output")
output_file = output_dir.join(filename)
anonymize_files(str(input_dir), str(output_dir), True, True, salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
# Make sure output file does not exist
assert(not os.path.exists(str(output_file)))
def test_anonymize_files_dir_nested(tmpdir):
"""Test anonymize_files with files in nested dirs i.e. not at root of input dir."""
filename = "test.txt"
input_dir = tmpdir.mkdir("input")
input_dir.mkdir("subdir1").join(filename).write(_INPUT_CONTENTS)
input_dir.mkdir("subdir2").mkdir("subsubdir").join(filename).write(_INPUT_CONTENTS)
output_dir = tmpdir.mkdir("output")
output_file_1 = output_dir.join("subdir1").join(filename)
output_file_2 = output_dir.join("subdir2").join("subsubdir").join(filename)
anonymize_files(str(input_dir), str(output_dir), True, True, salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
# Make sure both output files exists and match the ref
assert(os.path.isfile(str(output_file_1)))
assert(read_file(str(output_file_1)) == _REF_CONTENTS)
assert(os.path.isfile(str(output_file_2)))
assert(read_file(str(output_file_2)) == _REF_CONTENTS)
def test_anonymize_files_file(tmpdir):
"""Test anonymize_files with input file instead of dir."""
filename = "test.txt"
input_file = tmpdir.join(filename)
input_file.write(_INPUT_CONTENTS)
output_file = tmpdir.mkdir("out").join(filename)
anonymize_files(str(input_file), str(output_file), True, True, salt=_SALT,
sensitive_words=_SENSITIVE_WORDS)
# Make sure output file exists and matches the ref
assert(os.path.isfile(str(output_file)))
assert(read_file(str(output_file)) == _REF_CONTENTS)
def read_file(file_path):
"""Read and return contents of file at specified path."""
with open(file_path, 'r') as f:
return f.read()
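# --- Hedged usage sketch (mirrors the tests above) ---
# With a fixed salt the anonymization is deterministic, which is what lets the
# tests compare output against _REF_CONTENTS byte-for-byte.
def _example_roundtrip(tmpdir):
    src = tmpdir.join("config.txt")
    src.write(_INPUT_CONTENTS)
    dst = tmpdir.mkdir("anon").join("config.txt")
    anonymize_files(str(src), str(dst), True, True, salt=_SALT,
                    sensitive_words=_SENSITIVE_WORDS)
    assert read_file(str(dst)) == _REF_CONTENTS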
| 35.01087
| 87
| 0.698386
|
53efb15e8f5384652a362dc6aab507dd0dafd0d3
| 3,111
|
py
|
Python
|
tumn/utils/database.py
|
hatsu-koi/tumn-server
|
d425a1d3d59ca016e0424487ffa8fddb16b79e1c
|
[
"MIT"
] | null | null | null |
tumn/utils/database.py
|
hatsu-koi/tumn-server
|
d425a1d3d59ca016e0424487ffa8fddb16b79e1c
|
[
"MIT"
] | null | null | null |
tumn/utils/database.py
|
hatsu-koi/tumn-server
|
d425a1d3d59ca016e0424487ffa8fddb16b79e1c
|
[
"MIT"
] | null | null | null |
class Database:
""" A dictionary that allows multiple keys for one value """
def __init__(self):
self.keys = {}
self.values = {}
def __getitem__(self, item): # <---SQL SELECT statement
values = self.keys[item]
if len(values) > 1:
return sorted(list(values))
elif len(values) == 1:
return list(values)[0]
    def __setitem__(self, key, value):  # <---SQL INSERT statement
        # Create the forward and reverse buckets on demand, then link both
        # directions; this covers the new-key, new-value, and new-relationship
        # cases uniformly.
        self.keys.setdefault(key, set()).add(value)
        self.values.setdefault(value, set()).add(key)
    def update(self, key, old_value, new_value):
        """update is a special case because __setitem__ can't see that
        you want to propagate your update onto multiple values. """
        if old_value in self.keys[key]:
            affected_keys = list(self.values[old_value])
            for affected_key in affected_keys:
                # re-point every key that referenced old_value to new_value
                self.__setitem__(affected_key, new_value)
                self.keys[affected_key].remove(old_value)
            del self.values[old_value]
        else:
            raise KeyError("key: {} does not have value: {}".format(key, old_value))
def __delitem__(self, key, value=None): # <---SQL DELETE statement
if value is None:
# All the keys relations are to be deleted.
try:
value_set = self.keys[key]
for value in value_set:
self.values[value].remove(key)
if not self.values[value]:
del self.values[value]
del self.keys[key] # then we delete the key.
except KeyError:
raise KeyError("key not found")
else: # then only a single relationships is being removed.
try:
if value in self.keys[key]: # this is a set.
self.keys[key].remove(value)
self.values[value].remove(key)
if not self.keys[key]: # if the set is empty, we remove the key
del self.keys[key]
if not self.values[value]: # if the set is empty, we remove the value
del self.values[value]
except KeyError:
raise KeyError("key not found")
def iterload(self, key_list, value_list):
for key in key_list:
for value in value_list:
self.__setitem__(key, value)
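# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the many-to-many mapping: two keys share one value, and
# update() re-points every key that referenced the old value.
def _example_database():
    db = Database()
    db['alias'] = 'v1'
    db['name'] = 'v1'                 # second key for the same value
    assert db['alias'] == 'v1'
    db.update('alias', 'v1', 'v2')    # propagates to 'name' as well
    assert db['name'] == 'v2'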
| 42.040541
| 86
| 0.535198
|
6a56095a85938df327f205cc35e6fc97d4b5c424
| 9,790
|
py
|
Python
|
data/user_params.py
|
JulianoGianlupi/iu399sp19p017
|
89594c3a5ad5d84301772802a14d8905fad15cdc
|
[
"BSD-3-Clause"
] | null | null | null |
data/user_params.py
|
JulianoGianlupi/iu399sp19p017
|
89594c3a5ad5d84301772802a14d8905fad15cdc
|
[
"BSD-3-Clause"
] | null | null | null |
data/user_params.py
|
JulianoGianlupi/iu399sp19p017
|
89594c3a5ad5d84301772802a14d8905fad15cdc
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box
class UserTab(object):
def __init__(self):
micron_units = Label('micron') # use "option m" (Mac, for micro symbol)
constWidth = '180px'
tab_height = '500px'
stepsize = 10
#style = {'description_width': '250px'}
style = {'description_width': '25%'}
layout = {'width': '400px'}
name_button_layout={'width':'25%'}
widget_layout = {'width': '15%'}
units_button_layout ={'width':'15%'}
desc_button_layout={'width':'45%'}
param_name1 = Button(description='random_seed', disabled=True, layout=name_button_layout)
param_name1.style.button_color = 'tan'
self.random_seed = IntText(
value=0,
step=1,
style=style, layout=widget_layout)
param_name2 = Button(description='motile_cell_persistence_time', disabled=True, layout=name_button_layout)
param_name2.style.button_color = 'tan'
self.motile_cell_persistence_time = FloatText(
value=15,
step=1,
style=style, layout=widget_layout)
param_name3 = Button(description='motile_cell_migration_speed', disabled=True, layout=name_button_layout)
param_name3.style.button_color = 'tan'
self.motile_cell_migration_speed = FloatText(
value=0.5,
step=0.1,
style=style, layout=widget_layout)
param_name4 = Button(description='motile_cell_relative_adhesion', disabled=True, layout=name_button_layout)
param_name4.style.button_color = 'tan'
self.motile_cell_relative_adhesion = FloatText(
value=0.05,
step=0.01,
style=style, layout=widget_layout)
param_name5 = Button(description='motile_cell_apoptosis_rate', disabled=True, layout=name_button_layout)
param_name5.style.button_color = 'tan'
self.motile_cell_apoptosis_rate = FloatText(
value=0.0,
step=0.01,
style=style, layout=widget_layout)
param_name6 = Button(description='motile_cell_relative_cycle_entry_rate', disabled=True, layout=name_button_layout)
param_name6.style.button_color = 'tan'
self.motile_cell_relative_cycle_entry_rate = FloatText(
value=0.1,
step=0.01,
style=style, layout=widget_layout)
param_name7 = Button(description='birth_interval', disabled=True, layout=name_button_layout)
param_name7.style.button_color = 'tan'
self.birth_interval = FloatText(
value=60,
step=1,
style=style, layout=widget_layout)
param_name8 = Button(description='volume_total', disabled=True, layout=name_button_layout)
param_name8.style.button_color = 'tan'
self.volume_total = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name9 = Button(description='target_fluid_frac', disabled=True, layout=name_button_layout)
param_name9.style.button_color = 'tan'
self.target_fluid_frac = FloatText(
value=0.75,
step=0.1,
style=style, layout=widget_layout)
param_name10 = Button(description='fluid_change_rate', disabled=True, layout=name_button_layout)
param_name10.style.button_color = 'tan'
self.fluid_change_rate = FloatText(
value=0.15,
step=0.01,
style=style, layout=widget_layout)
param_name11 = Button(description='cytoplasmic_biomass_change_rate', disabled=True, layout=name_button_layout)
param_name11.style.button_color = 'tan'
self.cytoplasmic_biomass_change_rate = FloatText(
value=0.15,
step=0.01,
style=style, layout=widget_layout)
units_button1 = Button(description='', disabled=True, layout=units_button_layout)
units_button1.style.button_color = 'tan'
units_button2 = Button(description='min', disabled=True, layout=units_button_layout)
units_button2.style.button_color = 'tan'
units_button3 = Button(description='micron/min', disabled=True, layout=units_button_layout)
units_button3.style.button_color = 'tan'
units_button4 = Button(description='', disabled=True, layout=units_button_layout)
units_button4.style.button_color = 'tan'
units_button5 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button5.style.button_color = 'tan'
units_button6 = Button(description='', disabled=True, layout=units_button_layout)
units_button6.style.button_color = 'tan'
units_button7 = Button(description='min', disabled=True, layout=units_button_layout)
units_button7.style.button_color = 'tan'
units_button8 = Button(description='micron^3', disabled=True, layout=units_button_layout)
units_button8.style.button_color = 'tan'
units_button9 = Button(description='', disabled=True, layout=units_button_layout)
units_button9.style.button_color = 'tan'
units_button10 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button10.style.button_color = 'tan'
units_button11 = Button(description='1/min', disabled=True, layout=units_button_layout)
units_button11.style.button_color = 'tan'
        row1 = [param_name1, self.random_seed, units_button1, ]
        row2 = [param_name2, self.motile_cell_persistence_time, units_button2, ]
        row3 = [param_name3, self.motile_cell_migration_speed, units_button3, ]
        row4 = [param_name4, self.motile_cell_relative_adhesion, units_button4, ]
        row5 = [param_name5, self.motile_cell_apoptosis_rate, units_button5, ]
        row6 = [param_name6, self.motile_cell_relative_cycle_entry_rate, units_button6, ]
        row7 = [param_name7, self.birth_interval, units_button7, ]
        row8 = [param_name8, self.volume_total, units_button8, ]
        row9 = [param_name9, self.target_fluid_frac, units_button9, ]
        row10 = [param_name10, self.fluid_change_rate, units_button10, ]
        row11 = [param_name11, self.cytoplasmic_biomass_change_rate, units_button11, ]
        box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%')
        box1 = Box(children=row1, layout=box_layout)
        box2 = Box(children=row2, layout=box_layout)
        box3 = Box(children=row3, layout=box_layout)
        box4 = Box(children=row4, layout=box_layout)
        box5 = Box(children=row5, layout=box_layout)
        box6 = Box(children=row6, layout=box_layout)
        box7 = Box(children=row7, layout=box_layout)
        box8 = Box(children=row8, layout=box_layout)
        box9 = Box(children=row9, layout=box_layout)
        box10 = Box(children=row10, layout=box_layout)
        box11 = Box(children=row11, layout=box_layout)
        self.tab = VBox([
            box1,
            box2,
            box3,
            box4,
            box5,
            box6,
            box7,
            box8,
            box9,
            box10,
            box11,
        ])
# Populate the GUI widgets with values from the XML
def fill_gui(self, xml_root):
uep = xml_root.find('.//user_parameters') # find unique entry point into XML
self.random_seed.value = int(uep.find('.//random_seed').text)
self.motile_cell_persistence_time.value = float(uep.find('.//motile_cell_persistence_time').text)
self.motile_cell_migration_speed.value = float(uep.find('.//motile_cell_migration_speed').text)
self.motile_cell_relative_adhesion.value = float(uep.find('.//motile_cell_relative_adhesion').text)
self.motile_cell_apoptosis_rate.value = float(uep.find('.//motile_cell_apoptosis_rate').text)
self.motile_cell_relative_cycle_entry_rate.value = float(uep.find('.//motile_cell_relative_cycle_entry_rate').text)
self.birth_interval.value = float(uep.find('.//birth_interval').text)
self.volume_total.value = float(uep.find('.//volume_total').text)
self.target_fluid_frac.value = float(uep.find('.//target_fluid_frac').text)
self.fluid_change_rate.value = float(uep.find('.//fluid_change_rate').text)
self.cytoplasmic_biomass_change_rate.value = float(uep.find('.//cytoplasmic_biomass_change_rate').text)
# Read values from the GUI widgets to enable editing XML
def fill_xml(self, xml_root):
uep = xml_root.find('.//user_parameters') # find unique entry point into XML
uep.find('.//random_seed').text = str(self.random_seed.value)
uep.find('.//motile_cell_persistence_time').text = str(self.motile_cell_persistence_time.value)
uep.find('.//motile_cell_migration_speed').text = str(self.motile_cell_migration_speed.value)
uep.find('.//motile_cell_relative_adhesion').text = str(self.motile_cell_relative_adhesion.value)
uep.find('.//motile_cell_apoptosis_rate').text = str(self.motile_cell_apoptosis_rate.value)
uep.find('.//motile_cell_relative_cycle_entry_rate').text = str(self.motile_cell_relative_cycle_entry_rate.value)
uep.find('.//birth_interval').text = str(self.birth_interval.value)
uep.find('.//volume_total').text = str(self.volume_total.value)
uep.find('.//target_fluid_frac').text = str(self.target_fluid_frac.value)
uep.find('.//fluid_change_rate').text = str(self.fluid_change_rate.value)
uep.find('.//cytoplasmic_biomass_change_rate').text = str(self.cytoplasmic_biomass_change_rate.value)
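# --- Hedged usage sketch (not part of the generated file) ---
# Round-trips a PhysiCell configuration through the widget tab; the file name
# is an assumption for illustration.
def _example_roundtrip(xml_path='PhysiCell_settings.xml'):
    import xml.etree.ElementTree as ET
    tree = ET.parse(xml_path)
    tab = UserTab()
    tab.fill_gui(tree.getroot())   # XML -> widget values
    tab.random_seed.value = 42     # edit via the GUI layer
    tab.fill_xml(tree.getroot())   # widget values -> XML
    tree.write(xml_path)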
| 46.842105
| 123
| 0.681001
|
36500818cca8fdf195f0801af57962e54e36489e
| 4,281
|
py
|
Python
|
reagent/samplers/frechet.py
|
JiayingClaireWu/ReAgent
|
3f2365c5bab396b3e965f77cd8d4f0ac15ae2f7b
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/samplers/frechet.py
|
JiayingClaireWu/ReAgent
|
3f2365c5bab396b3e965f77cd8d4f0ac15ae2f7b
|
[
"BSD-3-Clause"
] | null | null | null |
reagent/samplers/frechet.py
|
JiayingClaireWu/ReAgent
|
3f2365c5bab396b3e965f77cd8d4f0ac15ae2f7b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Optional
import reagent.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.gym.types import Sampler
from torch.distributions import Gumbel
class FrechetSort(Sampler):
@resolve_defaults
def __init__(
self,
shape: float = 1.0,
topk: Optional[int] = None,
equiv_len: Optional[int] = None,
log_scores: bool = False,
):
"""FréchetSort is a softer version of descending sort which samples all possible
orderings of items favoring orderings which resemble descending sort. This can
be used to convert descending sort by rank score into a differentiable,
stochastic policy amenable to policy gradient algorithms.
:param shape: parameter of Frechet Distribution. Lower values correspond to
aggressive deviations from descending sort.
        :param topk: If specified, only the first topk actions are returned.
:param equiv_len: Orders are considered equivalent if the top equiv_len match. Used
in probability computations
        :param log_scores: Scores passed in are already log-transformed. In this
            case, we simply add Gumbel noise.
Example:
Consider the sampler:
sampler = FrechetSort(shape=3, topk=5, equiv_len=3)
Given a set of scores, this sampler will produce indices of items roughly
resembling a argsort by scores in descending order. The higher the shape,
the more it would resemble a descending argsort. `topk=5` means only the top
5 ranks will be output. The `equiv_len` determines what orders are considered
equivalent for probability computation. In this example, the sampler will
produce probability for the top 3 items appearing in a given order for the
`log_prob` call.
"""
self.shape = shape
self.topk = topk
self.upto = equiv_len
if topk is not None:
if equiv_len is None:
self.upto = topk
# pyre-fixme[58]: `>` is not supported for operand types `Optional[int]`
# and `Optional[int]`.
if self.upto > self.topk:
raise ValueError(f"Equiv length {equiv_len} cannot exceed topk={topk}.")
self.gumbel_noise = Gumbel(0, 1.0 / shape)
self.log_scores = log_scores
@staticmethod
def select_indices(scores: torch.Tensor, actions: torch.Tensor) -> torch.Tensor:
"""Helper for scores[actions] that are also works for batched tensors"""
if len(actions.shape) > 1:
num_rows = scores.size(0)
row_indices = torch.arange(num_rows).unsqueeze(0).T # pyre-ignore[ 16 ]
return scores[row_indices, actions].T
else:
return scores[actions]
def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
"""Sample a ranking according to Frechet sort. Note that possible_actions_mask
is ignored as the list of rankings scales exponentially with slate size and
number of items and it can be difficult to enumerate them."""
assert scores.dim() == 2, "sample_action only accepts batches"
log_scores = scores if self.log_scores else torch.log(scores)
perturbed = log_scores + self.gumbel_noise.sample((scores.shape[1],))
action = torch.argsort(perturbed.detach(), descending=True)
if self.topk is not None:
action = action[: self.topk]
log_prob = self.log_prob(scores, action)
return rlt.ActorOutput(action, log_prob)
def log_prob(self, scores: torch.Tensor, action) -> torch.Tensor:
"""What is the probability of a given set of scores producing the given
list of permutations only considering the top `equiv_len` ranks?"""
log_scores = scores if self.log_scores else torch.log(scores)
s = self.select_indices(log_scores, action)
n = len(log_scores)
p = self.upto if self.upto is not None else n
return -sum(
torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0))
for k in range(p) # pyre-ignore
)
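# --- Hedged usage sketch (not part of the original module) ---
# Draws one stochastic ranking for a single slate of 10 items; the shape value
# is an arbitrary choice for illustration.
def _example_frechet_sample():
    sampler = FrechetSort(shape=3.0)
    scores = torch.rand(1, 10)            # batch of 1, 10 candidate items
    out = sampler.sample_action(scores)   # ActorOutput(action, log_prob)
    return out.action, out.log_prob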
| 45.063158
| 94
| 0.662228
|
3e6415337fbd44a89c93eb73f8a20055bf50f994
| 344
|
py
|
Python
|
metatransform/datawrapper.py
|
analyticsdept/py-metatransform
|
fa28cb25e85275563eef6a54ab409e46289fbaab
|
[
"MIT"
] | null | null | null |
metatransform/datawrapper.py
|
analyticsdept/py-metatransform
|
fa28cb25e85275563eef6a54ab409e46289fbaab
|
[
"MIT"
] | null | null | null |
metatransform/datawrapper.py
|
analyticsdept/py-metatransform
|
fa28cb25e85275563eef6a54ab409e46289fbaab
|
[
"MIT"
] | null | null | null |
class MetaTransformDataWrapper():
def __init__(self, data=None, target=None):
self.data = data
self.target = target
self._dict = {
'data': self.data,
'target': self.target
}
    def __repr__(self) -> str:
        # __repr__ must return a string, and to_dict is a method, so call it
        return str(self.to_dict())
def to_dict(self):
return self._dict
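# --- Hedged usage sketch ---
# Minimal demonstration of the wrapper; the sample lists are placeholders.
if __name__ == '__main__':
    wrapper = MetaTransformDataWrapper(data=[1, 2, 3], target=[0, 1, 1])
    print(wrapper.to_dict())   # {'data': [1, 2, 3], 'target': [0, 1, 1]}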
| 24.571429
| 47
| 0.55814
|
6b2151d2de62f96a90e178c3fe27a05d9af628f6
| 7,888
|
py
|
Python
|
src/scenic/simulators/carla/controller.py
|
shalinmehtalgsvl/Scenic
|
7b90d0181e99870c8cade9004b8280ff6b03c49a
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/simulators/carla/controller.py
|
shalinmehtalgsvl/Scenic
|
7b90d0181e99870c8cade9004b8280ff6b03c49a
|
[
"BSD-3-Clause"
] | null | null | null |
src/scenic/simulators/carla/controller.py
|
shalinmehtalgsvl/Scenic
|
7b90d0181e99870c8cade9004b8280ff6b03c49a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) # Copyright (c) 2018-2020 CVC.
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module contains PID controllers to perform lateral and longitudinal control. """
from collections import deque
import math
import numpy as np
import carla
from scenic.simulators.carla.misc import get_speed
class VehiclePIDController():
"""
VehiclePIDController is the combination of two PID controllers
(lateral and longitudinal) to perform the
    low level control of a vehicle from the client side
"""
def __init__(self, vehicle, args_lateral=None, args_longitudinal=None, max_throttle=0.75, max_brake=0.3, max_steering=0.8):
"""
Constructor method.
:param vehicle: actor to apply to local planner logic onto
:param args_lateral: dictionary of arguments to set the lateral PID controller
using the following semantics:
K_P -- Proportional term
K_D -- Differential term
K_I -- Integral term
:param args_longitudinal: dictionary of arguments to set the longitudinal
PID controller using the following semantics:
K_P -- Proportional term
K_D -- Differential term
K_I -- Integral term
"""
self.max_brake = max_brake
self.max_throt = max_throttle
self.max_steer = max_steering
self._vehicle = vehicle
self._world = self._vehicle.get_world()
self.past_steering = self._vehicle.get_control().steer
        if args_longitudinal is not None:
            self._lon_controller = PIDLongitudinalController(self._vehicle, **args_longitudinal)
        else:
            self._lon_controller = PIDLongitudinalController(self._vehicle)
        if args_lateral is not None:
            self._lat_controller = PIDLateralController(self._vehicle, **args_lateral)
        else:
            self._lat_controller = PIDLateralController(self._vehicle)
def run_step(self, target_speed, waypoint):
"""
Execute one step of control invoking both lateral and longitudinal
PID controllers to reach a target waypoint
at a given target_speed.
:param target_speed: desired vehicle speed
:param waypoint: target location encoded as a waypoint
        :return: carla.VehicleControl with the computed throttle, brake and steering
"""
acceleration = self._lon_controller.run_step(target_speed)
current_steering = self._lat_controller.run_step(waypoint)
control = carla.VehicleControl()
if acceleration >= 0.0:
control.throttle = min(acceleration, self.max_throt)
control.brake = 0.0
else:
control.throttle = 0.0
control.brake = min(abs(acceleration), self.max_brake)
# Steering regulation: changes cannot happen abruptly, can't steer too much.
if current_steering > self.past_steering + 0.1:
current_steering = self.past_steering + 0.1
elif current_steering < self.past_steering - 0.1:
current_steering = self.past_steering - 0.1
if current_steering >= 0:
steering = min(self.max_steer, current_steering)
else:
steering = max(-self.max_steer, current_steering)
control.steer = steering
control.hand_brake = False
control.manual_gear_shift = False
self.past_steering = steering
return control
class PIDLongitudinalController():
"""
PIDLongitudinalController implements longitudinal control using a PID.
"""
def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):
"""
Constructor method.
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._k_p = K_P
self._k_d = K_D
self._k_i = K_I
self._dt = dt
self._error_buffer = deque(maxlen=10)
def run_step(self, target_speed, debug=False):
"""
Execute one step of longitudinal control to reach a given target speed.
:param target_speed: target speed in Km/h
:param debug: boolean for debugging
:return: throttle control
"""
current_speed = get_speed(self._vehicle)
if debug:
print('Current speed = {}'.format(current_speed))
return self._pid_control(target_speed, current_speed)
def _pid_control(self, target_speed, current_speed):
"""
Estimate the throttle/brake of the vehicle based on the PID equations
:param target_speed: target speed in Km/h
:param current_speed: current speed of the vehicle in Km/h
:return: throttle/brake control
"""
error = target_speed - current_speed
self._error_buffer.append(error)
if len(self._error_buffer) >= 2:
_de = (self._error_buffer[-1] - self._error_buffer[-2]) / self._dt
_ie = sum(self._error_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._k_p * error) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0)
class PIDLateralController():
"""
PIDLateralController implements lateral control using a PID.
"""
def __init__(self, vehicle, K_P=1.0, K_D=0.0, K_I=0.0, dt=0.03):
"""
Constructor method.
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._k_p = K_P
self._k_d = K_D
self._k_i = K_I
self._dt = dt
self._e_buffer = deque(maxlen=10)
def run_step(self, waypoint):
"""
Execute one step of lateral control to steer
        the vehicle towards a certain waypoint.
:param waypoint: target waypoint
:return: steering control in the range [-1, 1] where:
-1 maximum steering to left
+1 maximum steering to right
"""
return self._pid_control(waypoint, self._vehicle.get_transform())
def _pid_control(self, waypoint, vehicle_transform):
"""
Estimate the steering angle of the vehicle based on the PID equations
:param waypoint: target waypoint
:param vehicle_transform: current transform of the vehicle
:return: steering control in the range [-1, 1]
"""
v_begin = vehicle_transform.location
v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
w_vec = np.array([waypoint.transform.location.x -
v_begin.x, waypoint.transform.location.y -
v_begin.y, 0.0])
_dot = math.acos(np.clip(np.dot(w_vec, v_vec) /
(np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))
_cross = np.cross(v_vec, w_vec)
if _cross[2] < 0:
_dot *= -1.0
self._e_buffer.append(_dot)
if len(self._e_buffer) >= 2:
_de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
_ie = sum(self._e_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._k_p * _dot) + (self._k_d * _de) + (self._k_i * _ie), -1.0, 1.0)
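# --- Hedged usage sketch (not part of the original module) ---
# One control tick, assuming a connected CARLA simulator; `vehicle` and
# `target_waypoint` are placeholders obtained elsewhere (e.g. from the map).
def _example_control_tick(vehicle, target_waypoint, target_speed_kmh=30.0):
    controller = VehiclePIDController(
        vehicle,
        args_lateral={'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.0, 'dt': 0.05},
        args_longitudinal={'K_P': 1.0, 'K_D': 0.0, 'K_I': 0.05, 'dt': 0.05})
    control = controller.run_step(target_speed_kmh, target_waypoint)
    vehicle.apply_control(control)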
| 35.214286
| 127
| 0.612576
|
f9bc39a5bee669c860a21fc2dd49a59b022c5826
| 23,479
|
py
|
Python
|
ai4good/webapp/cm_model_report_utils.py
|
macapakaz/model-server
|
db2451da7dfbe33f3e9cf481122b11551589b7c0
|
[
"MIT"
] | null | null | null |
ai4good/webapp/cm_model_report_utils.py
|
macapakaz/model-server
|
db2451da7dfbe33f3e9cf481122b11551589b7c0
|
[
"MIT"
] | null | null | null |
ai4good/webapp/cm_model_report_utils.py
|
macapakaz/model-server
|
db2451da7dfbe33f3e9cf481122b11551589b7c0
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
import pandas as pd
from ai4good.models.cm.simulator import AGE_SEP
DIGIT_SEP = ' to '  # textual separator between IQR bounds (avoids confusion with a minus sign)
def timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print('%s function took %0.1f s' % (f.__name__, (time2-time1)))
return ret
return wrap
def load_report(mr, params) -> pd.DataFrame:
return normalize_report(mr.get('report'), params)
def normalize_report(df, params):
df = df.copy()
df.R0 = df.R0.apply(lambda x: round(complex(x).real, 1))
df_temp = df.drop(['Time', 'R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'],
axis=1)
df_temp = df_temp * params.population
df.update(df_temp)
return df
@timing
def prevalence_all_table(df):
# calculate Peak Day IQR and Peak Number IQR for each of the 'incident' variables to table
table_params = ['Infected (symptomatic)', 'Hospitalised', 'Critical', 'Change in Deaths']
grouped = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])
incident_rs = {}
for index, group in grouped:
# for each RO value find out the peak days for each table params
group = group.set_index('Time')
incident = {}
for param in table_params:
incident[param] = (group.loc[:, param].idxmax(), group.loc[:, param].max())
incident_rs[index] = incident
iqr_table = {}
for param in table_params:
day = []
number = []
for elem in incident_rs.values():
day.append(elem[param][0])
number.append(elem[param][1])
q75_day, q25_day = np.percentile(day, [75, 25])
q75_number, q25_number = np.percentile(number, [75, 25])
iqr_table[param] = (
(int(round(q25_day)), int(round(q75_day))), (int(round(q25_number)), int(round(q75_number))))
table_columns = {'Infected (symptomatic)': 'Prevalence of Symptomatic Cases',
'Hospitalised': 'Hospitalisation Demand',
'Critical': 'Critical Care Demand', 'Change in Deaths': 'Prevalence of Deaths'}
outcome = []
peak_day = []
peak_number = []
for param in table_params:
outcome.append(table_columns[param])
peak_day.append(f'{iqr_table[param][0][0]}{DIGIT_SEP}{iqr_table[param][0][1]}')
peak_number.append(f'{iqr_table[param][1][0]}{DIGIT_SEP}{iqr_table[param][1][1]}')
data = {'Outcome': outcome, 'Peak Day IQR': peak_day, 'Peak Number IQR': peak_number}
return pd.DataFrame.from_dict(data)
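# --- Hedged worked example (illustrative numbers) ---
# Shows how one IQR cell above is formed: for peak days [40, 42, 47, 55],
# np.percentile gives q25 = 41.5 and q75 = 49.0, rendered as '42 to 49'.
def _example_iqr_cell():
    day = [40, 42, 47, 55]
    q75_day, q25_day = np.percentile(day, [75, 25])
    return f'{int(round(q25_day))}{DIGIT_SEP}{int(round(q75_day))}'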
@timing
def prevalence_age_table(df):
    # calculate age-specific Peak Day IQR and Peak Number IQR for each of the 'prevalent' variables to construct the table
table_params = ['Infected (symptomatic)', 'Hospitalised', 'Critical']
grouped = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])
prevalent_age = {}
params_age = []
for index, group in grouped:
# for each RO value find out the peak days for each table params
group = group.set_index('Time')
prevalent = {}
for param in table_params:
for column in df.columns:
if column.startswith(param):
prevalent[column] = (group.loc[:, column].idxmax(), group.loc[:, column].max())
params_age.append(column)
prevalent_age[index] = prevalent
params_age_dedup = list(set(params_age))
prevalent_age_bucket = {}
for elem in prevalent_age.values():
for key, value in elem.items():
if key in prevalent_age_bucket:
prevalent_age_bucket[key].append(value)
else:
prevalent_age_bucket[key] = [value]
iqr_table_age = {}
for key, value in prevalent_age_bucket.items():
day = [x[0] for x in value]
number = [x[1] for x in value]
q75_day, q25_day = np.percentile(day, [75, 25])
q75_number, q25_number = np.percentile(number, [75, 25])
iqr_table_age[key] = (
(int(round(q25_day)), int(round(q75_day))), (int(round(q25_number)), int(round(q75_number))))
arrays = [np.array(['Incident Cases']*9 + ['Hospital Demand']*9 + ['Critical Demand']*9),
np.array(
['all ages', '<9 years', '10-19 years', '20-29 years', '30-39 years', '40-49 years', '50-59 years',
'60-69 years', '70+ years']*3)]
peak_day = np.empty(27, dtype="S10")
peak_number = np.empty(27, dtype="S10")
    # Map each parameter to its block of nine slots (all ages + eight age
    # buckets) and each age bucket to its offset within the block, then fill
    # the peak-day / peak-number cells in a single pass.
    base_idx = {'Infected (symptomatic)': 0, 'Hospitalised': 9, 'Critical': 18}
    age_idx = {'0-9': 1, '10-19': 2, '20-29': 3, '30-39': 4,
               '40-49': 5, '50-59': 6, '60-69': 7, '70+': 8}
    for key, ((day_lo, day_hi), (num_lo, num_hi)) in iqr_table_age.items():
        prefix = next((p for p in base_idx if key.startswith(p)), None)
        if prefix is None:
            continue
        if key == prefix:
            idx = base_idx[prefix]  # aggregate ('all ages') slot
        else:
            bucket = next((a for a in age_idx if a in key), None)
            if bucket is None:
                continue
            idx = base_idx[prefix] + age_idx[bucket]
        peak_day[idx] = f'{day_lo}{DIGIT_SEP}{day_hi}'
        peak_number[idx] = f'{num_lo}{DIGIT_SEP}{num_hi}'
d = {'Peak Day, IQR': peak_day.astype(str), 'Peak Number, IQR': peak_number.astype(str)}
return pd.DataFrame(data=d, index=arrays)
@timing
def cumulative_all_table(df, population, camp_params):
    # now we calculate the total counts:
    # cases: (N - susceptible) * 0.5, since the asymptomatic rate is 0.5
    # hospital days: cumulative count of the hospitalisation bucket
    # critical days: cumulative count of the critical bucket
    # deaths: we already have that from the frame
df = df.filter(regex='^Time$|^R0$|^latentRate$|^removalRate$|^hospRate$|^deathRateICU$|^deathRateNoIcu$|Susceptible'+AGE_SEP+'|^Deaths$|^Hospitalised$|^Critical$|^Deaths$')
groups = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])
groups_tails = groups.apply(lambda x: x.set_index('Time').tail(1))
susceptible = groups_tails.filter(like='Susceptible'+AGE_SEP).rename(columns=lambda x: x.split(AGE_SEP)[1])[camp_params['Age']]
susceptible = ((population * camp_params['Population_structure'].values / 100 - susceptible) * camp_params['p_symptomatic'].values).sum(axis=1)
susceptible.index = susceptible.index.droplevel('Time')
deaths = groups_tails['Deaths']
deaths.index = deaths.index.droplevel('Time')
cumulative = {
'Susceptible': susceptible,
'Hospitalised': groups['Hospitalised'].sum(),
'Critical': groups['Critical'].sum(),
'Deaths': deaths
}
cumulative_all = pd.DataFrame(cumulative)
cumulative_count = cumulative_all.quantile([.25, .75]).apply(round).astype(int).astype(str).apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
data = {'Totals': ['Symptomatic Cases', 'Hospital Person-Days', 'Critical Person-days', 'Deaths'],
'Counts': cumulative_count}
return pd.DataFrame.from_dict(data)
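# Editor's worked sketch of the symptomatic-cases arithmetic above (the numbers
# and the 50% symptomatic share are illustrative assumptions, not model output):
def _example_symptomatic_total():
    population_band = 10000   # people in one age band
    susceptible_end = 4000    # still susceptible at the end of the run
    p_symptomatic = 0.5       # share of infections that are symptomatic
    return (population_band - susceptible_end) * p_symptomatic  # -> 3000.0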
@timing
def cumulative_age_table(df, camp_params):
    # needs an age breakdown as well, reported over 1-, 3- and 6-month horizons
arrays = [np.array(
['Symptomatic Cases'] * 9 + ['Hospital Person-Days'] * 9 + ['Critical Person-days'] * 9 + ['Deaths'] * 9),
np.array(
['all ages', '<9 years', '10-19 years', '20-29 years', '30-39 years', '40-49 years', '50-59 years',
'60-69 years', '70+ years'] * 4)]
params_select = ['Susceptible:', 'Deaths']
params_accu = ['Hospitalised', 'Critical']
columns_to_acc, columns_to_select, multipliers = collect_columns(df.columns, params_accu, params_select, camp_params)
first_month_diff = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_select + ['Time']].apply(find_first_month_diff)
third_month_diff = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_select + ['Time']].apply(find_third_month_diff)
sixth_month_diff = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_select + ['Time']].apply(find_sixth_month_diff)
first_month_select = first_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
three_month_select = third_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
six_month_select = sixth_month_diff[columns_to_select].mul(multipliers).quantile([.25, .75])
first_month_select['Susceptible'] = first_month_select.filter(like='Susceptible:').sum(axis=1)
three_month_select['Susceptible'] = three_month_select.filter(like='Susceptible:').sum(axis=1)
six_month_select['Susceptible'] = six_month_select.filter(like='Susceptible:').sum(axis=1)
one_month_cumsum = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_acc + ['Time']].apply(find_one_month)
three_month_cumsum = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_acc + ['Time']].apply(find_three_months)
six_month_cumsum = df.groupby(['R0', 'latentRate', 'removalRate', 'hospRate', 'deathRateICU', 'deathRateNoIcu'])[
columns_to_acc + ['Time']].apply(find_six_months)
first_month_accu = one_month_cumsum[columns_to_acc].quantile([.25, .75])
three_month_accu = three_month_cumsum[columns_to_acc].quantile([.25, .75])
six_month_accu = six_month_cumsum[columns_to_acc].quantile([.25, .75])
first_month = pd.concat([first_month_select, first_month_accu], axis=1)
third_month = pd.concat([three_month_select, three_month_accu], axis=1)
sixth_month = pd.concat([six_month_select, six_month_accu], axis=1)
sorted_columns = first_month.columns.sort_values()
my_comp_order = ['Susceptible', 'Hospitalised', 'Critical', 'Deaths']
my_sorted_columns = sum([list(filter(lambda column: comp in column, sorted_columns)) for comp in my_comp_order], [])
first_month_count = first_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
three_month_count = third_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
six_month_count = sixth_month[my_sorted_columns]\
.apply(round).astype(int).astype(str) \
.apply(lambda x: DIGIT_SEP.join(x.values), axis=0).values
d = {'First month': first_month_count, 'First three months': three_month_count,
'First six months': six_month_count}
return pd.DataFrame(data=d, index=arrays)
def collect_columns(columns, params_accu, params_select, camp_params):
columns_to_select = list(filter(lambda column: any(column.startswith(s) for s in params_select), columns))
columns_to_acc = list(filter(lambda column: any(column.startswith(s) for s in params_accu), columns))
multipliers = list(
map(lambda column: -camp_params[camp_params['Age'].apply(lambda x: x in column)]['p_symptomatic'].values[0] if 'Susceptible:' in column else 1,
columns_to_select))
return columns_to_acc, columns_to_select, multipliers
def diff_table(baseline, intervention, N):
t1 = effectiveness_cum_table(baseline, intervention, N)
t2 = effectiveness_peak_table(baseline, intervention)
r1 = [
'Symptomatic Cases',
t1.loc['Symptomatic Cases']['Reduction'],
t2.loc['Prevalence of Symptomatic Cases']['Delay in Peak Day'],
t2.loc['Prevalence of Symptomatic Cases']['Reduction in Peak Number']
]
r2 = [
'Hospital Person-Days',
t1.loc['Hospital Person-Days']['Reduction'],
t2.loc['Hospitalisation Demand']['Delay in Peak Day'],
t2.loc['Hospitalisation Demand']['Reduction in Peak Number']
]
r3 = [
'Critical Person-days',
t1.loc['Critical Person-days']['Reduction'],
t2.loc['Critical Care Demand']['Delay in Peak Day'],
t2.loc['Critical Care Demand']['Reduction in Peak Number']
]
r4 = [
'Deaths',
t1.loc['Deaths']['Reduction'],
t2.loc['Prevalence of Deaths']['Delay in Peak Day'],
t2.loc['Prevalence of Deaths']['Reduction in Peak Number']
]
df = pd.DataFrame([r1, r2, r3, r4],
columns=['Outcome', 'Overall reduction', 'Delay in Peak Day', 'Reduction in Peak Number'])
return df
def effectiveness_cum_table(baseline, intervention, N):
table_params = ['Symptomatic Cases', 'Hospital Person-Days', 'Critical Person-days', 'Deaths']
cum_table_baseline = cumulative_all_table(baseline, N)
# print("CUM: "+str(cum_table_baseline.loc[:, 'Counts']))
baseline_numbers = cum_table_baseline.loc[:, 'Counts'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)])
baseline_numbers_separate = pd.DataFrame(baseline_numbers.tolist(), columns=['25%', '75%'])
comparisonTable = {}
cumTable = cumulative_all_table(intervention, N)
# print("Counts: \n"+str(cumTable.loc[:, 'Counts']))
intervention_numbers = pd.DataFrame(
cumTable.loc[:, 'Counts'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
differencePercentage = (baseline_numbers_separate - intervention_numbers) / baseline_numbers_separate * 100
prettyOutput = []
for _, row in differencePercentage.round(0).astype(int).iterrows():
output1, output2 = row['25%'], row['75%']
prettyOutput.append(format_diff_row(output1, output2))
comparisonTable['Reduction'] = prettyOutput
comparisonTable['Total'] = table_params
return pd.DataFrame.from_dict(comparisonTable).set_index('Total')
def format_diff_row(o1, o2, unit='%'):
if o1 == o2:
return f'{o1} {unit}'
elif o2 > o1:
return f'{o1} to {o2} {unit}'
else:
return f'{o2} to {o1} {unit}'
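# Editor's worked examples (illustrative): format_diff_row(3, 7) -> '3 to 7 %',
# format_diff_row(5, 5) -> '5 %', and format_diff_row(7, 3) -> '3 to 7 %'.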
def effectiveness_peak_table(baseline, intervention):
    # The calculation here is somewhat hand-wavy: strictly, each intervention run
    # should be compared against the baseline run with the same parameter set-up,
    # and the 25%-75% range taken over those paired differences.
interventionPeak_baseline = prevalence_all_table(baseline)
table_columns = interventionPeak_baseline.Outcome.tolist()
peakDay_baseline = pd.DataFrame(
interventionPeak_baseline.loc[:, 'Peak Day IQR'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
peakNumber_baseline = pd.DataFrame(
interventionPeak_baseline.loc[:, 'Peak Number IQR'].apply(
lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
comparisonSubdict = {}
interventionPeak = prevalence_all_table(intervention)
peakDay = pd.DataFrame(
interventionPeak.loc[:, 'Peak Day IQR'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
peakNumber = pd.DataFrame(
interventionPeak.loc[:, 'Peak Number IQR'].apply(lambda x: [int(i) for i in x.split(DIGIT_SEP)]).tolist(),
columns=['25%', '75%'])
differenceDay = (peakDay - peakDay_baseline)
peakNumber_baseline = peakNumber_baseline + 0.01 # Shift to avoid div/0
peakNumber = peakNumber + 0.01
differenceNumberPercentage = (peakNumber_baseline - peakNumber) / peakNumber_baseline * 100
# differenceNumberPercentage = differenceNumberPercentage.replace([np.inf, -np.inf], 100.0)
prettyOutputDay = []
prettyOutputNumber = []
for _, row in differenceDay.round(0).astype(int).iterrows():
output1, output2 = row['25%'], row['75%']
prettyOutputDay.append(format_diff_row(output1, output2, 'days'))
for _, row in differenceNumberPercentage.round(0).astype(int).iterrows():
output1, output2 = row['25%'], row['75%']
prettyOutputNumber.append(format_diff_row(output1, output2))
comparisonSubdict['Delay in Peak Day'] = prettyOutputDay
comparisonSubdict['Reduction in Peak Number'] = prettyOutputNumber
    comparisondf = pd.DataFrame(comparisonSubdict, index=pd.Index(table_columns, name='States'))
return comparisondf
def find_first_month(df):
return df[df['Time'] == 30]
def find_third_month(df):
return df[df['Time'] == 90]
def find_sixth_month(df):
return df[df['Time'] == 180]
def find_first_month_diff(df):
return df[df['Time'] <= 30].diff(periods=30).tail(1)
def find_third_month_diff(df):
return df[df['Time'] <= 90].diff(periods=90).tail(1)
def find_sixth_month_diff(df):
return df[df['Time'] <= 180].diff(periods=180).tail(1)
def find_one_month(df):
return df[df['Time'] <= 30].cumsum().tail(1)
def find_three_months(df):
return df[df['Time'] <= 90].cumsum().tail(1)
def find_six_months(df):
return df[df['Time'] <= 180].cumsum().tail(1)
def _merge(dict1, dict2):
res = {**dict1, **dict2}
return res
| 52.059867
| 176
| 0.636824
|
38d30593c68c7fab10ef5b262d796a2da2cfcd4c
| 14,401
|
py
|
Python
|
session-4/libs/dataset_utils.py
|
itamaro/CADL
|
b5de17485962577fc51156cd12da1b16d66dbb26
|
[
"Apache-2.0"
] | 1,628
|
2016-07-19T22:21:56.000Z
|
2022-02-27T15:19:45.000Z
|
session-4/libs/dataset_utils.py
|
itamaro/CADL
|
b5de17485962577fc51156cd12da1b16d66dbb26
|
[
"Apache-2.0"
] | 75
|
2016-07-22T02:05:05.000Z
|
2019-05-20T21:00:34.000Z
|
session-4/libs/dataset_utils.py
|
itamaro/CADL
|
b5de17485962577fc51156cd12da1b16d66dbb26
|
[
"Apache-2.0"
] | 955
|
2016-07-22T00:10:52.000Z
|
2022-02-24T15:06:09.000Z
|
"""Utils for dataset creation.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import os
import pickle
import numpy as np
import tensorflow as tf
from . import dft
from .utils import download_and_extract_tar
def create_input_pipeline(files, batch_size, n_epochs, shape, crop_shape=None,
crop_factor=1.0, n_threads=4):
"""Creates a pipefile from a list of image files.
Includes batch generator/central crop/resizing options.
The resulting generator will dequeue the images batch_size at a time until
it throws tf.errors.OutOfRangeError when there are no more images left in
the queue.
Parameters
----------
files : list
List of paths to image files.
batch_size : int
Number of image files to load at a time.
n_epochs : int
Number of epochs to run before raising tf.errors.OutOfRangeError
shape : list
[height, width, channels]
crop_shape : list
[height, width] to crop image to.
crop_factor : float
Percentage of image to take starting from center.
n_threads : int, optional
Number of threads to use for batch shuffling
"""
    # We first create a "producer" queue. It creates a production line which
    # will queue up the file names and allow another queue to dequeue the file
    # names, all using a tf queue runner.
    # Put simply, this is the entry point of the computational graph.
    # It will generate the list of file names.
    # We also specify its capacity beforehand.
producer = tf.train.string_input_producer(
files, capacity=len(files))
    # We need something which can open the files and read their contents.
reader = tf.WholeFileReader()
# We pass the filenames to this object which can read the file's contents.
    # This will create another queue runner which dequeues from the previous queue.
keys, vals = reader.read(producer)
    # We then decode the contents, since we know each file is a jpeg image
imgs = tf.image.decode_jpeg(
vals,
channels=3 if len(shape) > 2 and shape[2] == 3 else 0)
# We have to explicitly define the shape of the tensor.
# This is because the decode_jpeg operation is still a node in the graph
# and doesn't yet know the shape of the image. Future operations however
# need explicit knowledge of the image's shape in order to be created.
imgs.set_shape(shape)
    # Next we'll centrally crop the image to crop_shape (scaled by crop_factor).
    # This operation requires explicit knowledge of the image's shape, and it
    # must be skipped entirely when no crop_shape is given (the original code
    # computed the resize shape even when crop_shape was None, which crashed).
    if crop_shape is not None:
        if shape[0] > shape[1]:
            rsz_shape = [int(shape[0] / shape[1] * crop_shape[0] / crop_factor),
                         int(crop_shape[1] / crop_factor)]
        else:
            rsz_shape = [int(crop_shape[0] / crop_factor),
                         int(shape[1] / shape[0] * crop_shape[1] / crop_factor)]
        rszs = tf.image.resize_images(imgs, rsz_shape)
        crops = tf.image.resize_image_with_crop_or_pad(
            rszs, crop_shape[0], crop_shape[1])
    else:
        crops = imgs
# Now we'll create a batch generator that will also shuffle our examples.
# We tell it how many it should have in its buffer when it randomly
# permutes the order.
min_after_dequeue = len(files) // 10
# The capacity should be larger than min_after_dequeue, and determines how
# many examples are prefetched. TF docs recommend setting this value to:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
capacity = min_after_dequeue + (n_threads + 1) * batch_size
# Randomize the order and output batches of batch_size.
batch = tf.train.shuffle_batch([crops],
enqueue_many=False,
batch_size=batch_size,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
num_threads=n_threads)
# alternatively, we could use shuffle_batch_join to use multiple reader
# instances, or set shuffle_batch's n_threads to higher than 1.
return batch
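# Editor's sketch: minimal TF 1.x usage of create_input_pipeline ('jpeg_files'
# is a hypothetical list of image paths; shapes are illustrative assumptions).
def _example_input_pipeline(jpeg_files):
    batch = create_input_pipeline(
        jpeg_files, batch_size=8, n_epochs=1,
        shape=[218, 178, 3], crop_shape=[64, 64], crop_factor=0.8)
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        imgs = sess.run(batch)  # ndarray of shape (8, 64, 64, 3)
        coord.request_stop()
        coord.join(threads)
    return imgs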
def gtzan_music_speech_download(dst='gtzan_music_speech'):
"""Download the GTZAN music and speech dataset.
Parameters
----------
dst : str, optional
        Location to put the GTZAN music and speech dataset.
"""
path = 'http://opihi.cs.uvic.ca/sound/music_speech.tar.gz'
download_and_extract_tar(path, dst)
def gtzan_music_speech_load(dst='gtzan_music_speech'):
"""Load the GTZAN Music and Speech dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of GTZAN Music and Speech dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
from scipy.io import wavfile
if not os.path.exists(dst):
gtzan_music_speech_download(dst)
music_dir = os.path.join(os.path.join(dst, 'music_speech'), 'music_wav')
music = [os.path.join(music_dir, file_i)
for file_i in os.listdir(music_dir)
if file_i.endswith('.wav')]
speech_dir = os.path.join(os.path.join(dst, 'music_speech'), 'speech_wav')
speech = [os.path.join(speech_dir, file_i)
for file_i in os.listdir(speech_dir)
if file_i.endswith('.wav')]
Xs = []
ys = []
for i in music:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(0)
for i in speech:
sr, s = wavfile.read(i)
s = s / 16384.0 - 1.0
re, im = dft.dft_np(s)
mag, phs = dft.ztoc(re, im)
Xs.append((mag, phs))
ys.append(1)
Xs = np.array(Xs)
Xs = np.transpose(Xs, [0, 2, 3, 1])
ys = np.array(ys)
return Xs, ys
def cifar10_download(dst='cifar10'):
"""Download the CIFAR10 dataset.
Parameters
----------
dst : str, optional
Directory to download into.
"""
path = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
download_and_extract_tar(path, dst)
def cifar10_load(dst='cifar10'):
"""Load the CIFAR10 dataset.
Downloads the dataset if it does not exist into the dst directory.
Parameters
----------
dst : str, optional
Location of CIFAR10 dataset.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Array of data, Array of labels
"""
if not os.path.exists(dst):
cifar10_download(dst)
Xs = None
ys = None
for f in range(1, 6):
cf = pickle.load(open(
'%s/cifar-10-batches-py/data_batch_%d' % (dst, f), 'rb'),
encoding='LATIN')
if Xs is not None:
Xs = np.r_[Xs, cf['data']]
ys = np.r_[ys, np.array(cf['labels'])]
else:
Xs = cf['data']
ys = cf['labels']
Xs = np.swapaxes(np.swapaxes(Xs.reshape(-1, 3, 32, 32), 1, 3), 1, 2)
return Xs, ys
def dense_to_one_hot(labels, n_classes=2):
"""Convert class labels from scalars to one-hot vectors.
Parameters
----------
labels : array
Input labels to convert to one-hot representation.
n_classes : int, optional
        Number of classes in the one-hot encoding.
Returns
-------
one_hot : array
One hot representation of input.
"""
return np.eye(n_classes).astype(np.float32)[labels]
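# Editor's sketch (illustrative): three labels over three classes.
def _example_one_hot():
    return dense_to_one_hot(np.array([0, 2, 1]), n_classes=3)
    # -> [[1., 0., 0.], [0., 0., 1.], [0., 1., 0.]] as float32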
class DatasetSplit(object):
"""Utility class for batching data and handling multiple splits.
Attributes
----------
current_batch_idx : int
        Index into the current epoch marking the start of the next batch.
images : np.ndarray
Xs of the dataset. Not necessarily images.
labels : np.ndarray
ys of the dataset.
n_labels : int
Number of possible labels
num_examples : int
Number of total observations
"""
def __init__(self, images, labels):
"""Initialize a DatasetSplit object.
Parameters
----------
images : np.ndarray
Xs/inputs
labels : np.ndarray
ys/outputs
"""
self.images = np.array(images).astype(np.float32)
if labels is not None:
self.labels = np.array(labels).astype(np.int32)
self.n_labels = len(np.unique(labels))
else:
self.labels = None
self.num_examples = len(self.images)
def next_batch(self, batch_size=100):
"""Batch generator with randomization.
Parameters
----------
batch_size : int, optional
Size of each minibatch.
Returns
-------
Xs, ys : np.ndarray, np.ndarray
Next batch of inputs and labels (if no labels, then None).
"""
# Shuffle each epoch
current_permutation = np.random.permutation(range(len(self.images)))
epoch_images = self.images[current_permutation, ...]
if self.labels is not None:
epoch_labels = self.labels[current_permutation, ...]
# Then iterate over the epoch
self.current_batch_idx = 0
while self.current_batch_idx < len(self.images):
end_idx = min(
self.current_batch_idx + batch_size, len(self.images))
this_batch = {
'images': epoch_images[self.current_batch_idx:end_idx],
'labels': epoch_labels[self.current_batch_idx:end_idx]
if self.labels is not None else None
}
self.current_batch_idx += batch_size
yield this_batch['images'], this_batch['labels']
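# Editor's sketch: iterating a DatasetSplit over one shuffled epoch (illustrative
# random data; not part of the original module).
def _example_split_iteration():
    split = DatasetSplit(np.random.rand(10, 4), np.array([0, 1] * 5))
    return [(Xs_i.shape, ys_i.shape) for Xs_i, ys_i in split.next_batch(batch_size=4)]
    # -> [((4, 4), (4,)), ((4, 4), (4,)), ((2, 4), (2,))]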
class Dataset(object):
"""Create a dataset from data and their labels.
Allows easy use of train/valid/test splits; Batch generator.
Attributes
----------
all_idxs : list
All indexes across all splits.
all_inputs : list
All inputs across all splits.
all_labels : list
All labels across all splits.
n_labels : int
Number of labels.
split : list
Percentage split of train, valid, test sets.
test_idxs : list
Indexes of the test split.
train_idxs : list
Indexes of the train split.
valid_idxs : list
Indexes of the valid split.
"""
def __init__(self, Xs, ys=None, split=[1.0, 0.0, 0.0], one_hot=False):
"""Initialize a Dataset object.
Parameters
----------
Xs : np.ndarray
Images/inputs to a network
ys : np.ndarray
Labels/outputs to a network
split : list, optional
Percentage of train, valid, and test sets.
one_hot : bool, optional
Whether or not to use one-hot encoding of labels (ys).
"""
self.all_idxs = []
self.all_labels = []
self.all_inputs = []
self.train_idxs = []
self.valid_idxs = []
self.test_idxs = []
self.n_labels = 0
self.split = split
# Now mix all the labels that are currently stored as blocks
self.all_inputs = Xs
n_idxs = len(self.all_inputs)
idxs = range(n_idxs)
rand_idxs = np.random.permutation(idxs)
self.all_inputs = self.all_inputs[rand_idxs, ...]
if ys is not None:
self.all_labels = ys if not one_hot else dense_to_one_hot(ys)
self.all_labels = self.all_labels[rand_idxs, ...]
else:
self.all_labels = None
# Get splits
self.train_idxs = idxs[:round(split[0] * n_idxs)]
self.valid_idxs = idxs[len(self.train_idxs):
len(self.train_idxs) + round(split[1] * n_idxs)]
self.test_idxs = idxs[
(len(self.valid_idxs) + len(self.train_idxs)):
(len(self.valid_idxs) + len(self.train_idxs)) +
round(split[2] * n_idxs)]
@property
def X(self):
"""Inputs/Xs/Images.
Returns
-------
all_inputs : np.ndarray
Original Inputs/Xs.
"""
return self.all_inputs
@property
def Y(self):
"""Outputs/ys/Labels.
Returns
-------
all_labels : np.ndarray
Original Outputs/ys.
"""
return self.all_labels
@property
def train(self):
"""Train split.
Returns
-------
split : DatasetSplit
Split of the train dataset.
"""
if len(self.train_idxs):
inputs = self.all_inputs[self.train_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.train_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def valid(self):
"""Validation split.
Returns
-------
split : DatasetSplit
Split of the validation dataset.
"""
if len(self.valid_idxs):
inputs = self.all_inputs[self.valid_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.valid_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
@property
def test(self):
"""Test split.
Returns
-------
split : DatasetSplit
Split of the test dataset.
"""
if len(self.test_idxs):
inputs = self.all_inputs[self.test_idxs, ...]
if self.all_labels is not None:
labels = self.all_labels[self.test_idxs, ...]
else:
labels = None
else:
inputs, labels = [], []
return DatasetSplit(inputs, labels)
def mean(self):
"""Mean of the inputs/Xs.
Returns
-------
mean : np.ndarray
Calculates mean across 0th (batch) dimension.
"""
return np.mean(self.all_inputs, axis=0)
def std(self):
"""Standard deviation of the inputs/Xs.
Returns
-------
std : np.ndarray
Calculates std across 0th (batch) dimension.
"""
return np.std(self.all_inputs, axis=0)
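# Editor's sketch: an 80/10/10 split over illustrative random data (not part of
# the original module).
def _example_dataset_splits():
    ds = Dataset(np.random.rand(100, 4), np.arange(100) % 2,
                 split=[0.8, 0.1, 0.1], one_hot=True)
    return ds.train.num_examples, ds.valid.num_examples, ds.test.num_examples
    # -> (80, 10, 10)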
| 30.903433
| 79
| 0.586418
|
2db8e065217e8b802b1ec79fb86c015062335f3e
| 3,742
|
py
|
Python
|
src/reviews/utils.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 8
|
2016-01-29T11:53:40.000Z
|
2020-03-02T22:42:02.000Z
|
src/reviews/utils.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 289
|
2015-03-23T07:42:52.000Z
|
2022-03-11T23:26:10.000Z
|
src/reviews/utils.py
|
Talengi/phase
|
60ff6f37778971ae356c5b2b20e0d174a8288bfe
|
[
"MIT"
] | 7
|
2015-12-08T09:03:20.000Z
|
2020-05-11T15:36:51.000Z
|
from itertools import groupby
from django.core.cache import cache
from django.contrib.contenttypes.models import ContentType
from reviews.models import Review, ReviewMixin
def get_cached_reviews(revision):
"""Get all reviews for the given revision.
This method is intended to be used when one want to fetch all reviews for
all the document's revisions successively.
All the reviews will be fetched in a single query and cached.
Also, for revision which review was never started, there is no review
objects to fetch, so we need to create some dummy ones for display purpose.
See https://trello.com/c/CdZF9eAG/174-afficher-la-liste-de-distribution-d-un-document
Note that this cache is cleared in a signal of the same module.
"""
reviews = get_all_reviews(revision.document)
if revision.revision in reviews:
revision_reviews = reviews[revision.revision]
else:
dummy_reviews = get_dummy_reviews(revision)
if revision.revision in dummy_reviews:
revision_reviews = dummy_reviews[revision.revision]
else:
revision_reviews = []
return revision_reviews
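# Editor's sketch of the intended call pattern (illustrative; `revisions` is a
# hypothetical iterable of revision objects for one document):
def _example_reviews_per_revision(revisions):
    # a single query plus the short-lived cache serves every revision
    return {rev.revision: get_cached_reviews(rev) for rev in revisions}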
def get_all_reviews(document):
"""Return a dictionnary of revision indexed reviews."""
cache_key = 'all_reviews_{}'.format(document.id)
all_reviews = cache.get(cache_key, None)
if all_reviews is None:
qs = Review.objects \
.filter(document=document) \
.order_by('revision', 'id') \
.select_related('reviewer')
all_reviews = {}
for revision_id, reviews in groupby(qs, lambda obj: obj.revision):
all_reviews[revision_id] = list(reviews)
cache.set(cache_key, all_reviews, 5)
return all_reviews
def get_dummy_reviews(revision):
"""Return a dictionary of Review objects for."""
cache_key = 'dummy_reviews_{}'.format(revision.metadata.document_id)
dummy_reviews = cache.get(cache_key, None)
if dummy_reviews is None:
revisions = revision.__class__.objects \
.filter(metadata__document=revision.document) \
.filter(review_start_date=None) \
.select_related('leader', 'approver') \
.prefetch_related('reviewers')
dummy_reviews = {}
        # use a distinct loop variable so the `revision` argument is not shadowed
        for rev in revisions:
            revision_reviews = []
            for reviewer in rev.reviewers.all():
                revision_reviews.append(Review(
                    role='reviewer',
                    status=Review.STATUSES.void,
                    reviewer=reviewer,
                    document_id=rev.metadata.document_id))
            if rev.leader:
                revision_reviews.append(Review(
                    role='leader',
                    status=Review.STATUSES.void,
                    reviewer=rev.leader,
                    document_id=rev.metadata.document_id))
            if rev.approver:
                revision_reviews.append(Review(
                    role='approver',
                    status=Review.STATUSES.void,
                    reviewer=rev.approver,
                    document_id=rev.metadata.document_id))
            dummy_reviews[rev.revision] = revision_reviews
cache.set(cache_key, dummy_reviews, 5)
return dummy_reviews
def get_all_reviewable_types():
"""Return all inheriting ReviewMixin classes content types."""
qs = ContentType.objects.all()
types = (ct for ct in qs if issubclass(ct.model_class(), ReviewMixin))
return types
def get_all_reviewable_classes():
"""Return all available ReviewMixin subclasses."""
classes = [ct.model_class() for ct in get_all_reviewable_types()]
return classes
| 33.115044
| 89
| 0.649118
|
d14743a84a11d66435ff7c7f43426c4c5111c668
| 1,801
|
py
|
Python
|
weblog/urls.py
|
mmohajer9/Resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | 1
|
2019-07-28T10:09:26.000Z
|
2019-07-28T10:09:26.000Z
|
weblog/urls.py
|
mmohajer9/Resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | 8
|
2021-04-08T22:03:32.000Z
|
2022-02-10T09:35:46.000Z
|
weblog/urls.py
|
mmohajer9/resumo
|
625c279e71e98f0d461679d75c6c464f6afcf437
|
[
"MIT"
] | null | null | null |
from django.urls import path , include
from . import views
app_name = 'weblog'
urlpatterns = [
path('' , views.home , name = 'home'),
path('register/' , views.register_form_view , name = 'register'),
path('register/additional/<str:username>/' , views.additional_info_form_view , name = 'additional_info'),
path('signin/' , views.signin , name = 'signin'),
path('aboutus/',views.aboutus , name = 'aboutus'),
path('signout/' , views.signout , name = 'signout'),
path('profile/' , views.profile , name = 'profile'),
path('profile/<str:username>/' , views.user_profile , name = 'user_profile'),
path('profile/<str:username>/edit_profile' , views.edit_profile , name = 'edit_profile'),
path('profile/<str:username>/wall' , views.UserWallView.as_view() , name = 'wall'),
path('profile/<str:username>/post/<int:pk>' , views.PostDetailView.as_view() , name = 'post'),
path('profile/<str:username>/post/<int:pk>/likes' , views.PostLikeListView.as_view() , name = 'postlikes'),
path('profile/<str:username>/newpost' , views.PostCreateView.as_view() , name = 'newpost'),
path('profile/<str:username>/post/<int:post_id>/likeThePost' , views.likeThePost , name = 'likeThePost'),
path('profile/<str:username>/post/<int:post_id>/dislikeThePost' , views.dislikeThePost , name = 'dislikeThePost'),
path('profile/<str:username>/post/<int:post_id>/deleteLikeOrDislike/<int:pk>' , views.LikeOrDislikeDeleteView.as_view() , name = 'deleteLikeOrDislike'),
path('profile/<str:username>/post/<int:post_id>/deleteComment/<int:pk>' , views.deleteCommentDeleteView.as_view() , name = 'deleteComment'),
    # path('profile/<str:username>/edit_user' , views.edit_user , name = 'edit_user'),  # not very important if it's left out
# -- I Should Think ! -- #
]
| 64.321429
| 156
| 0.679622
|
7228dd445c0440e0a7a4f9fc414fc3002175d6c6
| 18,271
|
py
|
Python
|
site/app/metroui.py
|
hehaoslj/globalhealth
|
6df3ae643392c93eb2c380c25339c15e9a3e804c
|
[
"MIT"
] | null | null | null |
site/app/metroui.py
|
hehaoslj/globalhealth
|
6df3ae643392c93eb2c380c25339c15e9a3e804c
|
[
"MIT"
] | null | null | null |
site/app/metroui.py
|
hehaoslj/globalhealth
|
6df3ae643392c93eb2c380c25339c15e9a3e804c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import random
color_templ="""black white lime green emerald teal blue cyan cobalt indigo violet pink magenta crimson red orange amber yellow brown olive steel mauve taupe gray dark darker darkBrown darkCrimson darkMagenta darkIndigo darkCyan darkCobalt darkTeal darkEmerald darkGreen darkOrange darkRed darkPink darkViolet darkBlue lightBlue lightRed lightGreen lighterBlue lightTeal lightOlive lightOrange lightPink grayDark grayDarker grayLight grayLighter"""
color_prefix_templ="bg fg ribbed"
colors = color_templ.split(" ")
color_prefixs = color_prefix_templ.split(' ')
random.seed(1)
def rand_color():
pos = random.randint(0, len(colors)-1)
px = random.randint(0, len(color_prefixs)-1)
return '-'.join((color_prefixs[px], colors[pos]) )
class HTMLElement(object):
default_attributes={}
tag = "unknown_tag"
nullable = False
def __init__(self, *args, **kwargs):
self.attributes = kwargs
self.attributes.update(self.default_attributes)
if 'cls' in self.attributes:
self.attributes['class'] = self.attributes['cls']
del self.attributes['cls']
if 'attrs' in self.attributes:
self.attributes.update( self.attributes['attrs'] )
del self.attributes['attrs']
if 'ctx' in self.attributes:
self.children = self.attributes['ctx']
del self.attributes['ctx']
else:
self.children = args
def tostr(self, o):
if o == None:
return ''
if type(o) == str:
return o
elif type(o) == unicode:
return o.encode('utf-8')
elif type(o) in (tuple, list):
return ''.join([self.tostr(child) for child in o])
else:
return str(o)
def __str__(self):
attr = ' '.join(['{}="{}"'.format(name, value) for name, value in self.attributes.items()])
ctx = ''
ctx = self.tostr(self.children)
#if type(self.children) ==str:
# ctx = self.children
#elif type(self.children) == unicode:
# ctx = self.children.encode('utf-8')
#elif type(self.children) in (tuple, list):
# ctx = ''.join([str(child) for child in self.children])
        if ctx == '' and self.nullable:
return ''
return '\n<{} {}>{}</{}>\n'.format(self.tag, attr, ctx, self.tag)
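# Editor's sketch: composing the element classes defined just below (illustrative;
# the names are resolved lazily at call time).
def _example_markup():
    return str(div(anchor('Home', href='/'), cls='menu'))
    # -> '\n<div class="menu">\n<a href="/">Home</a>\n</div>\n'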
class div(HTMLElement):
tag = "div"
class ndiv(div):
nullable = True
class anchor(HTMLElement):
tag = 'a'
class nanchor(anchor):
nullable = True
class h1(HTMLElement):
tag = 'h1'
class image(HTMLElement):
tag='img'
class tile(div):
default_attributes={'cls' : 'col-sm-6 col-md-3'}
label = 'Tile'
href='#'
color='tile-red'
def __init__(self, *args, **kwargs):
div.__init__(self, **kwargs)
if len(args) > 0:
self.label = str(args[0])
if len(args)>1:
self.href=str(args[1])
if len(args)>2:
self.color = str(args[2])
for k,v in kwargs.items():
self.__setattr__(k, v)
self.update()
    def __setattr__(self, key, value):
        # both branches of the old new-key/existing-key split were identical
        self.__dict__[key] = value
        self.update()
def update(self):
htitle = h1(self.label)
ha = anchor(htitle, href=self.href)
thumb = div(ha, cls='thumbnail tile tile-medium ' + self.color)
self.__dict__['children'] = str( thumb )
class span(HTMLElement):
tag = 'span'
class nspan(span):
nullable = True
class button(HTMLElement):
tag = 'button'
class topnav(HTMLElement):
default_attributes={'cls':'navbar navbar-inverse navbar-fixed-top'}
tag = 'nav'
title = 'Project'
toggle = 'Toggle Navigation'
"""<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">$tr('Toggle navigation')</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">$tr('Project name')</a>
</div>
<div id="navbar" class="navbar-collapse collapse">
<form class="navbar-form navbar-right">
<div class="form-group">
<input type="text" placeholder="$tr('Email')" class="form-control">
</div>
<div class="form-group">
<input type="password" placeholder="$tr('Password')" class="form-control">
</div>
<button type="submit" class="btn btn-success">Sign in</button>
</form>
</div><!--/.navbar-collapse -->
</div>
</nav>"""
def __init__(self, *args, **kwargs):
HTMLElement.__init__(self, **kwargs)
        if len(args) > 0:
            self.title = args[0]
        if len(args) > 1:
            self.toggle = args[1]
for k,v in kwargs.items():
self.__setattr__(k, v)
self.update()
def update(self):
eng = None
chs = None
if conf.lang == 'en':
eng=anchor('English', cls='btn btn-default active', role='button', href='/en/')
chs=anchor('Chinese', cls='btn btn-success', role='button', href='/zh-CN/')
else:
eng=anchor('English', cls='btn btn-success', href='/en/')
chs=anchor('Chinese', cls='btn btn-default active', href='/zh-CN/')
frm = div(chs, eng, cls="navbar-right")
bar = div(frm, attrs={'id':"navbar", 'class':"navbar-collapse collapse"})
sr = span(self.toggle, cls="sr-only")
ic = span(cls="icon-bar")
btn = button(sr, ic, ic, ic, attrs={'type':"button", 'class':"navbar-toggle collapsed", 'data-toggle':"collapse", 'data-target':"#navbar",
'aria-expanded':"false", 'aria-controls':"navbar"})
a = anchor(self.title, cls="navbar-brand", href="#")
hd = div(btn, a, cls="navbar-header")
ctx = div(hd, bar, cls="container")
self.__dict__['children'] = str(ctx)
    def __setattr__(self, key, value):
        already_set = key in self.__dict__
        self.__dict__[key] = value
        if already_set:
            # only rebuild the markup when an existing attribute changes
            self.update()
class menubar(div):
"""
<header class="app-bar fixed-top navy" data-role="appbar">
<div class="container">
<a href="/" class="app-bar-element branding"><img src="images/wn8.png" style="height: 28px; display: inline-block; margin-right: 10px;"> Metro UI CSS</a>
<ul class="app-bar-menu small-dropdown">
<li data-flexorderorigin="0" data-flexorder="1" class="">
<a href="#" class="dropdown-toggle">Base CSS</a>
<ul class="d-menu" data-role="dropdown" data-no-close="true" style="display: none;">
<li class="disabled"><a href="overview.html">Overview</a></li>
<li class="divider"></li>
<li>
<a href="" class="dropdown-toggle">Grid system</a>
<ul class="d-menu" data-role="dropdown">
<li><a href="grid.html">Simple grid</a></li>
<li><a href="flexgrid.html">Flex grid</a></li>
</ul>
</li>
<li><a href="typography.html">Typography</a></li>
<li><a href="tables.html">Tables</a></li>
<li><a href="inputs.html">Forms & Inputs</a></li>
<li><a href="buttons.html">Buttons</a></li>
<li><a href="images.html">Images</a></li>
<li><a href="font.html">Metro Icon Font</a></li>
<li class="divider"></li>
<li><a href="colors.html">Colors</a></li>
<li><a href="helpers.html">Helpers classes</a></li>
<li class="divider"></li>
<li><a href="rtl.html">RTL Support</a></li>
<li class="disabled"><a href="responsive.html">Responsive</a></li>
</ul>
</li>
</ul>
<span class="app-bar-pull"></span>
<div class="app-bar-pullbutton automatic" style="display: none;"></div>
<div class="clearfix" style="width: 0;"></div>
<nav class="app-bar-pullmenu hidden flexstyle-app-bar-menu" style="display: none;">
<ul class="app-bar-pullmenubar hidden app-bar-menu"></ul>
</nav>
</div>
</header>
"""
tag = 'header'
default_attributes={'cls':"app-bar fixed-top navy no-flexible", 'data-role':"appbar"}
def __init__(self, config, obj):
div.__init__(self)
ctx = div(self.branding(config, obj), self.menu(config, obj), self.menutail(config, obj), cls="container")
self.children=str(ctx)
def options(self, config):
class _O(object):
pass
o = _O()
o.name="Options"
o.href="options"
o.menu=list()
e=_O()
e.name="en"
e.href=config.url(lang="en")
o.menu.append(e)
e=_O()
e.name="zh-CN"
e.href=config.url(lang="zh-CN")
o.menu.append(e)
rt='<li class="navbar-right">'+self.submenu(o, config=config)[4:]
return rt
def submenu(self, o, config, prefix=""):
obj = o
order = 0
if type(o) == tuple:
obj, order = o
if obj.name=='--':
return '<li class="divider"></li>\n'
nm = config.tr(obj.name)
href = obj.href
if href[0] != '/':
href = '/'.join((prefix, obj.href))
if obj.href == "#":
href = "#"
prefix = ""
        if not hasattr(obj, "menu"):
rt = u'<li><a href="{href}"> {name} </a></li>\n'.format(href=href, name=nm, order=order, order1=order+1)
return rt
else:
rt=u'''<li><a href="#" class="dropdown-toggle"> {name} </a>
<ul class="d-menu" data-role="dropdown" data-no-close="true" style="display: none;">
<li class="active"><a href="{href}">{name}</a></li>
<li class="divider"></li>
'''.format(href=href, name=nm, order=order, order1=order+1)+ '\n'.join([self.submenu(o,config=config, prefix=href) for o in obj.menu ])+ '</ul></li>\n'
return rt
def menu(self, config, obj):
self.order = 0
s = u'''<ul class="app-bar-menu small-dropdown">'''+ '\n'.join([self.submenu(o, config=config, prefix='/'+config.lang) for o in zip(obj, range(len(obj)) ) ])+ self.options(config) + '</ul>\n'
return s.encode('utf-8')
def branding(self, config, obj):
title = config.tr(config.site.start)
s = u'\n<a href="{url}" class="app-bar-element branding"><i class="icon icon-windows icon-2x"></i></a>\n\n'.format(title=title, url=config.url('start'))
return s.encode('utf-8')
def menutail(self, c, o):
return """
<span class="app-bar-pull"></span>
<div class="app-bar-pullbutton automatic" style="display: none;"></div>
<div class="clearfix" style="width: 0;"></div>
<nav class="app-bar-pullmenu hidden flexstyle-app-bar-menu" style="display: none;">
<ul class="app-bar-pullmenubar hidden app-bar-menu"></ul>
</nav>
"""
def parse_cls(fn, *args, **kw):
def wrapped(*args, **kw):
bg = kw['bg'] if kw.has_key('bg') else rand_color()
fg = kw['fg'] if kw.has_key('fg') else "fg-white"
cls = kw['cls'] if kw.has_key('cls') else 'tile'
url = kw['url'] if kw.has_key('url') else None
cls=' '.join((bg, fg, cls))
lcls = "tile-label"
if kw.has_key('label_cls'):
lcls += ' ' + kw['label_cls']
s1=nspan(kw['text'], cls=lcls)
if url:
o=anchor(fn(*args, tile_label=s1, **kw), href=url, cls=cls, attrs={'data-role':"tile"})
return o
else:
o = div(fn(*args, tile_label=s1, **kw), cls=cls, attrs={'data-role':"tile"})
return o
return wrapped
@parse_cls
def tile1(icon="", **kw):
"""<!-- Tile with icon, icon can be font icon or image -->"""
ctx=[kw['tile_label']]
s = span(cls="icon %s" % icon)
ctx.append(s)
d2 = div(ctx=ctx, cls="tile-content iconic")
return d2
@parse_cls
def tile_image(img="", **kw):
ctx=[kw['tile_label']]
s=image(src=img)
ctx.append(s)
if kw['text']:
s=nspan(kw['text'], cls="tile-label")
ctx.append(s)
d2 = div(ctx=ctx, cls="tile-content")
return d2
@parse_cls
def tile2(label="",badge="", **kw):
"""<!-- Tile with label and badge -->
<div class="tile">
<div class="tile-content ">
<span class="tile-label">Label</span>
<span class="tile-badge">5</span>
</div>
</div>"""
ctx=[kw['tile_label']]
s1=nspan(kw['text'], cls="tile-label")
s2 = span(badge, cls="tile-badge")
ctx.append(s1)
ctx.append(s2)
d2=None
if kw.has_key('icon'):
s3 = span(cls="icon %s" % kw['icon'])
ctx.append(s3)
d2 = div(ctx=ctx, cls="tile-content iconic")
elif kw.has_key('image'):
s3 = image(src=kw['image'])
ctx.append(s3)
d2 = div(ctx=ctx, cls="tile-content iconic")
else:
d2 = div(ctx=ctx, cls="tile-content")
return d2
@parse_cls
def tile3(imgset=[], **kw):
"""<!-- Tile with image set (max 5 images) -->
<div class="tile">
<div class="tile-content image-set">
<img src="...">
<img src="...">
<img src="...">
<img src="...">
<img src="...">
</div>
</div>"""
    ctx=[]
for img in imgset:
i = image(src=img)
ctx.append(i)
ctx.append(kw['tile_label'])
d2 = div(ctx=ctx, cls="tile-content image-set")
return d2
@parse_cls
def tile4(imgctn="", overlay="", **kw):
"""<!-- Tile with image container -->
<div class="tile">
<div class="tile-content">
<div class="image-container">
<div class="frame">
<img src="...">
</div>
<div class="image-overlay">
Overlay text
</div>
</div>
</div>
</div>"""
i=image(src=imgctn)
d1 = div(i, cls="frame")
d2 = div(overlay, cls="image-overlay")
dic = div(d1, d2,kw['tile_label'], cls="image-container")
dtc = div(dic, cls="tile-content")
return dtc
@parse_cls
def tile_carousel(carousel=[], **kw):
"""<!-- Tile with carousel -->
<div class="tile">
<div class="tile-content">
<div class="carousel" data-role="carousel">
<div class="slide"><img src="..."></div>
...
<div class="slide"><img src="..."></div>
</div>
</div>
</div>"""
ctx=[]
for k in carousel:
img=image(src=k, attrs={'data-role':"fitImage", 'data-format':"fill"})
d1 = div(img, cls="slide")
ctx.append(d1)
ctx.append(kw['tile_label'])
d2 = div(ctx=ctx, cls="carousel", attrs={'data-role':"carousel", 'data-controls':"false",'data-height':"100%", 'data-width':"100%"})
dtc = div(d2, cls="tile-content")
return dtc
@parse_cls
def tile_slide(slide="", over="", direction='slide-up', **kw):
"""<!-- Tile with slide-up effect -->
<div class="tile">
<div class="tile-content slide-up">
<div class="slide">
... Main slide content ...
</div>
<div class="slide-over">
... Over slide content here ...
</div>
</div>
</div>"""
img = image(src=slide)
s = div(img, cls="slide")
o = div(over, cls="slide-over")
dtc = div(s, o,kw['tile_label'], cls="tile-content %s" % direction)
return dtc
@parse_cls
def tile_panel(panel="", header="", **kw):
"""<div class="tile-big tile-wide-y bg-white" data-role="tile">
<div class="tile-content">
<div class="panel" style="height: 100%">
<div class="heading bg-darkRed fg-white"><span class="title text-light">Meeting</span></div>
<div class="content fg-dark clear-float" style="height: 100%">
...
</div>
</div>
</div>
</div>"""
ctx = div(panel, cls="content fg-dark clear-float", style="height: 100%")
s = kw['tile_label']
hdr = div(s, cls="heading bg-darkOrange fg-white")
pnl = div(hdr, ctx, cls="panel", style="height: 100%")
dtc = div(pnl, cls="tile-content")
return dtc
def Tile(*args, **kw):
if kw.has_key('imgset'):
return tile3(*args, **kw)
elif kw.has_key('imgctn'):
return tile4(*args, **kw)
elif kw.has_key('carousel'):
return tile_carousel(*args, **kw)
elif kw.has_key('slide'):
return tile_slide(*args, **kw)
elif kw.has_key('img'):
return tile_image(*args, **kw)
elif kw.has_key('panel'):
return tile_panel(*args, **kw)
elif kw.has_key('label'):
return tile2(*args, **kw)
elif kw.has_key('icon'):
return tile1(*args, **kw)
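# Editor's sketch of the keyword-based dispatch above (illustrative arguments;
# 'icon-home' is an assumed Metro icon class name, not part of this module).
def _example_tiles():
    icon_tile = Tile(icon='icon-home', text='Home', url='/home')
    badge_tile = Tile(label='Mail', badge='5', text='Inbox')
    return str(icon_tile), str(badge_tile)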
| 35.47767
| 448
| 0.516283
|
f44c960b1c38d32187f7e51a2041748216a95b94
| 1,370
|
py
|
Python
|
_unittests/ut_onnx_conv/test_rt_valid_model_lightgbm.py
|
xadupre/mlprodict
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
[
"MIT"
] | 1
|
2020-12-18T03:49:53.000Z
|
2020-12-18T03:49:53.000Z
|
_unittests/ut_onnx_conv/test_rt_valid_model_lightgbm.py
|
xadupre/mlprodict
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
[
"MIT"
] | null | null | null |
_unittests/ut_onnx_conv/test_rt_valid_model_lightgbm.py
|
xadupre/mlprodict
|
f82c8a26a60104948c67849b1c4af95ca812c153
|
[
"MIT"
] | null | null | null |
"""
@brief test log(time=9s)
"""
import unittest
from logging import getLogger
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import ExtTestCase, skipif_circleci
from sklearn.exceptions import ConvergenceWarning
try:
from sklearn.utils._testing import ignore_warnings
except ImportError:
from sklearn.utils.testing import ignore_warnings
from skl2onnx import __version__ as skl2onnx_version
from mlprodict.onnxrt.validate import enumerate_validated_operator_opsets
class TestRtValidateLightGbm(ExtTestCase):
@skipif_circleci('too long')
@ignore_warnings(category=(UserWarning, ConvergenceWarning, RuntimeWarning))
def test_rt_LGBMClassifier_onnxruntime1(self):
fLOG(__file__, self._testMethodName, OutputPrint=__name__ == "__main__")
logger = getLogger('skl2onnx')
logger.disabled = True
verbose = 1 if __name__ == "__main__" else 0
debug = True
buffer = []
def myprint(*args, **kwargs):
buffer.append(" ".join(map(str, args)))
rows = list(enumerate_validated_operator_opsets(
verbose, models={"LGBMClassifier"},
fLOG=myprint,
runtime='onnxruntime1', debug=debug,
filter_exp=lambda m, p: '-64' not in p))
self.assertGreater(len(rows), 1)
if __name__ == "__main__":
unittest.main()
| 31.860465
| 80
| 0.706569
|
a374b7206d02f2d0845b999a66b499151fa619f3
| 385
|
py
|
Python
|
stubs/micropython-v1_11-esp32/ussl.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_11-esp32/ussl.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
stubs/micropython-v1_11-esp32/ussl.py
|
mattytrentini/micropython-stubs
|
4d596273823b69e9e5bcf5fa67f249c374ee0bbc
|
[
"MIT"
] | null | null | null |
"""
Module: 'ussl' on micropython-v1.11-esp32
"""
# MCU: {'ver': 'v1.11', 'build': '', 'platform': 'esp32', 'port': 'esp32', 'machine': 'ESP32 module with ESP32', 'release': '1.11.0', 'nodename': 'esp32', 'name': 'micropython', 'family': 'micropython', 'sysname': 'esp32', 'version': '1.11.0'}
# Stubber: 1.5.4
from typing import Any
def wrap_socket(*args, **kwargs) -> Any:
...
| 35
| 243
| 0.6
|
50254deb8a36e240167f07b2c131880e856a6d52
| 40
|
py
|
Python
|
start/Working with numbers/add/num2.py
|
codermoji-contrib/python
|
764bffaf0e92270be196aa5728f255aaaf5b8150
|
[
"MIT"
] | null | null | null |
start/Working with numbers/add/num2.py
|
codermoji-contrib/python
|
764bffaf0e92270be196aa5728f255aaaf5b8150
|
[
"MIT"
] | null | null | null |
start/Working with numbers/add/num2.py
|
codermoji-contrib/python
|
764bffaf0e92270be196aa5728f255aaaf5b8150
|
[
"MIT"
] | null | null | null |
print(12345 + 98765)
print(12345 * 99)
| 10
| 20
| 0.675
|
4a725d86d40821b305677375c76f76c6f7bcf114
| 325
|
py
|
Python
|
build/lib/pyggi/utils/helpers.py
|
s-marta/pyggi-bloa
|
aefe15eda32e713dc8402c9b8d4bcb7cb05b31c8
|
[
"MIT"
] | 26
|
2018-01-30T13:07:51.000Z
|
2021-08-01T13:41:48.000Z
|
build/lib/pyggi/utils/helpers.py
|
s-marta/pyggi-bloa
|
aefe15eda32e713dc8402c9b8d4bcb7cb05b31c8
|
[
"MIT"
] | 9
|
2018-01-10T02:22:10.000Z
|
2021-12-08T06:28:19.000Z
|
build/lib/pyggi/utils/helpers.py
|
s-marta/pyggi-bloa
|
aefe15eda32e713dc8402c9b8d4bcb7cb05b31c8
|
[
"MIT"
] | 9
|
2019-02-11T19:00:52.000Z
|
2021-12-30T07:48:52.000Z
|
import os
import random  # used by weighted_choice below
weighted_choice = lambda s : random.choice(sum(([v] * wt for v,wt in s),[]))
def get_file_extension(file_path):
"""
:param file_path: The path of file
:type file_path: str
:return: file extension
:rtype: str
"""
_, file_extension = os.path.splitext(file_path)
return file_extension
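# Editor's sketch (illustrative): with weights 2 and 1, 'a' is picked with
# probability 2/3 and 'b' with probability 1/3.
def _example_weighted_choice():
    return weighted_choice([('a', 2), ('b', 1)])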
| 25
| 76
| 0.661538
|
d927eaefc5004586674b02e4c433d6bf0b7a695f
| 1,235
|
py
|
Python
|
playlist/inventories/json_inventory.py
|
hugoprudente/playlist-manager
|
51ae833e717c8a814c6b86726c8b099ba2afce6a
|
[
"Apache-2.0"
] | 1
|
2020-12-28T15:50:41.000Z
|
2020-12-28T15:50:41.000Z
|
playlist/inventories/json_inventory.py
|
hugoprudente/playlist-manager
|
51ae833e717c8a814c6b86726c8b099ba2afce6a
|
[
"Apache-2.0"
] | 1
|
2021-06-13T15:02:51.000Z
|
2021-06-13T15:02:51.000Z
|
playlist/inventories/json_inventory.py
|
hugoprudente/playlist-manager
|
51ae833e717c8a814c6b86726c8b099ba2afce6a
|
[
"Apache-2.0"
] | null | null | null |
import io
import json
import sys
from pathlib import Path
import jmespath
class JSONInventory:
def write_file(
self,
file_path,
data,
extra_data=None,
fields=None,
format=None,
merge=True,
):
# raw data
local_data = data
file_path = Path(file_path)
if file_path.exists() and merge: # pragma: no cover
with io.open(str(file_path)) as open_file:
sys.stdout.write("merge")
sys.stdout.write("\n")
# object_merge(json.load(open_file), file_data)
with io.open(
str(file_path),
"w",
) as open_file:
json.dump(local_data, open_file)
def stdout(self, data, extra_data=None, fields=None, format=None):
if fields is not None and fields:
expression = jmespath.compile(fields)
sys.stdout.write(json.dumps(expression.search(data)))
sys.stdout.write("\n")
else:
if extra_data is not None:
sys.stdout.write(json.dumps(extra_data))
sys.stdout.write("\n")
sys.stdout.write(json.dumps(data))
sys.stdout.write("\n")
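# Editor's sketch (illustrative data; the 'name' field and the jmespath
# expression are assumptions, not part of the original inventory format):
def _example_stdout():
    inv = JSONInventory()
    inv.stdout([{"name": "song-1"}, {"name": "song-2"}], fields="[].name")
    # prints: ["song-1", "song-2"]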
| 26.847826
| 70
| 0.551417
|
60ed140be06ba7dd780f2d62692ba60ea17b68f7
| 3,304
|
py
|
Python
|
eval.py
|
mehdidc/keras-yolo3
|
459b08438b13b6aacd1464960b1ad7d816a601d6
|
[
"MIT"
] | null | null | null |
eval.py
|
mehdidc/keras-yolo3
|
459b08438b13b6aacd1464960b1ad7d816a601d6
|
[
"MIT"
] | null | null | null |
eval.py
|
mehdidc/keras-yolo3
|
459b08438b13b6aacd1464960b1ad7d816a601d6
|
[
"MIT"
] | null | null | null |
from PIL import Image
import numpy as np
from collections import defaultdict
from skimage.transform import resize
from skimage.io import imread
from yolo3.yolo import YOLO
from clize import run
from joblib import dump
eps = 1e-12
iou_threshold = 0.45
def evaluate(data, model_path, anchors_path, classes_path):
yolo = YOLO(
model_path=model_path,
anchors_path=anchors_path,
classes_path=classes_path,
score_threshold=0.0,
iou=iou_threshold,
max_boxes=10000,
)
lines = open(data).readlines()
B_list = []
BP_list = []
for i, l in enumerate(lines):
toks = l.strip().split(' ')
image_filename = toks[0]
boxes = toks[1:]
x = imread(image_filename)
x = Image.fromarray(x)
B = [list(map(int, b.split(','))) for b in boxes]
out_boxes, out_scores, out_classes = yolo.predict_image(x)
BP = [list(tuple(b) + (c, s)) for b, s, c in zip(out_boxes, out_scores, out_classes)]
B_list.extend(B)
BP_list.extend(BP)
        if i % 10 == 0:
            print('[{:05d}]/[{:05d}]'.format(i, len(lines)))
stats = get_stats(B_list, BP_list)
for k in sorted(stats.keys()):
v = stats[k]
print('{}: {:.2f}'.format(k, v))
def get_stats(B, BP):
precs, recs = PR(B, BP)
d = {}
for th in (0.5, 0.6, 0.8, 0.9, 0.95, 0.99):
vals = [p for p, r in zip(precs, recs) if r >= th]
if len(vals):
p = max(vals)
else:
p = 0
vals = [r for p, r in zip(precs, recs) if p >= th]
if len(vals):
r = max(vals)
else:
r = 0
d['prec({:.2f})'.format(th)] = p
d['rec({:.2f})'.format(th)] = r
bmax = max(B, key=lambda b:(b[2]-b[0]) * (b[3]-b[1]))
detected = 0
for i, p in enumerate(BP):
*bp, pred_class_id, score = p
*bt, class_id = bmax
if iou(bp, bt) >= iou_threshold and class_id == pred_class_id:
detected = 1
break
d['detected'] = detected
return d
def PR(B, BP, iou_threshold=0.45):
R = np.zeros(len(B))
P = np.zeros(len(BP))
nb_precision = 0
nb_recall = 0
precisions = []
recalls = []
BP = sorted(BP, key=lambda p:p[-1], reverse=True)
for i, p in enumerate(BP):
*bp, pred_class_id, score = p
for j, t in enumerate(B):
*bt, class_id = t
if iou(bp, bt) >= iou_threshold and class_id == pred_class_id:
if R[j] == 0:
R[j] = 1
nb_recall += 1
if P[i] == 0:
P[i] = 1
nb_precision += 1
p = nb_precision / (i + 1)
r = nb_recall / len(B)
precisions.append(p)
recalls.append(r)
return precisions, recalls
def iou(bbox1, bbox2):
x, y, xm, ym = bbox1
w = xm - x
h = ym - y
xx, yy, xxm, yym = bbox2
ww = xxm - xx
hh = yym - yy
winter = min(x + w, xx + ww) - max(x, xx)
hinter = min(y + h, yy + hh) - max(y, yy)
if winter < 0 or hinter < 0:
inter = 0
else:
inter = winter * hinter
union = w * h + ww * hh - inter
return inter / (union + eps)
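# Editor's worked example: two unit squares overlapping by half give
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3 (boxes in x, y, x_max, y_max form).
def _example_iou():
    return iou([0, 0, 1, 1], [0.5, 0, 1.5, 1])  # ~0.3333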
if __name__ == '__main__':
run(evaluate)
| 27.533333
| 93
| 0.517857
|
cb3c2781d56de0cd4998f91aa98ff7dd70fd8283
| 916
|
py
|
Python
|
apps/home/quickstart.py
|
Fayzan-Bhatti/MY_Product
|
e7a2f3b64b3f3fb421f95bb779fa8e480c3c23a8
|
[
"MIT"
] | null | null | null |
apps/home/quickstart.py
|
Fayzan-Bhatti/MY_Product
|
e7a2f3b64b3f3fb421f95bb779fa8e480c3c23a8
|
[
"MIT"
] | null | null | null |
apps/home/quickstart.py
|
Fayzan-Bhatti/MY_Product
|
e7a2f3b64b3f3fb421f95bb779fa8e480c3c23a8
|
[
"MIT"
] | null | null | null |
from google.analytics.data_v1beta import BetaAnalyticsDataClient
from google.analytics.data_v1beta.types import DateRange
from google.analytics.data_v1beta.types import Dimension
from google.analytics.data_v1beta.types import Metric
from google.analytics.data_v1beta.types import RunReportRequest
def sample_run_report(property_id="82468401"):
client = BetaAnalyticsDataClient()
request = RunReportRequest(
property=f"properties/{property_id}",
dimensions=[Dimension(name="city")],
metrics=[Metric(name="activeUsers")],
date_ranges=[DateRange(start_date="2019-03-26", end_date="today")],
)
    print(f"properties/{property_id}")
response = client.run_report(request)
print("Report result:")
for row in response.rows:
print(row.dimension_values[0].value, row.metric_values[0].value)
if __name__ == "__main__":
sample_run_report()
print('main function calling')
| 36.64
| 75
| 0.744541
|
b89e9d33f72c30fa423ca1e71dcb171264f899c3
| 227
|
py
|
Python
|
src/0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py
|
lyphui/Just-Code
|
e0c3c3ecb67cb805080ff686e88522b2bffe7741
|
[
"MIT"
] | 782
|
2019-11-19T08:20:49.000Z
|
2022-03-25T06:59:09.000Z
|
src/0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py
|
Heitao5200/Just-Code
|
5bb3ee485a103418e693b7ec8e26dc84f3691c79
|
[
"MIT"
] | 1
|
2021-03-04T12:21:01.000Z
|
2021-03-05T01:23:54.000Z
|
src/0121.best-time-to-buy-and-sell-stock/best-time-to-buy-and-sell-stock.py
|
Heitao5200/Just-Code
|
5bb3ee485a103418e693b7ec8e26dc84f3691c79
|
[
"MIT"
] | 155
|
2019-11-20T08:20:42.000Z
|
2022-03-19T07:28:09.000Z
|
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
min_p, max_v = float('inf'), 0
for p in prices:
min_p = min(min_p, p)
max_v = max(max_v, p - min_p)
return max_v
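# Editor's sketch (illustrative): buy at 1, sell at 6, for a maximum profit of 5.
def _example_max_profit():
    return Solution().maxProfit([7, 1, 5, 3, 6, 4])  # -> 5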
| 32.428571
| 50
| 0.528634
|
e4b82e231c5c28a3b546edaf8076ea55fc30483a
| 220
|
py
|
Python
|
core/commands/owner/__init__.py
|
salvatorecalo/nebula8
|
b63c0a1a98ccc955320449eb260b62f70ab5ce0a
|
[
"Apache-2.0"
] | 1
|
2021-09-28T00:37:36.000Z
|
2021-09-28T00:37:36.000Z
|
core/commands/owner/__init__.py
|
salvatorecalo/nebula8
|
b63c0a1a98ccc955320449eb260b62f70ab5ce0a
|
[
"Apache-2.0"
] | null | null | null |
core/commands/owner/__init__.py
|
salvatorecalo/nebula8
|
b63c0a1a98ccc955320449eb260b62f70ab5ce0a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
"""Import Files"""
__all__ = ["add_community", "exit", "broadcast", "server_info", "superban", "test", "whitelist"]
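# The star import below pulls in the submodules listed in __all__.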
from core.commands.owner import *
| 24.444444
| 90
| 0.690909
|
13baf448bca8776a5cbed486295710b7747f537d
| 7,613
|
py
|
Python
|
shellsploit/disassembly/Syscalls/linux_64.py
|
riusksk/shellsploit-library
|
9c0e1fec2d510cc1195194ce18f5b6f0aeface9f
|
[
"MIT"
] | 10
|
2016-10-09T10:21:43.000Z
|
2020-04-20T05:28:50.000Z
|
shellsploit/disassembly/Syscalls/linux_64.py
|
riusksk/shellsploit-library
|
9c0e1fec2d510cc1195194ce18f5b6f0aeface9f
|
[
"MIT"
] | null | null | null |
shellsploit/disassembly/Syscalls/linux_64.py
|
riusksk/shellsploit-library
|
9c0e1fec2d510cc1195194ce18f5b6f0aeface9f
|
[
"MIT"
] | 7
|
2017-03-22T18:21:34.000Z
|
2019-12-02T20:22:47.000Z
|
#------------------Bombermans Team------------------------------------------#
# Author : B3mB4m
# Concat : b3mb4m@protonmail.com
# Project : https://github.com/b3mb4m/shellsploit-library
# LICENSE : https://github.com/b3mb4m/shellsploit-library/blob/master/LICENSE
#----------------------------------------------------------------------------#
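# Maps Linux x86-64 syscall names to their syscall numbers (kept as strings).
# Note: the name "list" shadows the Python builtin.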
list = {
"read":"0",
"write":"1",
"open":"2",
"close":"3",
"stat":"4",
"fstat":"5",
"lstat":"6",
"poll":"7",
"lseek":"8",
"mmap":"9",
"mprotect":"10",
"munmap":"11",
"brk":"12",
"rt_sigaction":"13",
"rt_sigprocmask":"14",
"rt_sigreturn":"15",
"ioctl":"16",
"pread64":"17",
"pwrite64":"18",
"readv":"19",
"writev":"20",
"access":"21",
"pipe":"22",
"select":"23",
"sched_yield":"24",
"mremap":"25",
"msync":"26",
"mincore":"27",
"madvise":"28",
"shmget":"29",
"shmat":"30",
"shmctl":"31",
"dup":"32",
"dup2":"33",
"pause":"34",
"nanosleep":"35",
"getitimer":"36",
"alarm":"37",
"setitimer":"38",
"getpid":"39",
"sendfile":"40",
"socket":"41",
"connect":"42",
"accept":"43",
"sendto":"44",
"recvfrom":"45",
"sendmsg":"46",
"recvmsg":"47",
"shutdown":"48",
"bind":"49",
"listen":"50",
"getsockname":"51",
"getpeername":"52",
"socketpair":"53",
"setsockopt":"54",
"getsockopt":"55",
"clone":"56",
"fork":"57",
"vfork":"58",
"execve":"59",
"exit":"60",
"wait4":"61",
"kill":"62",
"uname":"63",
"semget":"64",
"semop":"65",
"semctl":"66",
"shmdt":"67",
"msgget":"68",
"msgsnd":"69",
"msgrcv":"70",
"msgctl":"71",
"fcntl":"72",
"flock":"73",
"fsync":"74",
"fdatasync":"75",
"truncate":"76",
"ftruncate":"77",
"getdents":"78",
"getcwd":"79",
"chdir":"80",
"fchdir":"81",
"rename":"82",
"mkdir":"83",
"rmdir":"84",
"creat":"85",
"link":"86",
"unlink":"87",
"symlink":"88",
"readlink":"89",
"chmod":"90",
"fchmod":"91",
"chown":"92",
"fchown":"93",
"lchown":"94",
"umask":"95",
"gettimeofday":"96",
"getrlimit":"97",
"getrusage":"98",
"sysinfo":"99",
"times":"100",
"ptrace":"101",
"getuid":"102",
"syslog":"103",
"getgid":"104",
"setuid":"105",
"setgid":"106",
"geteuid":"107",
"getegid":"108",
"setpgid":"109",
"getppid":"110",
"getpgrp":"111",
"setsid":"112",
"setreuid":"113",
"setregid":"114",
"getgroups":"115",
"setgroups":"116",
"setresuid":"117",
"getresuid":"118",
"setresgid":"119",
"getresgid":"120",
"getpgid":"121",
"setfsuid":"122",
"setfsgid":"123",
"getsid":"124",
"capget":"125",
"capset":"126",
"rt_sigpending":"127",
"rt_sigtimedwait":"128",
"rt_sigqueueinfo":"129",
"rt_sigsuspend":"130",
"sigaltstack":"131",
"utime":"132",
"mknod":"133",
"uselib":"134",
"personality":"135",
"ustat":"136",
"statfs":"137",
"fstatfs":"138",
"sysfs":"139",
"getpriority":"140",
"setpriority":"141",
"sched_setparam":"142",
"sched_getparam":"143",
"sched_setscheduler":"144",
"sched_getscheduler":"145",
"sched_get_priority_max":"146",
"sched_get_priority_min":"147",
"sched_rr_get_interval":"148",
"mlock":"149",
"munlock":"150",
"mlockall":"151",
"munlockall":"152",
"vhangup":"153",
"modify_ldt":"154",
"pivot_root":"155",
"_sysctl":"156",
"prctl":"157",
"arch_prctl":"158",
"adjtimex":"159",
"setrlimit":"160",
"chroot":"161",
"sync":"162",
"acct":"163",
"settimeofday":"164",
"mount":"165",
"umount2":"166",
"swapon":"167",
"swapoff":"168",
"reboot":"169",
"sethostname":"170",
"setdomainname":"171",
"iopl":"172",
"ioperm":"173",
"create_module":"174",
"init_module":"175",
"delete_module":"176",
"get_kernel_syms":"177",
"query_module":"178",
"quotactl":"179",
"nfsservctl":"180",
"getpmsg":"181",
"putpmsg":"182",
"afs_syscall":"183",
"tuxcall":"184",
"security":"185",
"gettid":"186",
"readahead":"187",
"setxattr":"188",
"lsetxattr":"189",
"fsetxattr":"190",
"getxattr":"191",
"lgetxattr":"192",
"fgetxattr":"193",
"listxattr":"194",
"llistxattr":"195",
"flistxattr":"196",
"removexattr":"197",
"lremovexattr":"198",
"fremovexattr":"199",
"tkill":"200",
"time":"201",
"futex":"202",
"sched_setaffinity":"203",
"sched_getaffinity":"204",
"set_thread_area":"205",
"io_setup":"206",
"io_destroy":"207",
"io_getevents":"208",
"io_submit":"209",
"io_cancel":"210",
"get_thread_area":"211",
"lookup_dcookie":"212",
"epoll_create":"213",
"epoll_ctl_old":"214",
"epoll_wait_old":"215",
"remap_file_pages":"216",
"getdents64":"217",
"set_tid_address":"218",
"restart_syscall":"219",
"semtimedop":"220",
"fadvise64":"221",
"timer_create":"222",
"timer_settime":"223",
"timer_gettime":"224",
"timer_getoverrun":"225",
"timer_delete":"226",
"clock_settime":"227",
"clock_gettime":"228",
"clock_getres":"229",
"clock_nanosleep":"230",
"exit_group":"231",
"epoll_wait":"232",
"epoll_ctl":"233",
"tgkill":"234",
"utimes":"235",
"vserver":"236",
"mbind":"237",
"set_mempolicy":"238",
"get_mempolicy":"239",
"mq_open":"240",
"mq_unlink":"241",
"mq_timedsend":"242",
"mq_timedreceive":"243",
"mq_notify":"244",
"mq_getsetattr":"245",
"kexec_load":"246",
"waitid":"247",
"add_key":"248",
"request_key":"249",
"keyctl":"250",
"ioprio_set":"251",
"ioprio_get":"252",
"inotify_init":"253",
"inotify_add_watch":"254",
"inotify_rm_watch":"255",
"migrate_pages":"256",
"openat":"257",
"mkdirat":"258",
"mknodat":"259",
"fchownat":"260",
"futimesat":"261",
"newfstatat":"262",
"unlinkat":"263",
"renameat":"264",
"linkat":"265",
"symlinkat":"266",
"readlinkat":"267",
"fchmodat":"268",
"faccessat":"269",
"pselect6":"270",
"ppoll":"271",
"unshare":"272",
"set_robust_list":"273",
"get_robust_list":"274",
"splice":"275",
"tee":"276",
"sync_file_range":"277",
"vmsplice":"278",
"move_pages":"279",
"utimensat":"280",
"epoll_pwait":"281",
"signalfd":"282",
"timerfd_create":"283",
"eventfd":"284",
"fallocate":"285",
"timerfd_settime":"286",
"timerfd_gettime":"287",
"accept4":"288",
"signalfd4":"289",
"eventfd2":"290",
"epoll_create1":"291",
"dup3":"292",
"pipe2":"293",
"inotify_init1":"294",
"preadv":"295",
"pwritev":"296",
"rt_tgsigqueueinfo":"297",
"perf_event_open":"298",
"recvmmsg":"299",
"fanotify_init":"300",
"fanotify_mark":"301",
"prlimit64":"302",
"name_to_handle_at":"303",
"open_by_handle_at":"304",
"clock_adjtime":"305",
"syncfs":"306",
"sendmmsg":"307",
"setns":"308",
"getcpu":"309",
"process_vm_readv":"310",
"process_vm_writev":"311",
"kcmp":"312",
"finit_module":"313",
"sched_setattr":"314",
"sched_getattr":"315",
"renameat2":"316",
"seccomp":"317",
"getrandom":"318",
"memfd_create":"319",
"kexec_file_load":"320",
"bpf":"321",
"execveat":"322"
}
# print(list["execve"])
| 22.657738
| 78
| 0.506371
|
a390c5a40a9be6efc34936ffbee9adbf36ba21bf
| 3,652
|
py
|
Python
|
tests/annotation/general/test_line_path.py
|
saltastroops/imephu
|
0c302a73d01fe3ad018e7adf4b91e0beaecc6709
|
[
"MIT"
] | null | null | null |
tests/annotation/general/test_line_path.py
|
saltastroops/imephu
|
0c302a73d01fe3ad018e7adf4b91e0beaecc6709
|
[
"MIT"
] | 3
|
2022-02-02T20:51:05.000Z
|
2022-02-03T21:13:27.000Z
|
tests/annotation/general/test_line_path.py
|
saltastroops/imephu
|
0c302a73d01fe3ad018e7adf4b91e0beaecc6709
|
[
"MIT"
] | null | null | null |
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from imephu.annotation.general import CircleAnnotation, LinePathAnnotation
from imephu.finder_chart import FinderChart
def test_line_path_annotation(fits_file, check_finder):
"""Test line path annotations."""
finder_chart = FinderChart(fits_file)
closed_path_vertices = [
SkyCoord(ra="00h40m40s", dec="-59d56m00s"),
SkyCoord(ra="00h40m20s", dec="-59d55m00s"),
SkyCoord(ra="00h40m00s", dec="-60d00m00s"),
]
closed_path_annotation = LinePathAnnotation(
closed_path_vertices,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="green",
alpha=0.2,
)
open_path_vertices = [
SkyCoord(ra="00h39m20s", dec="-60d04m00s"),
SkyCoord(ra="00h39m40s", dec="-60d05m00s"),
SkyCoord(ra="00h40m00s", dec="-60d00m00s"),
]
open_path_annotation = LinePathAnnotation(
open_path_vertices, wcs=finder_chart.wcs, closed=False, edgecolor="orange"
)
finder_chart.add_annotation(closed_path_annotation)
finder_chart.add_annotation(open_path_annotation)
check_finder(finder_chart)
@pytest.mark.parametrize(
"pivot,angle",
[
(SkyCoord(ra="00h39m40s", dec=-60 * u.deg), 0 * u.deg),
(SkyCoord(ra="00h39m40s", dec=-60 * u.deg), -90 * u.deg),
],
)
def test_line_path_annotation_rotated(pivot, angle, fits_file, check_finder, legend):
"""Test rotated circle annotations."""
finder_chart = FinderChart(fits_file)
line_path_annotation = LinePathAnnotation(
[
SkyCoord(ra="00h39m50s", dec="-60d00m00s"),
SkyCoord(ra="00h39m50s", dec="-60d01m00s"),
SkyCoord(ra="00h40m00s", dec="-60d00m00s"),
],
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="gray",
alpha=0.2,
)
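    # rotate() returns a new annotation; restyle the copy via the private
    # _kwargs dict so it is visually distinct from the original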
    rotated_line_path_annotation = line_path_annotation.rotate(pivot, angle)
    rotated_line_path_annotation._kwargs["edgecolor"] = "blue"
    rotated_line_path_annotation._kwargs["facecolor"] = "blue"
pivot_marker = CircleAnnotation(
pivot,
12 * u.arcsec,
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="orange",
alpha=0.7,
)
finder_chart.add_annotation(pivot_marker)
finder_chart.add_annotation(line_path_annotation)
    finder_chart.add_annotation(rotated_line_path_annotation)
finder_chart.add_annotation(
legend(f"Rotated by {angle.to_value(u.deg)} deg", wcs=finder_chart.wcs)
)
check_finder(finder_chart)
@pytest.mark.parametrize("displacement", [(0, 0) * u.arcmin, (2.5, -4) * u.arcmin])
def test_line_path_annotation_translated(
displacement, fits_file, fits_center, check_finder, legend
):
"""Test translated circle annotations."""
finder_chart = FinderChart(fits_file)
line_path_annotation = LinePathAnnotation(
[
SkyCoord(ra="00h39m40s", dec="-59d58m00s"),
SkyCoord(ra="00h39m50s", dec="-59d58m00s"),
SkyCoord(ra="00h39m40s", dec="-59d59m00s"),
],
wcs=finder_chart.wcs,
edgecolor="none",
facecolor="gray",
)
translated_line_path_annotation = line_path_annotation.translate(displacement)
translated_line_path_annotation._kwargs["color"] = "blue"
finder_chart.add_annotation(line_path_annotation)
finder_chart.add_annotation(translated_line_path_annotation)
finder_chart.add_annotation(
legend(
f"Translated by {displacement.to_value(u.arcmin)} arcmin",
wcs=finder_chart.wcs,
)
)
check_finder(finder_chart)
| 34.780952
| 85
| 0.672508
|
30c7f000491fb9e814029756fff40525a3ff2fe5
| 1,261
|
py
|
Python
|
hood_app/migrations/0002_auto_20180807_1654.py
|
ephantuskaranja/hood_watch
|
3d91e90d1a9c8c7b73d5dcea17cb7bb83e5b71ea
|
[
"MIT"
] | null | null | null |
hood_app/migrations/0002_auto_20180807_1654.py
|
ephantuskaranja/hood_watch
|
3d91e90d1a9c8c7b73d5dcea17cb7bb83e5b71ea
|
[
"MIT"
] | null | null | null |
hood_app/migrations/0002_auto_20180807_1654.py
|
ephantuskaranja/hood_watch
|
3d91e90d1a9c8c7b73d5dcea17cb7bb83e5b71ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-07 13:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hood_app', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='recommendation',
options={'verbose_name_plural': 'Recommendations'},
),
migrations.AddField(
model_name='recommendation',
name='desc',
field=models.TextField(blank=True, max_length=250),
),
migrations.AddField(
model_name='reports',
name='desc',
field=models.TextField(blank=True, max_length=250),
),
migrations.AddField(
model_name='reports',
name='outstanding',
field=models.CharField(choices=[('RACISM', 'racism'), ('BAD-SERVICES', 'Bad-Services'), ('BRIBERY', 'Bribery')], max_length=60, null=True),
),
migrations.AlterField(
model_name='reports',
name='institution_category',
field=models.CharField(blank=True, choices=[('PUBLIC', 'public'), ('PRIVATE', 'private')], max_length=60),
),
]
| 31.525
| 151
| 0.580492
|
0990598171fe0d2ddc13b1b9d7c83a6b984de51f
| 3,675
|
py
|
Python
|
scripts/AnEn_CNN/DATA03_supplemental_refine.py
|
yingkaisha/rainbow
|
cee707e8fe29a6606041f0e26b33720793fe129b
|
[
"MIT"
] | 6
|
2021-02-17T20:47:51.000Z
|
2021-03-20T05:27:38.000Z
|
scripts/AnEn_CNN/DATA03_supplemental_refine.py
|
yingkaisha/rainbow
|
cee707e8fe29a6606041f0e26b33720793fe129b
|
[
"MIT"
] | null | null | null |
scripts/AnEn_CNN/DATA03_supplemental_refine.py
|
yingkaisha/rainbow
|
cee707e8fe29a6606041f0e26b33720793fe129b
|
[
"MIT"
] | 1
|
2021-03-10T06:08:05.000Z
|
2021-03-10T06:08:05.000Z
|
import sys
import time
import os.path
from glob import glob
from datetime import datetime, timedelta
# data tools
import h5py
import numpy as np
# custom tools
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/utils/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/Analog_BC/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/Analog_BC/utils/')
import data_utils as du
from namelist import *
# importing domain information
with h5py.File(save_dir+'BC_domain_info.hdf', 'r') as h5io:
base_lon = h5io['base_lon'][...]
base_lat = h5io['base_lat'][...]
etopo_025 = h5io['etopo_base'][...]
land_mask = h5io['land_mask_base'][...]
land_mask_bc = h5io['land_mask_bc'][...]
bc_in_base = np.ones(land_mask.shape).astype(bool)
bc_in_base[bc_inds[0]:bc_inds[1], bc_inds[2]:bc_inds[3]] = land_mask_bc
grid_shape = land_mask.shape
# subsetting by land mask
IND = []
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
if ~bc_in_base[i, j]:
IND.append([i, j])
IND = np.array(IND, dtype=int)
N_grids = len(IND)
# -------------------------------------- #
# Combine and save
N_S = 41
SL_xy = np.full((12,) + grid_shape + (N_S, 3), np.nan)
for month in range(12):
with h5py.File(save_dir+'S40_mon{}.hdf'.format(month), 'r') as h5io:
sl_ind = h5io['IND'][...]
# place the current month into allocation
SL_xy[month, ...] = sl_ind
tuple_save = (SL_xy,)
label_save = ['SL_xy']
du.save_hdf5(tuple_save, label_save, save_dir, 'SL40_d4.hdf')
# -------------------------------------- #
# remove duplicates
N_range = np.array([20, 40])
for N_S in N_range:
with h5py.File(save_dir+'SL40_d4.hdf', 'r') as h5io:
SL_xy = h5io['SL_xy'][..., :N_S, :]
inds_to_inds = {}
# flattening for preserving unique (ix, iy) pairs
SL_xy_mask = SL_xy[:, ~bc_in_base, :, :]
Ix = SL_xy_mask[..., 0]
Iy = SL_xy_mask[..., 1]
# get unique pairs
Ix_flat = Ix.reshape(12*N_grids*N_S)
Iy_flat = Iy.reshape(12*N_grids*N_S)
IxIy = np.concatenate((Ix_flat[:, None], Iy_flat[:, None]), axis=1)
IxIy_unique = np.unique(IxIy, axis=0)
    # index encoding for np.searchsorted
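    # ix*9.99 + iy*0.01 maps each (ix, iy) pair to a distinct scalar as long
    # as the indices stay well below ~999, so pairs can be looked up in a
    # sorted 1d array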
IxIy_1d = np.sort(IxIy_unique[:, 0]*9.99+IxIy_unique[:, 1]*0.01)
    # map each pair to the unique pairs
for mon in range(12):
        ind_to_ind = np.empty((N_grids, N_S), dtype=int)
for i in range(N_grids):
ix = Ix[mon, i, :]
iy = Iy[mon, i, :]
# applying the same encoding rule
ixiy_1d = ix*9.99+iy*0.01
# reverse select inds
for s in range(N_S):
ind_to_ind[i, s] = (np.searchsorted(IxIy_1d, ixiy_1d[s]))
inds_to_inds['{}'.format(mon)] = ind_to_ind
    # cast the unique index pairs back to plain integers
    IxIy_unique = IxIy_unique.astype(int)
    # verifying the reverse mapping of inds
for mon in range(12):
for i in range(N_grids):
for s in range(N_S):
ix = Ix[mon, i, s]
iy = Iy[mon, i, s]
ind_to_ind = inds_to_inds['{}'.format(mon)]
ix_mapped, iy_mapped = IxIy_unique[int(ind_to_ind[i, s]), :]
                # if the mapping does not round-trip, fail loudly
                if (np.abs(ix - ix_mapped) + np.abs(iy - iy_mapped)) > 0:
                    raise ValueError('reverse index mapping failed for month {}'.format(mon))
IxIy_maps = tuple(inds_to_inds.values())
tuple_save = IxIy_maps + (IxIy_unique,)
label_save = []
for i in range(12):
label_save.append('mon_{}_inds'.format(i))
label_save.append('unique_inds')
# save
du.save_hdf5(tuple_save, label_save, save_dir, 'SL{}_d4_unique.hdf'.format(N_S))
| 31.410256
| 84
| 0.595918
|
e04a3410ed2a786cff0271a02e36df3e7e901b38
| 52,147
|
py
|
Python
|
sympy/core/mul.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 7
|
2015-01-14T06:55:33.000Z
|
2018-08-11T14:43:52.000Z
|
sympy/core/mul.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2018-02-19T04:56:04.000Z
|
2018-02-19T04:56:04.000Z
|
sympy/core/mul.py
|
smichr/sympy
|
eda86926d98ab6cb7ec73e3cb8ea78ac15bddea3
|
[
"BSD-3-Clause"
] | 1
|
2016-04-24T14:39:22.000Z
|
2016-04-24T14:39:22.000Z
|
from collections import defaultdict
import operator
from functools import reduce  # reduce is not a builtin in Python 3
from sympy.core.sympify import sympify
from sympy.core.basic import Basic, C
from sympy.core.singleton import S
from sympy.core.operations import AssocOp
from sympy.core.cache import cacheit
from sympy.core.logic import fuzzy_not
from sympy.core.compatibility import cmp_to_key
from sympy.core.expr import Expr
# internal marker to indicate:
# "there are still non-commutative objects -- don't forget to process them"
class NC_Marker:
is_Order = False
is_Mul = False
is_Number = False
is_Poly = False
is_commutative = False
# Key for sorting commutative args in canonical order
_args_sortkey = cmp_to_key(Basic.compare)
def _mulsort(args):
# in-place sorting of args
args.sort(key=_args_sortkey)
def _unevaluated_Mul(*args):
"""Return a well-formed unevaluated Mul: Numbers are collected and
put in slot 0 and args are sorted. Use this when args have changed
but you still want to return an unevaluated Mul.
Examples
========
>>> from sympy.core.mul import _unevaluated_Mul as uMul
>>> from sympy import S, sqrt, Mul
>>> from sympy.abc import x, y
>>> a = uMul(*[S(3.0), x, S(2)])
>>> a.args[0]
6.00000000000000
>>> a.args[1]
x
Beyond the Number being in slot 0, there is no other flattening of
arguments, but two unevaluated Muls with the same arguments will
always compare as equal during testing:
>>> m = uMul(sqrt(2), sqrt(3))
>>> m == uMul(sqrt(3), sqrt(2))
True
>>> m == Mul(*m.args)
False
"""
args = list(args)
newargs = []
ncargs = []
co = S.One
while args:
a = args.pop()
if a.is_Mul:
c, nc = a.args_cnc()
args.extend(c)
if nc:
ncargs.append(Mul._from_args(nc))
elif a.is_Number:
co *= a
else:
newargs.append(a)
_mulsort(newargs)
if co is not S.One:
newargs.insert(0, co)
if ncargs:
newargs.append(Mul._from_args(ncargs))
return Mul._from_args(newargs)
class Mul(Expr, AssocOp):
__slots__ = []
is_Mul = True
#identity = S.One
# cyclic import, so defined in numbers.py
@classmethod
def flatten(cls, seq):
"""Return commutative, noncommutative and order arguments by
combining related terms.
Notes
=====
        * In an expression like ``a*b*c``, Python processes this through SymPy
as ``Mul(Mul(a, b), c)``. This can have undesirable consequences.
- Sometimes terms are not combined as one would like:
{c.f. http://code.google.com/p/sympy/issues/detail?id=1497}
>>> from sympy import Mul, sqrt
>>> from sympy.abc import x, y, z
>>> 2*(x + 1) # this is the 2-arg Mul behavior
2*x + 2
>>> y*(x + 1)*2
2*y*(x + 1)
>>> 2*(x + 1)*y # 2-arg result will be obtained first
y*(2*x + 2)
>>> Mul(2, x + 1, y) # all 3 args simultaneously processed
2*y*(x + 1)
>>> 2*((x + 1)*y) # parentheses can control this behavior
2*y*(x + 1)
Powers with compound bases may not find a single base to
combine with unless all arguments are processed at once.
Post-processing may be necessary in such cases.
{c.f. http://code.google.com/p/sympy/issues/detail?id=2629}
>>> a = sqrt(x*sqrt(y))
>>> a**3
(x*sqrt(y))**(3/2)
>>> Mul(a,a,a)
(x*sqrt(y))**(3/2)
>>> a*a*a
x*sqrt(y)*sqrt(x*sqrt(y))
>>> _.subs(a.base, z).subs(z, a.base)
(x*sqrt(y))**(3/2)
- If more than two terms are being multiplied then all the
previous terms will be re-processed for each new argument.
So if each of ``a``, ``b`` and ``c`` were :class:`Mul`
expression, then ``a*b*c`` (or building up the product
with ``*=``) will process all the arguments of ``a`` and
``b`` twice: once when ``a*b`` is computed and again when
``c`` is multiplied.
Using ``Mul(a, b, c)`` will process all arguments once.
* The results of Mul are cached according to arguments, so flatten
will only be called once for ``Mul(a, b, c)``. If you can
structure a calculation so the arguments are most likely to be
repeats then this can save time in computing the answer. For
example, say you had a Mul, M, that you wished to divide by ``d[i]``
and multiply by ``n[i]`` and you suspect there are many repeats
in ``n``. It would be better to compute ``M*n[i]/d[i]`` rather
than ``M/d[i]*n[i]`` since every time n[i] is a repeat, the
product, ``M*n[i]`` will be returned without flattening -- the
cached value will be returned. If you divide by the ``d[i]``
first (and those are more unique than the ``n[i]``) then that will
create a new Mul, ``M/d[i]`` the args of which will be traversed
again when it is multiplied by ``n[i]``.
{c.f. http://code.google.com/p/sympy/issues/detail?id=2607}
This consideration is moot if the cache is turned off.
NB
--
The validity of the above notes depends on the implementation
details of Mul and flatten which may change at any time. Therefore,
you should only consider them when your code is highly performance
sensitive.
Removal of 1 from the sequence is already handled by AssocOp.__new__.
"""
rv = None
if len(seq) == 2:
a, b = seq
if b.is_Rational:
a, b = b, a
                assert a is not S.One
if a and a.is_Rational:
r, b = b.as_coeff_Mul()
a *= r
if b.is_Mul:
bargs, nc = b.args_cnc()
rv = bargs, nc, None
if a is not S.One:
bargs.insert(0, a)
elif b.is_Add and b.is_commutative:
if a is S.One:
rv = [b], [], None
else:
r, b = b.as_coeff_Add()
bargs = [_keep_coeff(a, bi) for bi in Add.make_args(b)]
_addsort(bargs)
ar = a*r
if ar:
bargs.insert(0, ar)
bargs = [Add._from_args(bargs)]
rv = bargs, [], None
if rv:
return rv
# apply associativity, separate commutative part of seq
c_part = [] # out: commutative factors
nc_part = [] # out: non-commutative factors
nc_seq = []
coeff = S.One # standalone term
# e.g. 3 * ...
c_powers = [] # (base,exp) n
# e.g. (x,n) for x
num_exp = [] # (num-base, exp) y
# e.g. (3, y) for ... * 3 * ...
neg1e = S.Zero # exponent on -1 extracted from Number-based Pow and I
pnum_rat = {} # (num-base, Rat-exp) 1/2
# e.g. (3, 1/2) for ... * 3 * ...
order_symbols = None
# --- PART 1 ---
#
# "collect powers and coeff":
#
# o coeff
# o c_powers
# o num_exp
# o neg1e
# o pnum_rat
#
# NOTE: this is optimized for all-objects-are-commutative case
for o in seq:
# O(x)
if o.is_Order:
o, order_symbols = o.as_expr_variables(order_symbols)
# Mul([...])
if o.is_Mul:
if o.is_commutative:
seq.extend(o.args) # XXX zerocopy?
else:
# NCMul can have commutative parts as well
for q in o.args:
if q.is_commutative:
seq.append(q)
else:
nc_seq.append(q)
# append non-commutative marker, so we don't forget to
# process scheduled non-commutative objects
seq.append(NC_Marker)
continue
# 3
elif o.is_Number:
if o is S.NaN or coeff is S.ComplexInfinity and o is S.Zero:
# we know for sure the result will be nan
return [S.NaN], [], None
elif coeff.is_Number: # it could be zoo
coeff *= o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif o is S.ComplexInfinity:
if not coeff:
# 0 * zoo = NaN
return [S.NaN], [], None
if coeff is S.ComplexInfinity:
# zoo * zoo = zoo
return [S.ComplexInfinity], [], None
coeff = S.ComplexInfinity
continue
elif o is S.ImaginaryUnit:
neg1e += S.Half
continue
elif o.is_commutative:
# e
# o = b
b, e = o.as_base_exp()
# y
# 3
if o.is_Pow:
if b.is_Number:
# get all the factors with numeric base so they can be
# combined below, but don't combine negatives unless
# the exponent is an integer
if e.is_Rational:
if e.is_Integer:
coeff *= Pow(b, e) # it is an unevaluated power
continue
elif e.is_negative: # also a sign of an unevaluated power
seq.append(Pow(b, e))
continue
elif b.is_negative:
neg1e += e
b = -b
if b is not S.One:
pnum_rat.setdefault(b, []).append(e)
continue
elif b.is_positive or e.is_integer:
num_exp.append((b, e))
continue
elif b is S.ImaginaryUnit and e.is_Rational: # it is unevaluated
neg1e += e/2
continue
c_powers.append((b, e))
# NON-COMMUTATIVE
# TODO: Make non-commutative exponents not combine automatically
else:
if o is not NC_Marker:
nc_seq.append(o)
# process nc_seq (if any)
while nc_seq:
o = nc_seq.pop(0)
if not nc_part:
nc_part.append(o)
continue
# b c b+c
# try to combine last terms: a * a -> a
o1 = nc_part.pop()
b1, e1 = o1.as_base_exp()
b2, e2 = o.as_base_exp()
new_exp = e1 + e2
            # Only allow powers to combine if the new exponent is
            # not an Add. This allows things like a**2*a**3 == a**5
            # if a.is_commutative == False, but prohibits
            # a**x*a**y and x**a*x**b from combining (x,y commute).
if b1 == b2 and (not new_exp.is_Add):
o12 = b1 ** new_exp
# now o12 could be a commutative object
if o12.is_commutative:
seq.append(o12)
continue
else:
nc_seq.insert(0, o12)
else:
nc_part.append(o1)
nc_part.append(o)
# We do want a combined exponent if it would not be an Add, such as
# y 2y 3y
# x * x -> x
# We determine if two exponents have the same term by using
# as_coeff_Mul.
#
# Unfortunately, this isn't smart enough to consider combining into
# exponents that might already be adds, so things like:
# z - y y
# x * x will be left alone. This is because checking every possible
# combination can slow things down.
# gather exponents of common bases...
def _gather(c_powers):
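            # group (base, exp) pairs by base and by the non-numeric part of
            # the exponent, then add up the numeric coefficients,
            # e.g. (x, 2*y) and (x, 3*y) -> (x, 5*y)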
new_c_powers = []
common_b = {} # b:e
for b, e in c_powers:
co = e.as_coeff_Mul()
common_b.setdefault(b, {}).setdefault(co[1], []).append(co[0])
for b, d in common_b.items():
for di, li in d.items():
d[di] = Add(*li)
for b, e in common_b.items():
for t, c in e.items():
new_c_powers.append((b, c*t))
return new_c_powers
# in c_powers
c_powers = _gather(c_powers)
# and in num_exp
num_exp = _gather(num_exp)
# --- PART 2 ---
#
# o process collected powers (x**0 -> 1; x**1 -> x; otherwise Pow)
# o combine collected powers (2**x * 3**x -> 6**x)
# with numeric base
# ................................
# now we have:
# - coeff:
# - c_powers: (b, e)
# - num_exp: (2, e)
# - pnum_rat: {(1/3, [1/3, 2/3, 1/4])}
# 0 1
# x -> 1 x -> x
for b, e in c_powers:
if e is S.One:
if b.is_Number:
coeff *= b
else:
c_part.append(b)
elif e is not S.Zero:
c_part.append(Pow(b, e))
# x x x
# 2 * 3 -> 6
inv_exp_dict = {} # exp:Mul(num-bases) x x
# e.g. x:6 for ... * 2 * 3 * ...
for b, e in num_exp:
inv_exp_dict.setdefault(e, []).append(b)
for e, b in inv_exp_dict.items():
inv_exp_dict[e] = Mul(*b)
        c_part.extend([Pow(b, e) for e, b in inv_exp_dict.items() if e])
# b, e -> e' = sum(e), b
# {(1/5, [1/3]), (1/2, [1/12, 1/4]} -> {(1/3, [1/5, 1/2])}
comb_e = {}
        for b, e in pnum_rat.items():
comb_e.setdefault(Add(*e), []).append(b)
del pnum_rat
# process them, reducing exponents to values less than 1
# and updating coeff if necessary else adding them to
# num_rat for further processing
num_rat = []
        for e, b in comb_e.items():
b = Mul(*b)
if e.q == 1:
coeff *= Pow(b, e)
continue
if e.p > e.q:
e_i, ep = divmod(e.p, e.q)
coeff *= Pow(b, e_i)
e = Rational(ep, e.q)
num_rat.append((b, e))
del comb_e
# extract gcd of bases in num_rat
# 2**(1/3)*6**(1/4) -> 2**(1/3+1/4)*3**(1/4)
pnew = defaultdict(list)
i = 0 # steps through num_rat which may grow
while i < len(num_rat):
bi, ei = num_rat[i]
grow = []
for j in range(i + 1, len(num_rat)):
bj, ej = num_rat[j]
g = bi.gcd(bj)
if g is not S.One:
# 4**r1*6**r2 -> 2**(r1+r2) * 2**r1 * 3**r2
# this might have a gcd with something else
e = ei + ej
if e.q == 1:
coeff *= Pow(g, e)
else:
if e.p > e.q:
e_i, ep = divmod(e.p, e.q) # change e in place
coeff *= Pow(g, e_i)
e = Rational(ep, e.q)
grow.append((g, e))
# update the jth item
num_rat[j] = (bj/g, ej)
# update bi that we are checking with
bi = bi/g
if bi is S.One:
break
if bi is not S.One:
obj = Pow(bi, ei)
if obj.is_Number:
coeff *= obj
else:
# changes like sqrt(12) -> 2*sqrt(3)
for obj in Mul.make_args(obj):
if obj.is_Number:
coeff *= obj
else:
assert obj.is_Pow
bi, ei = obj.args
pnew[ei].append(bi)
num_rat.extend(grow)
i += 1
# combine bases of the new powers
        for e, b in pnew.items():
pnew[e] = Mul(*b)
# handle -1 and I
if neg1e:
# treat I as (-1)**(1/2) and compute -1's total exponent
p, q = neg1e.as_numer_denom()
# if the integer part is odd, extract -1
n, p = divmod(p, q)
if n % 2:
coeff = -coeff
# if it's a multiple of 1/2 extract I
if q == 2:
c_part.append(S.ImaginaryUnit)
elif p:
# see if there is any positive base this power of
# -1 can join
neg1e = Rational(p, q)
                for e, b in pnew.items():
if e == neg1e and b.is_positive:
pnew[e] = -b
break
else:
# keep it separate; we've already evaluated it as
# much as possible so evaluate=False
c_part.append(Pow(S.NegativeOne, neg1e, evaluate=False))
# add all the pnew powers
        c_part.extend([Pow(b, e) for e, b in pnew.items()])
# oo, -oo
if (coeff is S.Infinity) or (coeff is S.NegativeInfinity):
def _handle_for_oo(c_part, coeff_sign):
new_c_part = []
for t in c_part:
if t.is_positive:
continue
if t.is_negative:
coeff_sign *= -1
continue
new_c_part.append(t)
return new_c_part, coeff_sign
c_part, coeff_sign = _handle_for_oo(c_part, 1)
nc_part, coeff_sign = _handle_for_oo(nc_part, coeff_sign)
coeff *= coeff_sign
# zoo
if coeff is S.ComplexInfinity:
# zoo might be
# unbounded_real + bounded_im
# bounded_real + unbounded_im
# unbounded_real + unbounded_im
# and non-zero real or imaginary will not change that status.
c_part = [c for c in c_part if not (c.is_nonzero and
c.is_real is not None)]
nc_part = [c for c in nc_part if not (c.is_nonzero and
c.is_real is not None)]
# 0
elif coeff is S.Zero:
# we know for sure the result will be 0
return [coeff], [], order_symbols
# order commutative part canonically
_mulsort(c_part)
# current code expects coeff to be always in slot-0
if coeff is not S.One:
c_part.insert(0, coeff)
# we are done
if len(c_part) == 2 and c_part[0].is_Number and c_part[1].is_Add:
# 2*(1+a) -> 2 + 2 * a
coeff = c_part[0]
c_part = [Add(*[coeff*f for f in c_part[1].args])]
return c_part, nc_part, order_symbols
def _eval_power(b, e):
# don't break up NC terms: (A*B)**3 != A**3*B**3, it is A*B*A*B*A*B
cargs, nc = b.args_cnc(split_1=False)
if e.is_Integer:
return Mul(*[Pow(b, e, evaluate=False) for b in cargs]) * \
Pow(Mul._from_args(nc), e, evaluate=False)
p = Pow(b, e, evaluate=False)
if e.is_Rational or e.is_Float:
return p._eval_expand_power_base()
return p
@classmethod
def class_key(cls):
return 3, 0, cls.__name__
def _eval_evalf(self, prec):
c, m = self.as_coeff_Mul()
if c is S.NegativeOne:
if m.is_Mul:
rv = -AssocOp._eval_evalf(m, prec)
else:
mnew = m._eval_evalf(prec)
if mnew is not None:
m = mnew
rv = -m
else:
rv = AssocOp._eval_evalf(self, prec)
if rv.is_number:
return rv.expand()
return rv
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
          self.as_coeff_mul() which gives the head and a tuple containing
the arguments of the tail when treated as a Mul.
- if you want the coefficient when self is treated as an Add
then use self.as_coeff_add()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
args = self.args
if len(args) == 1:
return S.One, self
elif len(args) == 2:
return args
else:
return args[0], self._new_rawargs(*args[1:])
@cacheit
def as_coeff_mul(self, *deps):
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
args = self.args
if args[0].is_Rational:
return args[0], args[1:]
elif args[0] is S.NegativeInfinity:
return S.NegativeOne, (-args[0],) + args[1:]
return S.One, args
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
coeff, args = self.args[0], self.args[1:]
if coeff.is_Number and not (rational and not coeff.is_Rational):
if len(args) == 1:
return coeff, args[0]
else:
return coeff, self._new_rawargs(*args)
else:
return S.One, self
def as_real_imag(self, deep=True, **hints):
other = []
coeff = S(1)
for a in self.args:
if a.is_real:
coeff *= a
elif a.is_commutative:
# search for complex conjugate pairs:
for i, x in enumerate(other):
if x == a.conjugate():
coeff *= C.Abs(x)**2
del other[i]
break
else:
other.append(a)
else:
other.append(a)
m = Mul(*other)
if hints.get('ignore') == m:
return None
else:
return (coeff*C.re(m), coeff*C.im(m))
@staticmethod
def _expandsums(sums):
"""
Helper function for _eval_expand_mul.
sums must be a list of instances of Basic.
"""
L = len(sums)
if L == 1:
return sums[0].args
terms = []
left = Mul._expandsums(sums[:L//2])
right = Mul._expandsums(sums[L//2:])
terms = [Mul(a, b) for a in left for b in right]
added = Add(*terms)
return Add.make_args(added) # it may have collapsed down to one term
def _eval_expand_mul(self, **hints):
from sympy import fraction, expand_mul, expand_multinomial
# Handle things like 1/(x*(x + 1)), which are automatically converted
# to 1/x*1/(x + 1)
expr = self
n, d = fraction(expr)
if d.is_Mul:
n, d = [i._eval_expand_mul(**hints) if i.is_Mul else i
for i in (n, d)]
expr = n/d
if not expr.is_Mul:
return expr
plain, sums, rewrite = [], [], False
for factor in expr.args:
if factor.is_Add:
sums.append(factor)
rewrite = True
else:
if factor.is_commutative:
plain.append(factor)
else:
sums.append(Basic(factor)) # Wrapper
if not rewrite:
return expr
else:
plain = Mul(*plain)
if sums:
terms = Mul._expandsums(sums)
args = []
for term in terms:
t = Mul(plain, term)
if t.is_Mul and any(a.is_Add for a in t.args):
t = t._eval_expand_mul()
args.append(t)
return Add(*args)
else:
return plain
def _eval_derivative(self, s):
terms = list(self.args)
factors = []
        for i in range(len(terms)):
t = terms[i].diff(s)
if t is S.Zero:
continue
factors.append(Mul(*(terms[:i] + [t] + terms[i + 1:])))
return Add(*factors)
def _matches_simple(self, expr, repl_dict):
# handle (w*3).matches('x*5') -> {w: x*5/3}
coeff, terms = self.as_coeff_Mul()
terms = Mul.make_args(terms)
if len(terms) == 1:
newexpr = self.__class__._combine_inverse(expr, coeff)
return terms[0].matches(newexpr, repl_dict)
return
def matches(self, expr, repl_dict={}, old=False):
expr = sympify(expr)
if self.is_commutative and expr.is_commutative:
return AssocOp._matches_commutative(self, expr, repl_dict, old)
elif self.is_commutative is not expr.is_commutative:
return None
c1, nc1 = self.args_cnc()
c2, nc2 = expr.args_cnc()
repl_dict = repl_dict.copy()
if c1:
if not c2:
c2 = [1]
a = Mul(*c1)
if isinstance(a, AssocOp):
repl_dict = a._matches_commutative(Mul(*c2), repl_dict, old)
else:
repl_dict = a.matches(Mul(*c2), repl_dict)
if repl_dict:
a = Mul(*nc1)
if isinstance(a, Mul):
repl_dict = a._matches(Mul(*nc2), repl_dict)
else:
repl_dict = a.matches(Mul(*nc2), repl_dict)
return repl_dict or None
def _matches(self, expr, repl_dict={}):
# weed out negative one prefixes
sign = 1
a, b = self.as_two_terms()
if a is S.NegativeOne:
if b.is_Mul:
sign = -sign
else:
# the remainder, b, is not a Mul anymore
return b.matches(-expr, repl_dict)
expr = sympify(expr)
if expr.is_Mul and expr.args[0] is S.NegativeOne:
expr = -expr
sign = -sign
if not expr.is_Mul:
# expr can only match if it matches b and a matches +/- 1
if len(self.args) == 2:
# quickly test for equality
if b == expr:
return a.matches(Rational(sign), repl_dict)
# do more expensive match
dd = b.matches(expr, repl_dict)
if dd is None:
return None
dd = a.matches(Rational(sign), dd)
return dd
return None
d = repl_dict.copy()
# weed out identical terms
pp = list(self.args)
ee = list(expr.args)
for p in self.args:
if p in expr.args:
ee.remove(p)
pp.remove(p)
# only one symbol left in pattern -> match the remaining expression
if len(pp) == 1 and isinstance(pp[0], C.Wild):
if len(ee) == 1:
d[pp[0]] = sign * ee[0]
else:
d[pp[0]] = sign * expr.func(*ee)
return d
if len(ee) != len(pp):
return None
for p, e in zip(pp, ee):
d = p.xreplace(d).matches(e, d)
if d is None:
return None
return d
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs/rhs, but treats arguments like symbols, so things like
oo/oo return 1, instead of a nan.
"""
if lhs == rhs:
return S.One
def check(l, r):
if l.is_Float and r.is_comparable:
# if both objects are added to 0 they will share the same "normalization"
# and are more likely to compare the same. Since Add(foo, 0) will not allow
# the 0 to pass, we use __add__ directly.
return l.__add__(0) == r.evalf().__add__(0)
return False
if check(lhs, rhs) or check(rhs, lhs):
return S.One
if lhs.is_Mul and rhs.is_Mul:
a = list(lhs.args)
b = [1]
for x in rhs.args:
if x in a:
a.remove(x)
elif -x in a:
a.remove(-x)
b.append(-1)
else:
b.append(x)
return Mul(*a)/Mul(*b)
return lhs/rhs
def as_powers_dict(self):
d = defaultdict(int)
for term in self.args:
b, e = term.as_base_exp()
d[b] += e
return d
def as_numer_denom(self):
# don't use _from_args to rebuild the numerators and denominators
# as the order is not guaranteed to be the same once they have
# been separated from each other
numers, denoms = zip(*[f.as_numer_denom() for f in self.args])
return Mul(*numers), Mul(*denoms)
def as_base_exp(self):
e1 = None
bases = []
nc = 0
for m in self.args:
b, e = m.as_base_exp()
if not b.is_commutative:
nc += 1
if e1 is None:
e1 = e
elif e != e1 or nc > 1:
return self, S.One
bases.append(b)
return Mul(*bases), e1
def _eval_is_polynomial(self, syms):
return all(term._eval_is_polynomial(syms) for term in self.args)
def _eval_is_rational_function(self, syms):
return all(term._eval_is_rational_function(syms) for term in self.args)
_eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded')
_eval_is_commutative = lambda self: self._eval_template_is_attr(
'is_commutative')
_eval_is_rational = lambda self: self._eval_template_is_attr('is_rational',
when_multiple=None)
def _eval_is_integer(self):
is_rational = self.is_rational
if is_rational:
n, d = self.as_numer_denom()
if d is S.One:
return True
elif d is S(2):
return n.is_even
elif is_rational is False:
return False
def _eval_is_polar(self):
has_polar = any(arg.is_polar for arg in self.args)
return has_polar and \
all(arg.is_polar or arg.is_positive for arg in self.args)
# I*I -> R, I*I*I -> -I
def _eval_is_real(self):
im_count = 0
is_neither = False
for t in self.args:
if t.is_imaginary:
im_count += 1
continue
t_real = t.is_real
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 0)
def _eval_is_imaginary(self):
im_count = 0
is_neither = False
for t in self.args:
if t.is_imaginary:
im_count += 1
continue
t_real = t.is_real
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 1)
def _eval_is_hermitian(self):
nc_count = 0
im_count = 0
is_neither = False
for t in self.args:
if not t.is_commutative:
nc_count += 1
if nc_count > 1:
return None
if t.is_antihermitian:
im_count += 1
continue
t_real = t.is_hermitian
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 0)
def _eval_is_antihermitian(self):
nc_count = 0
im_count = 0
is_neither = False
for t in self.args:
if not t.is_commutative:
nc_count += 1
if nc_count > 1:
return None
if t.is_antihermitian:
im_count += 1
continue
t_real = t.is_hermitian
if t_real:
continue
elif t_real is False:
if is_neither:
return None
else:
is_neither = True
else:
return None
if is_neither:
return False
return (im_count % 2 == 1)
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a:
others = list(self.args)
others.remove(t)
if all(x.is_rational is True for x in others):
return True
return None
if a is None:
return
return False
def _eval_is_zero(self):
zero = None
for a in self.args:
if a.is_zero:
zero = True
continue
bound = a.is_bounded
if not bound:
return bound
if zero:
return True
def _eval_is_positive(self):
"""Return True if self is positive, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonpositive. e.g.
pos * neg * nonpositive -> pos or zero -> None is returned
pos * neg * nonnegative -> neg or zero -> False is returned
"""
sign = 1
saw_NON = False
for t in self.args:
if t.is_positive:
continue
elif t.is_negative:
sign = -sign
elif t.is_zero:
return False
elif t.is_nonpositive:
sign = -sign
saw_NON = True
elif t.is_nonnegative:
saw_NON = True
else:
return
if sign == 1 and saw_NON is False:
return True
if sign < 0:
return False
def _eval_is_negative(self):
"""Return True if self is negative, False if not, and None if it
cannot be determined.
This algorithm is non-recursive and works by keeping track of the
sign which changes when a negative or nonpositive is encountered.
Whether a nonpositive or nonnegative is seen is also tracked since
the presence of these makes it impossible to return True, but
possible to return False if the end result is nonnegative. e.g.
pos * neg * nonpositive -> pos or zero -> False is returned
pos * neg * nonnegative -> neg or zero -> None is returned
"""
sign = 1
saw_NON = False
for t in self.args:
if t.is_positive:
continue
elif t.is_negative:
sign = -sign
elif t.is_zero:
return False
elif t.is_nonpositive:
sign = -sign
saw_NON = True
elif t.is_nonnegative:
saw_NON = True
else:
return
if sign == -1 and saw_NON is False:
return True
if sign > 0:
return False
def _eval_is_odd(self):
is_integer = self.is_integer
if is_integer:
r = True
for t in self.args:
if not t.is_integer:
return None
elif t.is_even:
r = False
elif t.is_integer:
if r is False:
pass
elif t.is_odd is None:
r = None
return r
# !integer -> !odd
elif is_integer is False:
return False
def _eval_is_even(self):
is_integer = self.is_integer
if is_integer:
return fuzzy_not(self._eval_is_odd())
elif is_integer is False:
return False
def _eval_subs(self, old, new):
from sympy import sign, multiplicity
from sympy.simplify.simplify import powdenest, fraction
if not old.is_Mul:
return None
if old.args[0] == -1:
return self._subs(-old, -new)
def base_exp(a):
# if I and -1 are in a Mul, they get both end up with
# a -1 base (see issue 3322); all we want here are the
# true Pow or exp separated into base and exponent
if a.is_Pow or a.func is C.exp:
return a.as_base_exp()
return a, S.One
def breakup(eq):
"""break up powers of eq when treated as a Mul:
b**(Rational*e) -> b**e, Rational
commutatives come back as a dictionary {b**e: Rational}
noncommutatives come back as a list [(b**e, Rational)]
"""
(c, nc) = (defaultdict(int), list())
for a in Mul.make_args(eq):
a = powdenest(a)
(b, e) = base_exp(a)
if e is not S.One:
(co, _) = e.as_coeff_mul()
b = Pow(b, e/co)
e = co
if a.is_commutative:
c[b] += e
else:
nc.append([b, e])
return (c, nc)
def rejoin(b, co):
"""
Put rational back with exponent; in general this is not ok, but
since we took it from the exponent for analysis, it's ok to put
it back.
"""
(b, e) = base_exp(b)
return Pow(b, e*co)
def ndiv(a, b):
"""if b divides a in an extractive way (like 1/4 divides 1/2
but not vice versa, and 2/5 does not divide 1/3) then return
the integer number of times it divides, else return 0.
"""
if not b.q % a.q or not a.q % b.q:
return int(a/b)
return 0
# give Muls in the denominator a chance to be changed (see issue 2552)
# rv will be the default return value
rv = None
n, d = fraction(self)
if d is not S.One:
self2 = n._subs(old, new)/d._subs(old, new)
if not self2.is_Mul:
return self2._subs(old, new)
if self2 != self:
self = rv = self2
# Now continue with regular substitution.
# handle the leading coefficient and use it to decide if anything
# should even be started; we always know where to find the Rational
# so it's a quick test
co_self = self.args[0]
co_old = old.args[0]
co_xmul = None
if co_old.is_Rational and co_self.is_Rational:
# if coeffs are the same there will be no updating to do
# below after breakup() step; so skip (and keep co_xmul=None)
if co_old != co_self:
co_xmul = co_self.extract_multiplicatively(co_old)
elif co_old.is_Rational:
return rv
# break self and old into factors
(c, nc) = breakup(self)
(old_c, old_nc) = breakup(old)
# update the coefficients if we had an extraction
# e.g. if co_self were 2*(3/35*x)**2 and co_old = 3/5
# then co_self in c is replaced by (3/5)**2 and co_residual
# is 2*(1/7)**2
if co_xmul and co_xmul.is_Rational:
n_old, d_old = co_old.as_numer_denom()
n_self, d_self = co_self.as_numer_denom()
def _multiplicity(p, n):
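                # number of times p divides n; p == 1 divides anything
                # arbitrarily often, hence the Infinity sentinel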
p = abs(p)
if p is S.One:
return S.Infinity
return multiplicity(p, abs(n))
mult = S(min(_multiplicity(n_old, n_self),
_multiplicity(d_old, d_self)))
c.pop(co_self)
c[co_old] = mult
co_residual = co_self/co_old**mult
else:
co_residual = 1
# do quick tests to see if we can't succeed
ok = True
if len(old_nc) > len(nc):
# more non-commutative terms
ok = False
elif len(old_c) > len(c):
# more commutative terms
ok = False
elif set(i[0] for i in old_nc).difference(set(i[0] for i in nc)):
# unmatched non-commutative bases
ok = False
elif set(old_c).difference(set(c)):
# unmatched commutative terms
ok = False
elif any(sign(c[b]) != sign(old_c[b]) for b in old_c):
# differences in sign
ok = False
if not ok:
return rv
if not old_c:
cdid = None
else:
rat = []
for (b, old_e) in old_c.items():
c_e = c[b]
rat.append(ndiv(c_e, old_e))
if not rat[-1]:
return rv
cdid = min(rat)
if not old_nc:
ncdid = None
for i in range(len(nc)):
nc[i] = rejoin(*nc[i])
else:
ncdid = 0 # number of nc replacements we did
take = len(old_nc) # how much to look at each time
limit = cdid or S.Infinity # max number that we can take
failed = [] # failed terms will need subs if other terms pass
i = 0
while limit and i + take <= len(nc):
hit = False
# the bases must be equivalent in succession, and
# the powers must be extractively compatible on the
                # first and last factor but equal in between.
rat = []
for j in range(take):
if nc[i + j][0] != old_nc[j][0]:
break
elif j == 0:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif j == take - 1:
rat.append(ndiv(nc[i + j][1], old_nc[j][1]))
elif nc[i + j][1] != old_nc[j][1]:
break
else:
rat.append(1)
j += 1
else:
ndo = min(rat)
if ndo:
if take == 1:
if cdid:
ndo = min(cdid, ndo)
nc[i] = Pow(new, ndo)*rejoin(nc[i][0],
nc[i][1] - ndo*old_nc[0][1])
else:
ndo = 1
# the left residual
l = rejoin(nc[i][0], nc[i][1] - ndo*
old_nc[0][1])
# eliminate all middle terms
mid = new
# the right residual (which may be the same as the middle if take == 2)
ir = i + take - 1
r = (nc[ir][0], nc[ir][1] - ndo*
old_nc[-1][1])
if r[1]:
if i + take < len(nc):
nc[i:i + take] = [l*mid, r]
else:
r = rejoin(*r)
nc[i:i + take] = [l*mid*r]
else:
# there was nothing left on the right
nc[i:i + take] = [l*mid]
limit -= ndo
ncdid += ndo
hit = True
if not hit:
# do the subs on this failing factor
failed.append(i)
i += 1
else:
if not ncdid:
return rv
# although we didn't fail, certain nc terms may have
# failed so we rebuild them after attempting a partial
# subs on them
failed.extend(range(i, len(nc)))
for i in failed:
nc[i] = rejoin(*nc[i]).subs(old, new)
# rebuild the expression
if cdid is None:
do = ncdid
elif ncdid is None:
do = cdid
else:
do = min(ncdid, cdid)
margs = []
for b in c:
if b in old_c:
# calculate the new exponent
e = c[b] - old_c[b]*do
margs.append(rejoin(b, e))
else:
margs.append(rejoin(b.subs(old, new), c[b]))
if cdid and not ncdid:
# in case we are replacing commutative with non-commutative,
# we want the new term to come at the front just like the
# rest of this routine
margs = [Pow(new, cdid)] + margs
return co_residual*Mul(*margs)*Mul(*nc)
def _eval_nseries(self, x, n, logx):
from sympy import powsimp
terms = [t.nseries(x, n=n, logx=logx) for t in self.args]
return powsimp(Mul(*terms).expand(), combine='exp', deep=True)
def _eval_as_leading_term(self, x):
return Mul(*[t.as_leading_term(x) for t in self.args])
def _eval_conjugate(self):
return Mul(*[t.conjugate() for t in self.args])
def _eval_transpose(self):
return Mul(*[t.transpose() for t in self.args[::-1]])
def _eval_adjoint(self):
return Mul(*[t.adjoint() for t in self.args[::-1]])
def _sage_(self):
s = 1
for x in self.args:
s *= x._sage_()
return s
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import sqrt
>>> (-3*sqrt(2)*(2 - 2*sqrt(2))).as_content_primitive()
(6, -sqrt(2)*(-sqrt(2) + 1))
See docstring of Expr.as_content_primitive for more examples.
"""
coef = S.One
args = []
for i, a in enumerate(self.args):
c, p = a.as_content_primitive(radical=radical)
coef *= c
if p is not S.One:
args.append(p)
# don't use self._from_args here to reconstruct args
# since there may be identical args now that should be combined
# e.g. (2+2*x)*(3+3*x) should be (6, (1 + x)**2) not (6, (1+x)*(1+x))
return coef, Mul(*args)
def as_ordered_factors(self, order=None):
"""Transform an expression into an ordered list of factors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x, y
>>> (2*x*y*sin(x)*cos(x)).as_ordered_factors()
[2, x, y, sin(x), cos(x)]
"""
cpart, ncpart = self.args_cnc()
cpart.sort(key=lambda expr: expr.sort_key(order=order))
return cpart + ncpart
@property
def _sorted_args(self):
return self.as_ordered_factors()
def prod(a, start=1):
"""Return product of elements of a. Start with int 1 so if only
ints are included then an int result is returned.
Examples
========
>>> from sympy import prod, S
>>> prod(range(3))
0
>>> type(_) is int
True
>>> prod([S(2), 3])
6
>>> _.is_Integer
True
You can start the product at something other than 1:
>>> prod([1, 2], 3)
6
"""
return reduce(operator.mul, a, start)
def _keep_coeff(coeff, factors, clear=True, sign=False):
"""Return ``coeff*factors`` unevaluated if necessary.
If ``clear`` is False, do not keep the coefficient as a factor
if it can be distributed on a single factor such that one or
more terms will still have integer coefficients.
If ``sign`` is True, allow a coefficient of -1 to remain factored out.
Examples
========
>>> from sympy.core.mul import _keep_coeff
>>> from sympy.abc import x, y
>>> from sympy import S
>>> _keep_coeff(S.Half, x + 2)
(x + 2)/2
>>> _keep_coeff(S.Half, x + 2, clear=False)
x/2 + 1
>>> _keep_coeff(S.Half, (x + 2)*y, clear=False)
y*(x + 2)/2
>>> _keep_coeff(S(-1), x + y)
-x - y
>>> _keep_coeff(S(-1), x + y, sign=True)
-(x + y)
"""
if not coeff.is_Number:
if factors.is_Number:
factors, coeff = coeff, factors
else:
return coeff*factors
if coeff is S.One:
return factors
elif coeff is S.NegativeOne and not sign:
return -factors
elif factors.is_Add:
if not clear and coeff.is_Rational and coeff.q != 1:
q = S(coeff.q)
for i in factors.args:
c, t = i.as_coeff_Mul()
r = c/q
if r == int(r):
return coeff*factors
return Mul._from_args((coeff, factors))
elif factors.is_Mul:
margs = list(factors.args)
if margs[0].is_Number:
margs[0] *= coeff
if margs[0] == 1:
margs.pop(0)
else:
margs.insert(0, coeff)
return Mul._from_args(margs)
else:
return coeff*factors
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.add import Add, _addsort
| 32.962705
| 99
| 0.469576
|
aece8ed4851cf834f72d71cc94df55873fe590f3
| 4,362
|
py
|
Python
|
watersheds/ws_anisotropic_distance_transform.py
|
constantinpape/watersheds
|
9fde72b2df5aa0e3531969361b3a6c37be77ba8a
|
[
"BSD-3-Clause"
] | null | null | null |
watersheds/ws_anisotropic_distance_transform.py
|
constantinpape/watersheds
|
9fde72b2df5aa0e3531969361b3a6c37be77ba8a
|
[
"BSD-3-Clause"
] | null | null | null |
watersheds/ws_anisotropic_distance_transform.py
|
constantinpape/watersheds
|
9fde72b2df5aa0e3531969361b3a6c37be77ba8a
|
[
"BSD-3-Clause"
] | null | null | null |
import vigra
import numpy as np
from wsdt import group_seeds_by_distance, iterative_inplace_watershed
def signed_anisotropic_dt(
pmap,
threshold,
anisotropy,
preserve_membrane_pmaps
):
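    # Signed distance transform: positive distances outside the thresholded
    # membranes, negative values (inverted probabilities or a negative
    # distance transform) inside them.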
binary_membranes = (pmap >= threshold).astype('uint32')
distance_to_membrane = vigra.filters.distanceTransform(
binary_membranes,
pixel_pitch = [anisotropy, 1., 1.])
if preserve_membrane_pmaps:
# Instead of computing a negative distance transform within the thresholded membrane areas,
# Use the original probabilities (but inverted)
membrane_mask = binary_membranes.astype(np.bool)
distance_to_membrane[membrane_mask] = -pmap[membrane_mask]
else:
# Save RAM with a sneaky trick:
# Use distanceTransform in-place, despite the fact that the input and output don't have the same types!
# (We can just cast labeled as a float32, since uint32 and float32 are the same size.)
distance_to_nonmembrane = binary_membranes.view('float32')
vigra.filters.distanceTransform(
binary_membranes,
background=False,
out=distance_to_nonmembrane,
pixel_pitch = [anisotropy, 1., 1.])
# Combine the inner/outer distance transforms
distance_to_nonmembrane[distance_to_nonmembrane>0] -= 1
distance_to_membrane[:] -= distance_to_nonmembrane
return distance_to_membrane
def anisotropic_seeds(
distance_to_membrane,
anisotropy,
sigma_seeds,
group_seeds
):
seeds = np.zeros_like(distance_to_membrane, dtype = 'uint32')
    # smooth with sigma_seeds per axis, scaled along z for the anisotropy
    seed_map = vigra.filters.gaussianSmoothing(distance_to_membrane, (sigma_seeds / anisotropy, sigma_seeds, sigma_seeds))
    for z in range(distance_to_membrane.shape[0]):
seeds_z = vigra.analysis.localMaxima(seed_map[z], allowPlateaus=True, allowAtBorder=True, marker=np.nan)
if group_seeds:
seeds_z = group_seeds_by_distance( seeds_z, distance_to_membrane[z])
else:
seeds_z = vigra.analysis.labelMultiArrayWithBackground(seeds_z)
seeds[z] = seeds_z
return seeds
def ws_anisotropic_distance_transform(
pmap,
threshold,
anisotropy,
sigma_seeds,
sigma_weights = 0.,
min_segment_size = 0,
preserve_membrane_pmaps = True,
grow_on_pmap = True,
group_seeds = False
):
"""
    Watershed on anisotropic distance transform on 3d probability map.
@params:
pmap: probability map, 3d numpy.ndarray of type float32.
threshold: threshold for pixels that are considered in distance transform.
anisotropy: anisotropy factor along the z axis.
sigma_seeds: smoothing factor for distance transform used for finding seeds.
    sigma_weights: smoothing factor for height map used for the watershed (default 0.).
min_segment_size: size filter for resulting segments (default 0 -> no size filtering).
    preserve_membrane_pmaps: keep the inverted membrane probabilities inside thresholded membranes instead of a negative distance transform (default: True).
grow_on_pmap: grow on the probability map instead of distance transform (default: True).
group_seeds: use heuristics to group adjacent seeds (default: False).
@returns:
fragments: numpy.ndarray of type uint32
n_labels: number of labels
"""
# make sure we are in 3d and that first axis is z
assert pmap.ndim == 3
shape = pmap.shape
assert shape[0] < shape[1] and shape[0] < shape[2]
distance_to_membrane = signed_anisotropic_dt(pmap, threshold, anisotropy, preserve_membrane_pmaps)
seeds = anisotropic_seeds(distance_to_membrane, anisotropy, sigma_seeds, group_seeds)
if grow_on_pmap:
hmap = pmap
else:
hmap = distance_to_membrane
    # Invert the height map: the watershed code requires seeds to be at
    # minima, not maxima. Negate into a copy so the caller's array is
    # not modified in place.
    hmap = -hmap
if sigma_weights != 0.:
hmap = vigra.filters.gaussianSmoothing(hmap, ( 1. / sigma_weights ) )
offset = 0
    for z in range(shape[0]):
max_z = iterative_inplace_watershed(hmap[z], seeds[z], min_segment_size, None)
seeds[z] -= 1
seeds[z] += offset
# TODO make sure that this does not cause a label overlap by one between adjacent slices
offset += max_z
return seeds, offset
| 36.049587
| 113
| 0.672627
|
fd2a67a4ec743ae51df4379da0bbae628be51ef7
| 860
|
py
|
Python
|
util.py
|
hchang18/non-parametric-methods
|
4e1eb168d0b0604dd0e84e0033916fa22cda05c6
|
[
"MIT"
] | 1
|
2021-07-07T22:49:43.000Z
|
2021-07-07T22:49:43.000Z
|
util.py
|
hchang18/non-parametric-methods
|
4e1eb168d0b0604dd0e84e0033916fa22cda05c6
|
[
"MIT"
] | 12
|
2021-06-06T06:41:25.000Z
|
2021-07-06T23:59:05.000Z
|
util.py
|
hchang18/nonparametric-methods
|
4e1eb168d0b0604dd0e84e0033916fa22cda05c6
|
[
"MIT"
] | null | null | null |
# util.py
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
def read_data(filename):
file = open(filename)
file_content = file.readlines()
# clean up to make it into list
    file_content = [row.rstrip('\n').lstrip(' ').replace('  ', ' ').split(' ')
                    for row in file_content]
# change into array (float)
raw_data = np.array(file_content)
    data = raw_data[1:].astype(float)  # np.float was removed in NumPy 1.24+
y = data[:, 0]
x = data[:, 1]
return data, x, y
def plot_2darray(X, Y):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Scatter Plot of X and Y')
ax.scatter(X, Y)
ax.grid(True)
leg = mpatches.Patch(color=None, label='original data plots')
ax.legend(handles=[leg])
plt.xlabel('X')
plt.ylabel('Y')
plt.tight_layout()
plt.show()
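# A minimal usage sketch (hedged: assumes a whitespace-separated file
# 'data.txt' with a one-line header and columns y, x; the filename is
# illustrative only).
if __name__ == '__main__':
    data, x, y = read_data('data.txt')
    plot_2darray(x, y)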
| 24.571429
| 78
| 0.612791
|
6ede55b4a8fefe9b97e10dfece9b5feeb76102a3
| 6,803
|
py
|
Python
|
param_stamp.py
|
i-supermario/Cifar100_CL
|
6c22151ea2c4c3014a569112fdf8a549331b27c4
|
[
"MIT"
] | 164
|
2020-08-13T08:24:59.000Z
|
2022-03-29T07:09:10.000Z
|
param_stamp.py
|
i-supermario/Cifar100_CL
|
6c22151ea2c4c3014a569112fdf8a549331b27c4
|
[
"MIT"
] | 11
|
2020-09-21T11:28:13.000Z
|
2021-07-17T11:36:13.000Z
|
param_stamp.py
|
i-supermario/Cifar100_CL
|
6c22151ea2c4c3014a569112fdf8a549331b27c4
|
[
"MIT"
] | 51
|
2020-08-17T05:40:27.000Z
|
2022-03-29T07:09:28.000Z
|
from data.load import get_multitask_experiment
from utils import checkattr
def get_param_stamp_from_args(args):
'''To get param-stamp a bit quicker.'''
from define_models import define_autoencoder, define_classifier
# -get configurations of experiment
config = get_multitask_experiment(
name=args.experiment, scenario=args.scenario, tasks=args.tasks, data_dir=args.d_dir, only_config=True,
normalize=args.normalize if hasattr(args, "normalize") else False, verbose=False,
)
# -get model architectures
model = define_autoencoder(args=args, config=config, device='cpu') if checkattr(
args,'feedback'
) else define_classifier(args=args, config=config, device='cpu')
if checkattr(args, 'feedback'):
model.lamda_pl = 1. if not hasattr(args, 'pl') else args.pl
train_gen = (hasattr(args, 'replay') and args.replay=="generative" and not checkattr(args, 'feedback'))
if train_gen:
generator = define_autoencoder(args=args, config=config, device='cpu', generator=True,
convE=model.convE if hasattr(args, "hidden") and args.hidden else None)
# -extract and return param-stamp
model_name = model.name
replay_model_name = generator.name if train_gen else None
param_stamp = get_param_stamp(args, model_name, replay=(hasattr(args, "replay") and not args.replay=="none"),
replay_model_name=replay_model_name, verbose=False)
return param_stamp
def get_param_stamp(args, model_name, verbose=True, replay=False, replay_model_name=None):
'''Based on the input-arguments, produce a "parameter-stamp".'''
# -for task
multi_n_stamp = "{n}-{set}{of}".format(
n=args.tasks, set=args.scenario, of="OL" if checkattr(args, 'only_last') else ""
) if hasattr(args, "tasks") else ""
task_stamp = "{exp}{norm}{aug}{multi_n}".format(
exp=args.experiment, norm="-N" if hasattr(args, 'normalize') and args.normalize else "",
aug="+" if hasattr(args, "augment") and args.augment else "", multi_n=multi_n_stamp
)
if verbose:
print(" --> task: "+task_stamp)
# -for model
model_stamp = model_name
if verbose:
print(" --> model: "+model_stamp)
# -for hyper-parameters
pre_conv = ""
if (checkattr(args, "pre_convE") or checkattr(args, "pre_convD")) and (hasattr(args, 'depth') and args.depth>0):
ltag = "" if not hasattr(args, "convE_ltag") or args.convE_ltag=="none" else "-{}".format(args.convE_ltag)
pre_conv = "-pCvE{}".format(ltag) if args.pre_convE else "-pCvD"
pre_conv = "-pConv{}".format(ltag) if args.pre_convE and checkattr(args, "pre_convD") else pre_conv
freeze_conv = ""
if (checkattr(args, "freeze_convD") or checkattr(args, "freeze_convE")) and hasattr(args, 'depth') and args.depth>0:
freeze_conv = "-fCvE" if checkattr(args, "freeze_convE") else "-fCvD"
freeze_conv = "-fConv" if checkattr(args, "freeze_convE") and checkattr(args, "freeze_convD") else freeze_conv
hyper_stamp = "{i_e}{num}-lr{lr}{lrg}-b{bsz}{pretr}{freeze}{reinit}".format(
i_e="e" if args.iters is None else "i", num=args.epochs if args.iters is None else args.iters, lr=args.lr,
lrg=("" if args.lr==args.lr_gen else "-lrG{}".format(args.lr_gen)) if (
hasattr(args, "lr_gen") and hasattr(args, "replay") and args.replay=="generative" and
(not checkattr(args, "feedback"))
) else "",
bsz=args.batch, pretr=pre_conv, freeze=freeze_conv, reinit="-R" if checkattr(args, 'reinit') else ""
)
if verbose:
print(" --> hyper-params: " + hyper_stamp)
# -for EWC / SI
if (checkattr(args, 'ewc') and args.ewc_lambda>0) or (checkattr(args, 'si') and args.si_c>0):
ewc_stamp = "EWC{l}-{fi}{o}".format(
l=args.ewc_lambda, fi="{}".format("N" if args.fisher_n is None else args.fisher_n),
o="-O{}".format(args.gamma) if checkattr(args, 'online') else "",
) if (checkattr(args, 'ewc') and args.ewc_lambda>0) else ""
si_stamp = "SI{c}-{eps}".format(c=args.si_c, eps=args.epsilon) if (checkattr(args,'si') and args.si_c>0) else ""
both = "--" if (checkattr(args,'ewc') and args.ewc_lambda>0) and (checkattr(args,'si') and args.si_c>0) else ""
if verbose and checkattr(args, 'ewc') and args.ewc_lambda>0:
print(" --> EWC: " + ewc_stamp)
if verbose and checkattr(args, 'si') and args.si_c>0:
print(" --> SI: " + si_stamp)
ewc_stamp = "--{}{}{}".format(ewc_stamp, both, si_stamp) if (
(checkattr(args, 'ewc') and args.ewc_lambda>0) or (checkattr(args, 'si') and args.si_c>0)
) else ""
# -for XdG
xdg_stamp = ""
if (checkattr(args, "xdg") and args.xdg_prop > 0):
xdg_stamp = "--XdG{}".format(args.xdg_prop)
if verbose:
print(" --> XdG: " + "gating = {}".format(args.xdg_prop))
# -for replay
if replay:
replay_stamp = "{H}{rep}{bat}{distil}{model}{gi}".format(
H="" if not args.replay=="generative" else (
"H" if (checkattr(args, "hidden") and hasattr(args, 'depth') and args.depth>0) else ""
),
rep="gen" if args.replay=="generative" else args.replay,
bat="" if (
(not hasattr(args, 'batch_replay')) or (args.batch_replay is None) or args.batch_replay==args.batch
) else "-br{}".format(args.batch_replay),
distil="-Di{}".format(args.temp) if args.distill else "",
model="" if (replay_model_name is None) else "-{}".format(replay_model_name),
gi="-gi{}".format(args.g_iters) if (
hasattr(args, "g_iters") and (replay_model_name is not None) and (not args.iters==args.g_iters)
) else "",
)
if verbose:
print(" --> replay: " + replay_stamp)
replay_stamp = "--{}".format(replay_stamp) if replay else ""
# -for choices regarding reconstruction loss
if checkattr(args, "feedback"):
recon_stamp = "--{}{}".format(
"H_" if checkattr(args, "hidden") and hasattr(args, 'depth') and args.depth>0 else "", args.recon_loss
)
elif hasattr(args, "replay") and args.replay=="generative":
recon_stamp = "--{}".format(args.recon_loss)
else:
recon_stamp = ""
# --> combine
param_stamp = "{}--{}--{}{}{}{}{}{}".format(
task_stamp, model_stamp, hyper_stamp, ewc_stamp, xdg_stamp, replay_stamp,
recon_stamp, "-s{}".format(args.seed) if not args.seed==0 else "",
)
## Print param-stamp on screen and return
if verbose:
print(param_stamp)
return param_stamp
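# A minimal sketch of calling get_param_stamp directly (hedged: assumes
# checkattr treats missing attributes as falsy; every option value below is
# illustrative only).
if __name__ == '__main__':
    from types import SimpleNamespace
    example_args = SimpleNamespace(experiment='splitMNIST', scenario='task', tasks=5,
                                   iters=2000, lr=0.001, batch=128, seed=0)
    print(get_param_stamp(example_args, model_name='mlp', verbose=False))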
| 49.656934
| 120
| 0.6137
|
a0ea958af62a88ceff2a8aa47526e06685de4a3a
| 11,562
|
py
|
Python
|
synapse/appservice/api.py
|
mlakkadshaw/synapse
|
74a2365bd5066955567cc551e72632d6cece94b9
|
[
"Apache-2.0"
] | null | null | null |
synapse/appservice/api.py
|
mlakkadshaw/synapse
|
74a2365bd5066955567cc551e72632d6cece94b9
|
[
"Apache-2.0"
] | 2
|
2022-03-01T08:22:45.000Z
|
2022-03-11T08:13:55.000Z
|
synapse/appservice/api.py
|
mlakkadshaw/synapse
|
74a2365bd5066955567cc551e72632d6cece94b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
from prometheus_client import Counter
from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.appservice import (
ApplicationService,
TransactionOneTimeKeyCounts,
TransactionUnusedFallbackKeys,
)
from synapse.events import EventBase
from synapse.events.utils import serialize_event
from synapse.http.client import SimpleHttpClient
from synapse.types import JsonDict, ThirdPartyInstanceID
from synapse.util.caches.response_cache import ResponseCache
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
sent_transactions_counter = Counter(
"synapse_appservice_api_sent_transactions",
"Number of /transactions/ requests sent",
["service"],
)
failed_transactions_counter = Counter(
"synapse_appservice_api_failed_transactions",
"Number of /transactions/ requests that failed to send",
["service"],
)
sent_events_counter = Counter(
"synapse_appservice_api_sent_events", "Number of events sent to the AS", ["service"]
)
HOUR_IN_MS = 60 * 60 * 1000
APP_SERVICE_PREFIX = "/_matrix/app/unstable"
def _is_valid_3pe_metadata(info: JsonDict) -> bool:
if "instances" not in info:
return False
if not isinstance(info["instances"], list):
return False
return True
def _is_valid_3pe_result(r: JsonDict, field: str) -> bool:
if not isinstance(r, dict):
return False
for k in (field, "protocol"):
if k not in r:
return False
if not isinstance(r[k], str):
return False
if "fields" not in r:
return False
fields = r["fields"]
if not isinstance(fields, dict):
return False
return True
class ApplicationServiceApi(SimpleHttpClient):
"""This class manages HS -> AS communications, including querying and
pushing.
"""
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.clock = hs.get_clock()
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
)
async def query_user(self, service: "ApplicationService", user_id: str) -> bool:
if service.url is None:
return False
# This is required by the configuration.
assert service.hs_token is not None
uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
try:
response = await self.get_json(uri, {"access_token": service.hs_token})
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
if e.code == 404:
return False
logger.warning("query_user to %s received %s", uri, e.code)
except Exception as ex:
logger.warning("query_user to %s threw exception %s", uri, ex)
return False
async def query_alias(self, service: "ApplicationService", alias: str) -> bool:
if service.url is None:
return False
# This is required by the configuration.
assert service.hs_token is not None
uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
try:
response = await self.get_json(uri, {"access_token": service.hs_token})
if response is not None: # just an empty json object
return True
except CodeMessageException as e:
logger.warning("query_alias to %s received %s", uri, e.code)
if e.code == 404:
return False
except Exception as ex:
logger.warning("query_alias to %s threw exception %s", uri, ex)
return False
async def query_3pe(
self,
service: "ApplicationService",
kind: str,
protocol: str,
fields: Dict[bytes, List[bytes]],
) -> List[JsonDict]:
if kind == ThirdPartyEntityKind.USER:
required_field = "userid"
elif kind == ThirdPartyEntityKind.LOCATION:
required_field = "alias"
else:
raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind)
if service.url is None:
return []
uri = "%s%s/thirdparty/%s/%s" % (
service.url,
APP_SERVICE_PREFIX,
kind,
urllib.parse.quote(protocol),
)
try:
response = await self.get_json(uri, fields)
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r", uri, response
)
return []
ret = []
for r in response:
if _is_valid_3pe_result(r, field=required_field):
ret.append(r)
else:
logger.warning(
"query_3pe to %s returned an invalid result %r", uri, r
)
return ret
except Exception as ex:
logger.warning("query_3pe to %s threw exception %s", uri, ex)
return []
async def get_3pe_protocol(
self, service: "ApplicationService", protocol: str
) -> Optional[JsonDict]:
if service.url is None:
return {}
async def _get() -> Optional[JsonDict]:
uri = "%s%s/thirdparty/protocol/%s" % (
service.url,
APP_SERVICE_PREFIX,
urllib.parse.quote(protocol),
)
try:
info = await self.get_json(uri)
if not _is_valid_3pe_metadata(info):
logger.warning(
"query_3pe_protocol to %s did not return a valid result", uri
)
return None
for instance in info.get("instances", []):
network_id = instance.get("network_id", None)
if network_id is not None:
instance["instance_id"] = ThirdPartyInstanceID(
service.id, network_id
).to_string()
return info
except Exception as ex:
logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex)
return None
key = (service.id, protocol)
return await self.protocol_meta_cache.wrap(key, _get)
async def push_bulk(
self,
service: "ApplicationService",
events: List[EventBase],
ephemeral: List[JsonDict],
to_device_messages: List[JsonDict],
one_time_key_counts: TransactionOneTimeKeyCounts,
unused_fallback_keys: TransactionUnusedFallbackKeys,
txn_id: Optional[int] = None,
) -> bool:
"""
Push data to an application service.
Args:
service: The application service to send to.
events: The persistent events to send.
ephemeral: The ephemeral events to send.
to_device_messages: The to-device messages to send.
            txn_id: A unique ID to assign to this transaction. Application services should
                deduplicate transactions received with identical IDs.
Returns:
True if the task succeeded, False if it failed.
"""
if service.url is None:
return True
# This is required by the configuration.
assert service.hs_token is not None
serialized_events = self._serialize(service, events)
if txn_id is None:
logger.warning(
"push_bulk: Missing txn ID sending events to %s", service.url
)
txn_id = 0
uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id)))
# Never send ephemeral events to appservices that do not support it
body: JsonDict = {"events": serialized_events}
if service.supports_ephemeral:
body.update(
{
# TODO: Update to stable prefixes once MSC2409 completes FCP merge.
"de.sorunome.msc2409.ephemeral": ephemeral,
"de.sorunome.msc2409.to_device": to_device_messages,
}
)
if service.msc3202_transaction_extensions:
if one_time_key_counts:
body[
"org.matrix.msc3202.device_one_time_key_counts"
] = one_time_key_counts
if unused_fallback_keys:
body[
"org.matrix.msc3202.device_unused_fallback_keys"
] = unused_fallback_keys
try:
await self.put_json(
uri=uri,
json_body=body,
args={"access_token": service.hs_token},
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"push_bulk to %s succeeded! events=%s",
uri,
[event.get("event_id") for event in events],
)
sent_transactions_counter.labels(service.id).inc()
sent_events_counter.labels(service.id).inc(len(serialized_events))
return True
except CodeMessageException as e:
logger.warning(
"push_bulk to %s received code=%s msg=%s",
uri,
e.code,
e.msg,
exc_info=logger.isEnabledFor(logging.DEBUG),
)
except Exception as ex:
logger.warning(
"push_bulk to %s threw exception(%s) %s args=%s",
uri,
type(ex).__name__,
ex,
ex.args,
exc_info=logger.isEnabledFor(logging.DEBUG),
)
failed_transactions_counter.labels(service.id).inc()
return False
def _serialize(
self, service: "ApplicationService", events: Iterable[EventBase]
) -> List[JsonDict]:
time_now = self.clock.time_msec()
return [
serialize_event(
e,
time_now,
as_client_event=True,
# If this is an invite or a knock membership event, and we're interested
# in this user, then include any stripped state alongside the event.
include_stripped_room_state=(
e.type == EventTypes.Member
and (
e.membership == Membership.INVITE
or e.membership == Membership.KNOCK
)
and service.is_interested_in_user(e.state_key)
),
)
for e in events
]
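# For reference, a sketch of the transaction body push_bulk PUTs to
# /transactions/{txn_id} when the service supports ephemeral events and the
# MSC3202 extensions (field values are illustrative placeholders):
#
# {
#     "events": [...],                                    # serialized room events
#     "de.sorunome.msc2409.ephemeral": [...],             # MSC2409 ephemeral events
#     "de.sorunome.msc2409.to_device": [...],             # MSC2409 to-device messages
#     "org.matrix.msc3202.device_one_time_key_counts": {...},
#     "org.matrix.msc3202.device_unused_fallback_keys": {...},
# }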
| 34.207101
| 91
| 0.57983
|
f7df5d9b0da8331a950bc49f23485c845f42b370
| 1,559
|
py
|
Python
|
keylime/migrations/versions/f35cdd35eb83_move_v_tpm_policy_to_jsonpickletype.py
|
kkaarreell/keylime
|
e12658bb6dc945b694e298b8ac337a204ab86ed2
|
[
"Apache-2.0"
] | 18
|
2016-10-19T13:57:32.000Z
|
2019-01-12T21:35:43.000Z
|
keylime/migrations/versions/f35cdd35eb83_move_v_tpm_policy_to_jsonpickletype.py
|
kkaarreell/keylime
|
e12658bb6dc945b694e298b8ac337a204ab86ed2
|
[
"Apache-2.0"
] | 72
|
2019-01-24T10:12:59.000Z
|
2019-04-17T11:07:16.000Z
|
keylime/migrations/versions/f35cdd35eb83_move_v_tpm_policy_to_jsonpickletype.py
|
kkaarreell/keylime
|
e12658bb6dc945b694e298b8ac337a204ab86ed2
|
[
"Apache-2.0"
] | 10
|
2017-03-27T20:58:08.000Z
|
2018-07-30T12:59:27.000Z
|
"""Move (v)tpm_policy to JSONPickleType
Revision ID: f35cdd35eb83
Revises: 7d5db1a6ffb0
Create Date: 2021-08-02 15:26:34.427156
"""
import sqlalchemy as sa
from alembic import op
import keylime
# revision identifiers, used by Alembic.
revision = "f35cdd35eb83"
down_revision = "7d5db1a6ffb0"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
globals()[f"downgrade_{engine_name}"]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table("verifiermain") as batch_op:
batch_op.alter_column(
"tpm_policy",
existing_type=sa.String(1000),
type_=keylime.db.verifier_db.JSONPickleType(),
existing_nullable=True,
)
batch_op.alter_column(
"vtpm_policy",
existing_type=sa.String(1000),
type_=keylime.db.verifier_db.JSONPickleType(),
existing_nullable=True,
)
def downgrade_cloud_verifier():
with op.batch_alter_table("verifiermain") as batch_op:
batch_op.alter_column(
"tpm_policy",
type_=sa.String(1000),
existing_type=keylime.db.verifier_db.JSONPickleType(),
existing_nullable=True,
)
batch_op.alter_column(
"vtpm_policy",
type_=sa.String(1000),
existing_type=keylime.db.verifier_db.JSONPickleType(),
existing_nullable=True,
)
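# A minimal sketch of applying this revision programmatically (hedged: assumes
# an alembic.ini wired to keylime's multi-database migration environment; the
# config path is illustrative only):
#
#   from alembic import command
#   from alembic.config import Config
#   command.upgrade(Config("alembic.ini"), "f35cdd35eb83")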
| 23.621212
| 66
| 0.6517
|
203ea37759d1a16ce67bbbfd8bf746634463f5f9
| 723
|
py
|
Python
|
root/scripts/set_share_list.py
|
DragonCrafted87/docker-alpine-nfs-server
|
bbe7da1779fea99e15091474d875304b419ebbc7
|
[
"MIT"
] | null | null | null |
root/scripts/set_share_list.py
|
DragonCrafted87/docker-alpine-nfs-server
|
bbe7da1779fea99e15091474d875304b419ebbc7
|
[
"MIT"
] | null | null | null |
root/scripts/set_share_list.py
|
DragonCrafted87/docker-alpine-nfs-server
|
bbe7da1779fea99e15091474d875304b419ebbc7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from pathlib import Path
from os import listdir
# Local Imports
from python_logger import create_logger #pylint: disable=import-error
def main():
logger = create_logger(Path(__file__).stem)
logger.info(f'{listdir("/nfs_share/")}')
base_directory = Path('/nfs_share')
    nfs_permissions = '*(rw,sync,no_subtree_check,no_auth_nlm,insecure,no_root_squash,crossmnt)'
exports_file_path = Path('/etc/exports')
with exports_file_path.open('a') as exports_file:
for nfs_share_dir in base_directory.glob('*'):
if nfs_share_dir.is_dir():
logger.info(f'{str(nfs_share_dir)}')
                exports_file.write(f'{str(nfs_share_dir)} {nfs_permissions}\n')
if __name__ == "__main__":
main()
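# For reference, a share directory such as /nfs_share/media would append a
# line like the following to /etc/exports (the directory name is illustrative):
#   /nfs_share/media *(rw,sync,no_subtree_check,no_auth_nlm,insecure,no_root_squash,crossmnt)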
| 28.92
| 93
| 0.724758
|
c90f3c18d4ea5c85b9611e03496dad8ee36a2ea9
| 2,247
|
py
|
Python
|
bcs-ui/backend/templatesets/legacy_apps/instance/migrations/0002_instanceconfig.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | 599
|
2019-06-25T03:20:46.000Z
|
2022-03-31T12:14:33.000Z
|
bcs-ui/backend/templatesets/legacy_apps/instance/migrations/0002_instanceconfig.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | 537
|
2019-06-27T06:03:44.000Z
|
2022-03-31T12:10:01.000Z
|
bcs-ui/backend/templatesets/legacy_apps/instance/migrations/0002_instanceconfig.py
|
laodiu/bk-bcs
|
2a956a42101ff6487ff521fb3ef429805bfa7e26
|
[
"Apache-2.0"
] | 214
|
2019-06-25T03:26:05.000Z
|
2022-03-31T07:52:03.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
# Generated by Django 1.11.5 on 2017-11-01 07:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instance', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='InstanceConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creator', models.CharField(max_length=32, verbose_name='creator')),
                ('updator', models.CharField(max_length=32, verbose_name='updater')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('deleted_time', models.DateTimeField(blank=True, null=True)),
                ('instance_id', models.IntegerField(verbose_name='associated VersionInstance ID')),
                ('namespace', models.CharField(max_length=32, verbose_name='namespace')),
                ('category', models.CharField(choices=[('application', 'Application'), ('deplpyment', 'Deployment'), ('service', 'service'), ('configmap', 'configmap'), ('secret', 'secret')], max_length=10, verbose_name='resource type')),
                ('config', models.TextField(help_text='JSON-formatted data', verbose_name='config file')),
],
options={
'abstract': False,
},
),
]
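# For reference, this migration would typically be applied with Django's
# standard management command (illustrative invocation):
#   python manage.py migrate instance 0002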
| 46.8125
| 229
| 0.656431
|
6cb86a13deef32111663812ad4fb00eb3b14e702
| 60
|
py
|
Python
|
uproot_tree_utils/__init__.py
|
masonproffitt/uproot_tree_utils
|
c51203dcd55d39c247e74d08be5d1ed38e338e68
|
[
"MIT"
] | 6
|
2020-07-16T23:02:02.000Z
|
2021-08-31T06:28:18.000Z
|
uproot_tree_utils/__init__.py
|
masonproffitt/uproot_tree_utils
|
c51203dcd55d39c247e74d08be5d1ed38e338e68
|
[
"MIT"
] | 32
|
2020-07-11T10:01:43.000Z
|
2020-10-06T18:48:34.000Z
|
uproot_tree_utils/__init__.py
|
masonproffitt/uproot_tree_utils
|
c51203dcd55d39c247e74d08be5d1ed38e338e68
|
[
"MIT"
] | null | null | null |
from .clone import clone_tree
from .write import write_tree
| 20
| 29
| 0.833333
|
fa5076b532b067e8c7d9014401bb4b16c8f2f018
| 2,589
|
py
|
Python
|
app/app/api/endpoints/download.py
|
keegan337/ineth-music-share-api
|
414c395271c68e64aa88f4ed87efc6d92b89b5d2
|
[
"MIT"
] | 1
|
2021-01-11T09:31:35.000Z
|
2021-01-11T09:31:35.000Z
|
app/app/api/endpoints/download.py
|
keegan337/inethi-music-share
|
414c395271c68e64aa88f4ed87efc6d92b89b5d2
|
[
"MIT"
] | null | null | null |
app/app/api/endpoints/download.py
|
keegan337/inethi-music-share
|
414c395271c68e64aa88f4ed87efc6d92b89b5d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from flask import request, json
from ...core import downloads
from ...models import database as db_model
from ...main import app
from ...core import database as db_methods
from flask_cors import CORS, cross_origin
from ...models import user as user_model
from ...models import store as store_model
cors = CORS(app)
@cross_origin("http://localhost")
@app.route("/api/update-downloads/", methods=["POST"])
def update_downloads():
"""
Update the download counter for the current time
    :return: an HTTP response whose status code and body explain the outcome
"""
username = request.json.get('username')
song_name = request.json.get('songname')
user = user_model.User(username)
db = db_model.MusicDbModel()
local_store_model = store_model.WooModel("LOCAL")
global_store_model = store_model.WooModel("GLOBAL")
download_methods = downloads.DownloadsAPI()
result = download_methods.update_downloads(username, song_name, db, global_store_model, local_store_model)
if result == -1:
response = app.response_class(
response=json.dumps("could not write to db"),
status=400,
mimetype="application/json"
)
return response
elif not isinstance(result, str) and result >= 0:
response = app.response_class(
response=json.dumps(result),
status=200,
mimetype="application/json"
)
return response
else:
response = app.response_class(
response=json.dumps("It has been less than 30 minutes since the last update"),
status=200,
mimetype="application/json"
)
return response
@cross_origin("http://localhost")
@app.route("/api/initiate-download/", methods=["POST"])
def initiate_download_counter():
song_name = request.json.get('songname')
username = request.json.get('username')
local_id = request.json.get('localID')
aws_id = request.json.get('awsID')
db = db_model.MusicDbModel()
db_methods_object = db_methods.MusicShareDbAPI()
response_from_db = db_methods_object.initiate_download(db, song_name, username, local_id, aws_id)
if response_from_db:
response = app.response_class(
response=json.dumps("data written to db"),
status=200, mimetype="application/json"
)
else:
response = app.response_class(
response=json.dumps("could not write to the db"),
status=400,
mimetype="application/json"
)
return response
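# A minimal client-side sketch of exercising these endpoints (hedged: assumes
# the app is served on localhost:5000 and `requests` is installed; all names
# and values are illustrative only):
#   import requests
#   r = requests.post('http://localhost:5000/api/update-downloads/',
#                     json={'username': 'alice', 'songname': 'my-song'})
#   print(r.status_code, r.json())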
| 34.986486
| 110
| 0.665508
|
3f8bc9e8980fa62c48a8048e1a3cb76c4c75d7e7
| 2,060
|
py
|
Python
|
src/agstoolbox/core/cmdline.py
|
ericoporto/agstoolbox
|
2a689e3c653a0c48211c55c59e03ced3c6a07d43
|
[
"MIT"
] | 2
|
2022-02-06T16:00:00.000Z
|
2022-03-11T18:58:36.000Z
|
src/agstoolbox/core/cmdline.py
|
ericoporto/agstoolbox
|
2a689e3c653a0c48211c55c59e03ced3c6a07d43
|
[
"MIT"
] | 17
|
2022-01-24T11:21:21.000Z
|
2022-03-20T18:04:41.000Z
|
src/agstoolbox/core/cmdline.py
|
ericoporto/agstoolbox
|
2a689e3c653a0c48211c55c59e03ced3c6a07d43
|
[
"MIT"
] | null | null | null |
from __future__ import annotations # for python 3.8
from sys import exit
import argparse
from agstoolbox import __title__, __version__, __copyright__, __license__
from agstoolbox.core.ags.get_game_projects import list_game_projects_in_dir
def at_cmd_list(args):
    projects = list_game_projects_in_dir(args.Path)
    for p in projects:
        print(p.name + ', ' + p.ags_editor_version.as_str + ', ' + p.path)
def at_cmd_install(args):
    print('Install: Not implemented yet!')
    print(args)
def at_cmd_run(args):
    print('Run: Not implemented yet!')
    print(args)
def cmdline(show_help_when_empty: bool):
parser = argparse.ArgumentParser(
prog=__title__,
description=__title__ + ' is an application to help manage AGS Editor versions.',
epilog=__copyright__ + ", " + __license__ + ".")
parser.add_argument(
'-v', '--version', action='store_true', default=False, help='get software version.')
subparsers = parser.add_subparsers(help='command')
    # create the parsers for the subcommands
    parser_list = subparsers.add_parser('list', help='list AGS projects in a directory')
    parser_list.set_defaults(func=at_cmd_list)
    parser_list.add_argument('Path', metavar='path', type=str, help='the path to list')
    parser_install = subparsers.add_parser('install', help='install (not implemented yet)')
    parser_install.set_defaults(func=at_cmd_install)
    parser_run = subparsers.add_parser('run', help='run (not implemented yet)')
parser_run.set_defaults(func=at_cmd_run)
parser_list.add_argument('-p', '--proj', action='store_true', default=False, help='list AGS Projects')
parser_list.add_argument('-e', '--editors', action='store_true', default=False, help='list AGS Editors')
args = parser.parse_args()
if 'func' in args.__dict__:
args.func(args)
if args.version:
print(__title__ + " v " + __version__)
exit()
if any(vars(args).values()):
exit()
if show_help_when_empty:
parser.print_usage()
return []
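# A minimal sketch of driving the parser programmatically (hedged: argparse
# reads sys.argv, so it is patched here; the project path is illustrative only).
if __name__ == '__main__':
    import sys
    sys.argv = ['agstoolbox', 'list', '/home/user/ags-projects', '--proj']
    cmdline(show_help_when_empty=True)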
| 31.212121
| 108
| 0.691748
|
57330ca1f97beaee98a6ec5f3dd12812dd129623
| 224
|
py
|
Python
|
tests/test_commands.py
|
stactools-packages/gnatsgo
|
8e57e29bd4a5687866f6be0320e5e4a2fa89187b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_commands.py
|
stactools-packages/gnatsgo
|
8e57e29bd4a5687866f6be0320e5e4a2fa89187b
|
[
"Apache-2.0"
] | 2
|
2022-02-24T16:28:52.000Z
|
2022-02-24T16:29:03.000Z
|
tests/test_commands.py
|
stactools-packages/gnatsgo
|
8e57e29bd4a5687866f6be0320e5e4a2fa89187b
|
[
"Apache-2.0"
] | null | null | null |
from stactools.testing import CliTestCase
from stactools.gnatsgo.commands import create_gnatsgo_command
class CommandsTest(CliTestCase):
def create_subcommand_functions(self):
return [create_gnatsgo_command]
| 22.4
| 61
| 0.816964
|
9cdb62940ccfd8bc11a1aaa75da09e0ebb528eae
| 2,836
|
py
|
Python
|
tests/python/pants_test/integration/remote_cache_integration_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/integration/remote_cache_integration_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/integration/remote_cache_integration_test.py
|
rcuza/pants
|
0429258b181986eed856ae45af93b776727774a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.engine.internals.native_engine_pyo3 import PyExecutor, PyStubCAS
from pants.option.global_options import RemoteCacheWarningsBehavior
from pants.option.scope import GLOBAL_SCOPE_CONFIG_SECTION
from pants.testutil.pants_integration_test import run_pants
def test_warns_on_remote_cache_errors():
executor = PyExecutor(core_threads=2, max_threads=4)
cas = PyStubCAS.builder().always_errors().build(executor)
def run(behavior: RemoteCacheWarningsBehavior) -> str:
pants_run = run_pants(
[
"--backend-packages=['pants.backend.python']",
"--no-dynamic-ui",
"package",
"testprojects/src/python/hello/main:main",
],
use_pantsd=False,
config={
GLOBAL_SCOPE_CONFIG_SECTION: {
"remote_cache_read": True,
"remote_cache_write": True,
"remote_cache_warnings": behavior.value,
# NB: Our options code expects `grpc://`, which it will then convert back to
# `http://` before sending over FFI.
"remote_store_address": cas.address.replace("http://", "grpc://"),
}
},
)
pants_run.assert_success()
return pants_run.stderr
def read_err(i: int) -> str:
return f"Failed to read from remote cache ({i} occurrences so far): Unimplemented"
def write_err(i: int) -> str:
return (
f'Failed to write to remote cache ({i} occurrences so far): Internal: "StubCAS is '
f'configured to always fail"'
)
first_read_err = read_err(1)
first_write_err = write_err(1)
third_read_err = read_err(3)
third_write_err = write_err(3)
fourth_read_err = read_err(4)
fourth_write_err = write_err(4)
ignore_result = run(RemoteCacheWarningsBehavior.ignore)
for err in [
first_read_err,
first_write_err,
third_read_err,
third_write_err,
fourth_read_err,
fourth_write_err,
]:
assert err not in ignore_result
first_only_result = run(RemoteCacheWarningsBehavior.first_only)
for err in [first_read_err, first_write_err]:
assert err in first_only_result
for err in [third_read_err, third_write_err, fourth_read_err, fourth_write_err]:
assert err not in first_only_result
backoff_result = run(RemoteCacheWarningsBehavior.backoff)
for err in [first_read_err, first_write_err, fourth_read_err, fourth_write_err]:
assert err in backoff_result
for err in [third_read_err, third_write_err]:
assert err not in backoff_result
| 37.813333
| 96
| 0.649506
|
ec15bd8814f8f28000018e0aaa9144a07d0bc5e8
| 97,731
|
py
|
Python
|
hacker_python.py
|
SinaDashti/Hacker_Rank
|
4aefd711379c9a5c6c7141bcdef1426e1bd86b33
|
[
"MIT"
] | null | null | null |
hacker_python.py
|
SinaDashti/Hacker_Rank
|
4aefd711379c9a5c6c7141bcdef1426e1bd86b33
|
[
"MIT"
] | null | null | null |
hacker_python.py
|
SinaDashti/Hacker_Rank
|
4aefd711379c9a5c6c7141bcdef1426e1bd86b33
|
[
"MIT"
] | null | null | null |
################################################################################
#Q
################################################################################
# Read an integer N.
# Without using any string methods, try to print the following:
# 123...N
# Note that "..." represents the values in between.
# Input Format
# The first line contains an integer N.
# Output Format
# Output the answer as explained in the task.
# Sample Input
# 3
# Sample Output
# 123
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# n = int(input())
# i = 1
# while i<=n:
# print(i,end='')
# i+=1
################################################################################
# print(*range(1, int(input())+1), sep='')
################################################################################
#Q
################################################################################
# Let's learn about list comprehensions! You are given three integers X, Y and Z
# representing the dimensions of a cuboid along with an integer N.
# You have to print a list of all possible coordinates given by (i, j, k) on a 3D
# grid where the sum i + j + k is not equal to N. Here, 0 <= i <= X; 0 <= j <= Y; 0 <= k <= Z.
#
# Input Format
# Four integers X,Y,Z and N each on four separate lines, respectively.
# Constraints
# Print the list in lexicographic increasing order.
# Sample Input
# 1
# 1
# 1
# 2
# Sample Output
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
# Explanation 0
# Concept
# You have already used lists in previous hacks. List comprehensions are an
# elegant way to build a list without having to use different for loops to
# append values one by one. This example might help.
# Example: You are given two integers x and y. You need to find out the
# ordered pairs (i, j) such that (i + j) is not equal to n and print them in
# lexicographic order (0 <= i <= x and 0 <= j <= y).
# Without a list comprehension (Python 2):
#   x = int(raw_input())
#   y = int(raw_input())
#   n = int(raw_input())
#   ar = []
#   p = 0
#   for i in range(x + 1):
#       for j in range(y + 1):
#           if i + j != n:
#               ar.append([])
#               ar[p] = [i, j]
#               p += 1
#   print ar
# Other, shorter solutions may exist, but a list comprehension is always a
# good option:
#   x = int(raw_input())
#   y = int(raw_input())
#   n = int(raw_input())
#   print [[i, j] for i in range(x + 1) for j in range(y + 1) if ((i + j) != n)]
# Sample Input
# 2
# 2
# 2
# 2
# Sample Output
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 2], [0, 2, 1], [0, 2, 2], [1, 0, 0],
# [1, 0, 2], [1, 1, 1], [1, 1, 2], [1, 2, 0], [1, 2, 1], [1, 2, 2], [2, 0, 1],
# [2, 0, 2], [2, 1, 0], [2, 1, 1], [2, 1, 2], [2, 2, 0], [2, 2, 1], [2, 2, 2]]
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# x = int(input())
# y = int(input())
# z = int(input())
# n = int(input())
# print (
# [ [ i, j, k] for i in range( x + 1)
# for j in range( y + 1)
# for k in range( z + 1)
# if ( ( i + j + k) != n )]
# )
# other way of getting input
# x,y,z,n = [input() for i in range(4)]
# x, y, z, n = (int(input()) for _ in range(4))
################################################################################
#Q
################################################################################
# Given the participants' score sheet for your University Sports Day, you are
# required to find the runner-up score. You are given n scores. Store them in a
# list and find the score of the runner-up.
# Input Format
# The first line contains n. The second line contains an array A[] of n integers
# each separated by a space.
# Constraints:
# 2<=n<=10
# -100 <= A[i] <= 100
# Output Format
# Print the runner-up score.
# Sample Input
# 5
# 2 3 6 6 5
# Sample Output
# 5
# Explanation
# The given list is [2, 3, 6, 6, 5]. The maximum score is 6, and the second
# maximum is 5. Hence, we print 5 as the runner-up score.
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# n = int(input())
# arr = map(int, input().split())
# arr = list(set(arr))
# arr.pop(arr.index(max(arr)))
# print(max(arr))
################################################################################
# i = int(input())
# lis = list(map(int,raw_input().strip().split()))[:i]
# z = max(lis)
# while max(lis) == z:
# lis.remove(max(lis))
#
# print max(lis)
################################################################################
#Q
################################################################################
# nested-list-English
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# li = []
# out = []
# for _ in range(int(input())):
# name = input()
# score = float(input())
# li.append([name, score])
#
# li.sort(key=lambda li:li[1])
# first_min = li[0][1]
# new = li[1:]
# for student, mark in new:
# if mark == first_min:
# continue
# elif mark > first_min:
# idx = new.index([student,mark])
# out.append(student)
# if idx < len(new) - 1:
# if new[idx][1] < new[idx + 1][1]:
# break
# out.sort()
# for item in out:
# print(item)
# print('\n'.join(a for a in sorted(out)))
################################################################################
# out.sort()
#
# sorted(out)
# ['a','b','c']
################################################################################
# n = int(input())
# marksheet = [[input(), float(input())] for _ in range(n)]
# second_highest = sorted(list(set([marks for name, marks in marksheet])))[1]
# print('\n'.join([a for a,b in sorted(marksheet) if b == second_highest]))
################################################################################
################################################################################
#Q
################################################################################
# finding-the-percentage-English
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# n = int(input())
# student_marks = {}
# for _ in range(n):
# name, *line = input().split()
# scores = list(map(float, line))
# student_marks[name] = scores
# query_name = input()
#
# print('%.2f' %(sum(student_marks[query_name])/3))
################################################################################
# query_scores = student_marks[query_name]
# print('{0:.2}'.format(sum(student_marks[query_name])/(len(query_scores))))
################################################################################
#Q
################################################################################
# python-lists-English
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# N = int(input())
# li = []
# for _ in range(N):
# name, *elm = input().split()
# if name != "print" :
# name += "("+ ",".join(elm) +")"
# eval("li."+name)
# else:
# print(li)
#
# if __name__ == '__main__':
# N = int(input())
# li = []
# for _ in range(N):
# name, *elm = input().split()
# if name != "print" :
# eval('li.{0}{1}'.format(name,tuple(map(int,elm))))
# else:
# print(li)
################################################################################
#Q
################################################################################
# python-tuples-English
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# n = int(input())
# integer_list = tuple(map(int, input().split()))
# print(hash(integer_list))
################################################################################
#Q
################################################################################
# decorators-2-name-directory-English
################################################################################
#A
################################################################################
# def person_lister(f):
# def inner(people):
# # complete the function
# return map(f, sorted(people, key=lambda x: int(x[2])))
# return inner
#
# @person_lister
# def name_format(person):
# return ("Mr. " if person[3] == "M" else "Ms. ") + person[0] + " " + person[1]
#
# if __name__ == '__main__':
# people = [input().split() for i in range(int(input()))]
# print(*name_format(people), sep='\n')
################################################################################
# decorators example
# def my_decorator(func):
# def wrapper():
# print("before function")
# func()
# print("after function")
# return wrapper
#
# def hi_arash():
# print("hi Arash!")
#
# the decoration happens here: pass the existing function in and reassign the
# result to the same name. Decorators wrap a function and allow other code to
# run around it.
# hi_arash = my_decorator(hi_arash)
#
# >>> hi_arash
# <function __main__.my_decorator.<locals>.wrapper()>
#
# >>> hi_arash()
# before function
# hi Arash!
# after function
################################################################################
#Q
################################################################################
# itertools-permutations-English
################################################################################
#A
################################################################################
# from itertools import permutations
# p = input().split()
# print('\n'.join(map(lambda i: ''.join(i),permutations(sorted(p[0]),int(p[1])))))
# print(*[''.join(i) for i in permutations(sorted(p[0]),int(p[1]))],sep='\n')
################################################################################
#Q
################################################################################
# itertools-combinations-English
################################################################################
#A
################################################################################
# from itertools import combinations
# p = input().split()
# print(*[''.join(item) for lis in [combinations(sorted(p[0]),i)
# for i in range(1,int(p[1])+1)]
# for item in lis],sep='\n')
# more readable
# l = [combinations(sorted(p[0]),i) for i in range(1,int(p[1])+1)]
# print(*[''.join(item) for lis in l for item in lis],sep='\n')
################################################################################
#Q
################################################################################
# itertools-combinations-with-replacement-English
################################################################################
#A
################################################################################
# from itertools import combinations_with_replacement
# p = input().split()
# print(*[''.join(i) for i in combinations_with_replacement(sorted(p[0]),int(p[1]))],sep = '\n')
################################################################################
#Q
################################################################################
# compress-the-string-English
################################################################################
#A
################################################################################
# from itertools import groupby
# p = input()
# groups = [list(g) for k, g in groupby(p)]
# keys = [k for k, g in groupby(p)]
# gr_len = [len(i) for i in groups]
# print(*tuple(zip(gr_len,map(int,keys))))
#
# print(*tuple(zip(list(map(len,(list(g) for k, g in groupby(p)))),map(int,[k for k, g in groupby(p)]))))
################################################################################
# print(*[(len(list(g)), int(k)) for k, g in groupby(input())])
# print(' '.join(('({}, {})'.format(len(list(g)), x) for x,g in groupby(input()))))
################################################################################
################################################################################
#Q
################################################################################
# iterables-and-iterators-English
################################################################################
################################################################################
#A
################################################################################
# import itertools as it
# N = int(input())
# li = input().split()
# K = int(input())
# C = list(it.combinations(li, K))
# print('%.3f' % float(len([i for i in C if 'a' in i])/len(C)))
################################################################################
# from itertools import combinations
# N = int(input())
# L = input().split()
# K = int(input())
# C = list(combinations(L, K))
# F = filter(lambda c: 'a' in c, C)
# print("{0:.3}".format(len(list(F))/len(C)))
################################################################################
#Q
################################################################################
# swap-case-English
################################################################################
################################################################################
#A
################################################################################
# def swap_case(s):
# return (''.join([i.lower() if i.isupper() else i.upper() for i in s]))
#
# if __name__ == '__main__':
# s = input()
# result = swap_case(s)
# print(result)
#
# print(*map(lambda ch : ch.lower() if ch.isupper() else ch.upper(), input()), sep="")
################################################################################
#Q
################################################################################
# python-string-split-and-join-English
################################################################################
################################################################################
#A
################################################################################
# def split_and_join(line):
# # write your code here
# return ('-'.join(line.split(' ')))
#
# if __name__ == '__main__':
# line = input()
# result = split_and_join(line)
# print(result)
################################################################################
#Q
################################################################################
# find-a-string-English
################################################################################
################################################################################
#A
################################################################################
# def count_substring(string, sub_string):
# return len([i for i in range(len(string)) if string.startswith(sub_string, i)])
#
# if __name__ == '__main__':
# string = input().strip()
# sub_string = input().strip()
#
# count = count_substring(string, sub_string)
# print(count)
################################################################################
# string, substring = (input().strip(), input().strip())
# print(sum([ 1 for i in range(len(string)-len(substring)+1) \
# if string[i:i+len(substring)] == substring]))
################################################################################
# def count_substring(string, sub_string):
# count=0
# #print(len(string),len(sub_string))
# for i in range(0, len(string)-len(sub_string)+1):
# if string[i] == sub_string[0]:
# flag=1
# for j in range (0, len(sub_string)):
# if string[i+j] != sub_string[j]:
# flag=0
# break
# if flag==1:
# count += 1
# return count
################################################################################
#Q
################################################################################
# string-validators-English
################################################################################
################################################################################
#A
################################################################################
# if __name__ == '__main__':
# s = input()
# print(*list(map(lambda x: eval('any(ch.{0} for ch in list(s))'.format(x)),
# ['isalnum()', 'isalpha()', 'isdigit()', 'islower()', 'isupper()'])), sep='\n')
################################################################################
#Q
################################################################################
# designer-door-mat-English
################################################################################
################################################################################
#A
################################################################################
# N, M = map(int,input().split())
# top = [('.|.'*(2*i - 1)).center(M,'-') for i in range(1,N//2 + 1)]
# print('\n'.join(top)+ '\n' +'WELCOME'.center(M ,'-')+ '\n' + '\n'.join(top[::-1]))
################################################################################
#Q
################################################################################
# python-string-formatting-English
################################################################################
################################################################################
#A
################################################################################
# def print_formatted(n):
# width = len("{0:b}".format(n))
# for num in range(1,n+1):
# for base in 'dXob':
# print('{0:{width}{base}}'.format(num, base=base, width=width),end = ' ')
# print('\n', end='')
#
# if __name__ == '__main__':
# n = int(input())
# print_formatted(n)
################################################################################
# def print_formatted(n):
# width = len("{0:b}".format(n))
# for i in range(1,n+1):
# print ("{0:{width}d} {0:{width}o} {0:{width}X} {0:{width}b}".format(i,width=width))
################################################################################
#Q
################################################################################
# alphabet-rangoli-English
################################################################################
################################################################################
#A
################################################################################
# def print_rangoli(size):
# # your code goes here
# l = [str(chr(i + 97)) for i in range(size)]
# down = [l[-1:-(len(l)-i):-1] + l[i:(len(l))] for i in range(len(l))]
# for line in down[:-(len(down)):-1]:
# print('-'.join(line).center(2 * len(down[0]) -1, '-'))
# for line in down:
# print('-'.join(line).center(2 * len(down[0]) -1, '-'))
#
# if __name__ == '__main__':
# n = int(input())
# print_rangoli(n)
################################################################################
#Q
################################################################################
# capitalize-English
################################################################################
################################################################################
#A
################################################################################
# def solve(s):
# return ' '.join(list(map(lambda x:x.capitalize(),s.split(' '))))
#
# # ' '.join(map(str.capitalize, s.split(' ')))
#
# if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
#
# s = input()
#
# result = solve(s)
#
# fptr.write(result + '\n')
#
# fptr.close()
################################################################################
#Q
################################################################################
# itertools-product-English
################################################################################
################################################################################
#A
################################################################################
# import itertools as it
#
# print(*(it.product(map(int,input().split()),map(int,input().split()))))
#
# A, B = [list(map(int, input().split())) for _ in range(2)]
# print(*(it.product(A,B)))
################################################################################
#Q
################################################################################
# collections-counter-English
################################################################################
################################################################################
#A
################################################################################
# from collections import Counter
#
# _, c = input(), Counter(input().split())
# tot = 0
# for _ in range(int(input())):
# temp = input().split()
# if c[temp[0]]:
# tot+= int(temp[1])
# c[temp[0]]-=1
# print(tot)
# ################################################################################
# from collections import Counter
#
# _, stock = input(), Counter(list(map(int,input().split())))
# money = 0
# for size, cost in [map(int, input().split()) for _ in range(int(input()))]:
# if stock[size] > 0:
# stock[size] -= 1
# money += cost
# print(money)
################################################################################
#Q
################################################################################
# defaultdict-tutorial-English
################################################################################
################################################################################
#A
################################################################################
# from collections import defaultdict
#
# d = defaultdict(list)
# n, m = map(int, input().split())
# A, B = [input() for _ in range(n)], [input() for _ in range(m)]
# for idx, val in enumerate(A, start = 1):
# d[val].append(idx)
# for key in B:
# if key in d.keys():
# print(*d[key])
# else:
# print('-1')
################################################################################
# or for the last for
# for key in B:
# print(*d[key] or -1)
################################################################################
# without enuemarte and creating A and B
# for i in range(n):
# d[input()].append(i + 1)
#
# for _ in range(m):
# print(' '.join(map(str, d[input()])) or -1)
################################################################################
################################################################################
#Q
################################################################################
# py-collections-namedtuple-English
################################################################################
################################################################################
#A
################################################################################
# from collections import namedtuple
# N, Student =int(input()), namedtuple('Student',input().split())
# print('%.2f' %float(sum(list(map(int,[Student._make(input().split()).MARKS for _ in range(N)])))/N))
################################################################################
################################################################################
#Q
################################################################################
# py-collections-ordereddict-English
################################################################################
################################################################################
#A
################################################################################
# from collections import OrderedDict
#
# od = OrderedDict()
# for _ in range(int(input())):
# temp = input().split()
# if ' '.join(temp[0:-1]) not in od.keys():
# od[' '.join(temp[0:-1])] = int(temp[-1])
# else:
# od[' '.join(temp[0:-1])] += int(temp[-1])
# print('\n'.join([k + ' ' + str(v) for k,v in od.items()]))
################################################################################
# d = OrderedDict()
# for _ in range(int(input())):
# item, space, quantity = input().rpartition(' ')
# d[item] = d.get(item, 0) + int(quantity)
# for item, quantity in d.items():
# print(item, quantity)
################################################################################
# dct = OrderedDict()
# for _ in range(int(input())):
# i = input().rpartition(" ")
# dct[i[0]] = int(i[-1]) + dct[i[0]] if i[0] in dct else int(i[-1])
# for l in dct:
# print(l, dct[l])
################################################################################
################################################################################
#Q
################################################################################
# py-collections-deque-English
################################################################################
################################################################################
#A
################################################################################
# from collections import deque
# d = deque()
# for _ in range(int(input())):
# temp = input().split()
# if 'append' in temp[0]:
# eval('d.{}({})'.format(temp[0], temp[1]))
# else:
# eval('d.{}()'.format(temp[0]))
#
# # eval('d.{}({})'.format(temp[0], temp[1])) if 'append' in temp[0] else eval('d.{}()'.format(temp[0]))
# print(*d)
################################################################################
# from collections import deque
# d = deque()
# for _ in range(int(input())):
# cmd, *args = input().split()
# getattr(d, cmd)(*args)
# # getattr(d, command)(*map(int, args))
# [print(x, end=' ') for x in d]
################################################################################
################################################################################
################################################################################
#Q
################################################################################
# symmetric-difference-English
################################################################################
################################################################################
#A
################################################################################
# _, M = input(), set(map(int, input().split()))
# _, N = input(), set(map(int, input().split()))
# for i in map(str,sorted(N.difference(M).union(M.difference(N)))):
# print(i)
################################################################################
# a,b = [set(input().split()) for _ in range(4)][1::2]
# print(*sorted(a^b, key=int), sep='\n')
################################################################################
################################################################################
################################################################################
#Q
################################################################################
# py-set-add-English
################################################################################
################################################################################
#A
################################################################################
# print(len(set([input() for _ in range(int(input()))])))
################################################################################
#Q
################################################################################
# py-set-discard-remove-pop-English
################################################################################
################################################################################
#A
################################################################################
# n = int(input())
# s = set(map(int, input().split()))
# for _ in range(int(input())):
# cmd, *arg = input().split()
# eval('s.{}({})'.format(cmd, *arg)) if 'pop' not in cmd else eval('s.{}()'.format(cmd))
# print(sum(s))
################################################################################
# n = int(input())
# s = set(map(int, input().split()))
# for i in range(int(input())):
# eval('s.{0}({1})'.format(*input().split()+['']))
#
# print(sum(s))
################################################################################
# n = int(input())
# s = set(map(int, input().split()))
# for _ in range(int(input())):
# method, *args = input().split()
# getattr(s, method)(*map(int,args))
# print(sum(s))
################################################################################
################################################################################
#Q
################################################################################
# py-set-union-English
################################################################################
################################################################################
#A
################################################################################
# a,b = [list(map(int,set(input().split()))) for _ in range(4)][1::2]
# print(len(set(b).union(set(a))))
################################################################################
# _,a,_,b=[set(input().split()) for _ in '1234']; print(len(a|b))
################################################################################
################################################################################
#Q
################################################################################
# py-set-intersection-operation-English
################################################################################
################################################################################
#A
################################################################################
# _,a,_,b=[set(input().split()) for _ in '1234']; print(len(a&b))
################################################################################
#Q
################################################################################
# py-set-difference-operation-English
################################################################################
################################################################################
#A
################################################################################
# _,a,_,b=[set(input().split()) for _ in '1234']; print(len(a-b))
################################################################################
#Q
################################################################################
# py-set-mutations-English
################################################################################
################################################################################
#A
################################################################################
# _, a = input(), set(map(int, input().split()))
# for i in range(int(input())):
# eval('a.{}({})'.format(input().split()[0], set(map(int, input().split()))))
# print(sum(a))
################################################################################
#Q
################################################################################
# py-the-captains-room-English
################################################################################
################################################################################
#A
################################################################################
# _, args = input(), input().split()
# print(*[i for i in set(sorted(args)) if args.count(i) == 1])
################################################################################
# members = input().split()
# rooms = set() # Contains all the rooms.
# room_more_mem = set() # Contains only the rooms with families.
#
# for m in members:
# if m not in room_more_mem:
# target = room_more_mem if m in rooms else rooms
# target.add(m)
# print(rooms.difference(room_more_mem).pop())
################################################################################
#Q
################################################################################
# py-check-subset-English
################################################################################
################################################################################
#A
################################################################################
# for _ in range(int(input())):
# _, A, _, B = input(), set(input().split()), input(), set(input().split())
# print(A.issubset(B))
################################################################################
#Q
################################################################################
# py-check-strict-superset-English
################################################################################
################################################################################
#A
################################################################################
# A = set(input().split())
# print(all([A.issuperset(input().split()) for _ in range(int(input()))]))
################################################################################
# a = set(input().split())
# print(all(a > set(input().split()) for _ in range(int(input()))))
################################################################################
#Q
################################################################################
# Polar-Coordinates
################################################################################
################################################################################
#A
################################################################################
# from cmath import phase
# z = complex(input())
# print('{:.3f}\n{:.3f}'.format(abs(z), phase(z)))
################################################################################
# from cmath import polar
# print('{}\n{}'.format(*polar(complex(input()))))
################################################################################
# import cmath
# print(*cmath.polar(complex(input())), sep='\n')
################################################################################
#Q
################################################################################
# mod-divmod
################################################################################
################################################################################
#A
################################################################################
# a = divmod(int(input()),int(input()))
# print(a[0],a[1],a,sep='\n')
################################################################################
# print('{0}\n{1}\n({0},{1})'.format(*divmod(int(input()), int(input()))))
################################################################################
#Q
################################################################################
# power-mod-power
################################################################################
################################################################################
#A
################################################################################
# a, b, m = [int(input()) for _ in range(3)]
# print(pow(a, b), pow(a, b, m), sep='\n')
# # the built-in pow does exact integer arithmetic; math.pow works in floats
# # and loses precision or overflows for large exponents
################################################################################
#Q
################################################################################
# triangle-quest
################################################################################
################################################################################
#A
################################################################################
# for i in range(1,int(input())):
# print((10**(i)//9)*i)
# print([0, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999][i])
################################################################################
#Q
################################################################################
# calendar-module
################################################################################
################################################################################
#A
################################################################################
# import calendar
# m,d,y = map(int,input().split())
# print(calendar.day_name[calendar.weekday(y, m, d)].upper())
################################################################################
#Q
################################################################################
# exceptions
################################################################################
################################################################################
#A
################################################################################
# for i in [input().split() for i in range(int(input()))]:
# try:
# print(int(i[0])//int(i[1]))
# except Exception as e:
# print("Error Code:",e)
################################################################################
#Q
################################################################################
# incorrect-regex
################################################################################
################################################################################
#A
################################################################################
# import re
# for i in [input() for i in range(int(input()))]:
# try:
# re.compile(i)
# print(True)
# except Exception:
# print(False)
################################################################################
#Q
################################################################################
# map-and-lambda-expression
################################################################################
################################################################################
#A
################################################################################
# cube = lambda x: x**3
# def fibonacci(n):
# # return a list of fibonacci numbers
# lis = [0,1]
# for i in range(2,n):
# lis.append(lis[i-2] + lis[i-1])
# return(lis[0:n])
#
# if __name__ == '__main__':
# n = int(input())
# print(list(map(cube, fibonacci(n))))
################################################################################
#Q
################################################################################
# zipped-English
################################################################################
################################################################################
#A
################################################################################
# N, X = map(int,input().split())
# l = [list(zip([i+1 for i in range(N)],input().split())) for mark in range(X)]
# for student in range(N):
# s = 0
# for subject in range(X):
# s += float(l[subject][student][1])
# print('{:.1f}'.format(s/X))
################################################################################
# n, x = map(int, input().split())
# sheet = []
# for _ in range(x):
# sheet.append( map(float, input().split()) )
# for i in zip(*sheet):
# print( sum(i)/len(i) )
################################################################################
# _, X = map(int,input().split())
# sheet = [map(float, input().split()) for _ in range(X)]
# print(*[sum(i)/len(i) for i in zip(*sheet)], sep = '\n')
################################################################################
# [print(sum(i) / len(i)) for i in zip(*[map(float, input().split()) for _ in range(int(input().split()[1]))])]
################################################################################
################################################################################
#Q
################################################################################
# input
################################################################################
################################################################################
#A
################################################################################
# x, k = map(int,input().split())
# print(eval(input()) == k)
################################################################################
#Q
################################################################################
# any-or-all-English
################################################################################
################################################################################
#A
################################################################################
# _, L = input(), input().split()
# print(all([any(map(lambda x: x == x[::-1], L)), all(map(lambda x: int(x) > 0, L))]))
################################################################################
# N,n = int(input()),input().split()
# print(all([int(i) > 0 for i in n]) and any([j == j[::-1] for j in n]))
################################################################################
################################################################################
#Q
################################################################################
# class-2-find-the-torsional-angle
################################################################################
################################################################################
#A
################################################################################
# import math
#
# class Points(object):
# def __init__(self, x, y, z):
# self.x = x
# self.y = y
# self.z = z
#
# def __sub__(self, no):
# return Points((self.x - no.x), (self.y - no.y), (self.z - no.z))
#
# def dot(self, no):
# return (self.x * no.x) + (self.y * no.y) + (self.z * no.z)
#
# def cross(self, no):
# return Points((self.y * no.z - self.z * no.y),
# (self.z * no.x - self.x * no.z),
# (self.x * no.y - self.y * no.x))
#
# def absolute(self):
# return pow((self.x ** 2 + self.y ** 2 + self.z ** 2), 0.5)
#
# if __name__ == '__main__':
# points = list()
# for i in range(4):
# a = list(map(float, input().split()))
# points.append(a)
#
# a, b, c, d = Points(*points[0]), Points(*points[1]), Points(*points[2]), Points(*points[3])
# x = (b - a).cross(c - b)
# y = (c - b).cross(d - c)
# angle = math.acos(x.dot(y) / (x.absolute() * y.absolute()))
#
# print("%.2f" % math.degrees(angle))
################################################################################
################################################################################
#Q
################################################################################
# introduction-to-regex-English
################################################################################
################################################################################
#A
################################################################################
# import re
# print(*[bool(re.match(r'^[-+]?\d*\.\d+$', input())) for _ in range(int(input()))], sep = '\n')
# bool(re.match(r'^[-+]?[0-9]*\.[0-9]+$', input()))
################################################################################
################################################################################
#Q
################################################################################
# re-group-groups-English
################################################################################
################################################################################
#A
################################################################################
# import re
# r = re.search(r"([a-zA-Z0-9])\1+",input())
# print(r.group(1) if r else -1)
################################################################################
################################################################################
#Q
################################################################################
# re-findall-re-finditer-English
################################################################################
################################################################################
#A
################################################################################
# import re
# s = '[qwrtypsdfghjklzxcvbnm]'
# a = re.findall('(?<=' + s +')([aeiou]{2,})' + s, input(), re.I)
# print('\n'.join(a or ['-1']))
################################################################################
#Q
################################################################################
# re-start-re-end-English
################################################################################
################################################################################
#A
################################################################################
# from re import compile
# data, pattern = input(), compile( input() )
# m = pattern.search(data)
# if not m : print("(-1, -1)")
# while m:
# print(f"({m.start()}, {m.end() - 1})")
# m = pattern.search( data, m.start() + 1)
################################################################################
#Q
################################################################################
# validate-a-roman-number-English
################################################################################
################################################################################
#A
################################################################################
# import re
# regex_pattern = r"^M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$"
# print(str(bool(re.match(regex_pattern, input()))))
################################################################################
#Q
################################################################################
# validating-the-phone-number-English
################################################################################
################################################################################
#A
################################################################################
# import re
# l = [bool(re.match(r"^[7-9]\d{9}$", input())) for _ in range(int(input()))]
# print(*['YES' if i else 'NO' for i in l], sep= '\n')
################################################################################
# [print('YES' if re.match(r'[789]\d{9}$',input()) else 'NO') for _ in range(int(input()))]
################################################################################
# it is better to compile and save the pattern once instead of re-parsing it
# inside the loop; the second solution is also better because it uses one loop.
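# # a small sketch of precompiling, assuming the same phone-number input:
# import re
# pattern = re.compile(r'[789]\d{9}$')  # compiled once, reused each iteration
# for _ in range(int(input())):
#     print('YES' if pattern.match(input()) else 'NO')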
################################################################################
################################################################################
#Q
################################################################################
# validating-named-email-addresses-English
################################################################################
################################################################################
#A
################################################################################
# import re
# p = r'<[a-zA-Z][\w.-]*@[a-zA-Z]+\.[a-zA-Z]{1,3}>'  # * (not +) so one-letter usernames pass
# for _ in range(int(input())):
# n, e = input().split(' ')
# m = re.match(p, e, re.I)
# if m:
# print(n, e)
################################################################################
################################################################################
#Q
################################################################################
# hex-color-code
################################################################################
################################################################################
#A
################################################################################
# import re
# reg = re.compile(r"(:|,| +)(#[abcdefABCDEF1234567890]{3}|#[abcdefABCDEF1234567890]{6})\b")
# n = int(input())
# for i in range(n):
# line = input()
# items = reg.findall(line)
# if items:
# for item in items:
# print( item[1] )
# alternative patterns:
# (?<=[:\s])#[a-f0-9A-F]{3,}(?!\s)
# (?<!^)(#(?:[\da-f]{3}){1,2})
################################################################################
################################################################################
#Q
################################################################################
# html-parser-part-1-English
################################################################################
################################################################################
#A
################################################################################
# from html.parser import HTMLParser
#
# class MyHTMLParser(HTMLParser):
# def handle_starttag(self, tag, attrs):
# print ('{:6}: {}'.format('Start', tag))
# temp = dict(attrs)
# for k, v in temp.items():
# print ("-> " + k + " > " + str(v))
#
# def handle_endtag(self, tag):
# print ('{:6}: {}'.format('End', tag))
#
# def handle_startendtag(self, tag, attrs):
# print ('{:6}: {}'.format('Empty', tag))
# temp = dict(attrs)
# for k, v in temp.items():
# print ("-> " + k + " > " + str(v))
#
# MyParser = MyHTMLParser()
# MyParser.feed(''.join([input().strip() for _ in range(int(input()))]))
# # alternative attribute printing (belongs inside handle_starttag):
# # for ele in attrs:
# #     print('->', ele[0], '>', ele[1])
################################################################################
################################################################################
#Q
################################################################################
# html-parser-part-2-English
################################################################################
################################################################################
#A
################################################################################
# from html.parser import HTMLParser
#
# class MyHTMLParser(HTMLParser):
#
# def handle_data(self, data):
# if data != '\n':
# print (">>> Data\n" + data)
#
# def handle_comment(self, data):
# if '\n' in data:
# print('>>> Multi-line Comment\n' + data)
# else:
# print (">>> Single-line Comment\n" + data)
#
# html = ""
# for i in range(int(input())):
# html += input().rstrip()
# html += '\n'
#
# parser = MyHTMLParser()
# parser.feed(html)
# parser.close()
################################################################################
################################################################################
#Q
################################################################################
# detect-html-tags-attributes-and-attribute-values-English
################################################################################
################################################################################
#A
################################################################################
# from html.parser import HTMLParser
# class MyHTMLParser(HTMLParser):
# def handle_starttag(self, tag, attrs):
# print(tag)
# [print('-> {} > {}'.format(*attr)) for attr in attrs]
#
# html = '\n'.join([input() for _ in range(int(input()))])
# parser = MyHTMLParser()
# parser.feed(html)
# parser.close()
################################################################################
################################################################################
#Q
################################################################################
# validating-uid-English
################################################################################
################################################################################
#A
################################################################################
# import re
# for _ in range(int(input())):
# s = input()
# print('Valid' if all([re.search(r, s)
# for r in [r'[A-Za-z0-9]{10}', r'([A-Z].*){2}', r'([0-9].*){3}']])
# and not re.search(r'(.).*\1', s)
# else 'Invalid')
################################################################################
# def is_valid(uid):
# has_2_or_more_upper = bool(re.search(r'[A-Z]{2,}', uid))
# has_3_or_more_digits = bool(re.search(r'\d{3,}', uid))
# has_10_proper_elements = bool(re.match(r'^[a-zA-Z0-9]{10}$', uid))
# no_repeats = not bool(re.search(r'(.)\1', uid))
#
# if has_2_or_more_upper and has_3_or_more_digits and has_10_proper_elements and no_repeats:
# return "Valid"
# else:
# return "Invalid"
#
# for _ in range(int(input())):
#     print(is_valid(input()))
################################################################################
################################################################################
################################################################################
#Q
################################################################################
# xml-1-find-the-score-English
################################################################################
################################################################################
#A
################################################################################
# import sys
# import xml.etree.ElementTree as etree
#
# def get_attr_number(node):
#     # sum attribute counts over the node and all of its descendants
#     s = 0
#     for child in node.iter():  # was root.iter(), which only worked via the global
#         s += len(child.attrib)
#     return s
#
# if __name__ == '__main__':
# sys.stdin.readline()
# xml = sys.stdin.read()
# tree = etree.ElementTree(etree.fromstring(xml))
# root = tree.getroot()
# print(get_attr_number(root))
################################################################################
# return sum(len(elem.items()) for elem in tree.iter())
################################################################################
################################################################################
################################################################################
#Q
################################################################################
# xml2-find-the-maximum-depth-English
################################################################################
################################################################################
#A
################################################################################
# import xml.etree.ElementTree as etree
#
# maxdepth = 0
# def depth(elem, level):
# global maxdepth
# if (level == maxdepth):
# maxdepth += 1
# # recursive call to function to get the depth
# for child in elem:
# depth(child, level + 1)
#
# if __name__ == '__main__':
# n = int(input())
# xml = ""
# for i in range(n):
# xml = xml + input() + "\n"
# tree = etree.ElementTree(etree.fromstring(xml))
# depth(tree.getroot(), -1)
# print(maxdepth)
################################################################################
#Q
################################################################################
# np-shape-reshape-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# print(np.reshape(np.array(list(map(int, input().split()))), (3, 3)))
# print(np.array(input().split(),int).reshape(3,3))
# print(np.fromstring(input(), dtype=int, sep=" ").reshape(3, 3))  # fromstring is deprecated; prefer np.array as above
################################################################################
#Q
################################################################################
# np-transpose-and-flatten-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N, M = map(int,input().split())
# arr = [list(map(int,input().split()[:M])) for r in range(N)]
# print(np.array(arr).transpose() , np.array(arr).flatten() , sep = '\n')
################################################################################
#Q
################################################################################
# np-concatenate-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N, M, P = map(int,input().split())
# print(np.array([input().split()[:P] for _ in range(N + M)], int))
#
# NxP = np.array([input().split()[:P] for _ in range(N)], int)
# MxP = np.array([input().split()[:P] for _ in range(M)], int)
# print(np.concatenate((NxP, MxP), axis = 0))
################################################################################
#Q
################################################################################
# np-zeros-and-ones-English
################################################################################
################################################################################
#A
################################################################################
# import numpy
# nums = tuple(map(int, input().split()))
# print(numpy.zeros(nums, dtype=int), numpy.ones(nums, dtype=int), sep='\n')
################################################################################
#Q
################################################################################
# np-array-mathematics-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N, M = map(int,input().split())
# A = np.array([input().split()[:M] for _ in range(N)], int)
# B = np.array([input().split()[:M] for _ in range(N)], int)
# print(A + B, A - B, A * B, A // B, A % B, A ** B, sep = '\n')
################################################################################
#Q
################################################################################
# np-sum-and-prod-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N, M = map(int,input().split())
# print(np.prod(np.sum([np.array(input().split()[:M], int) for _ in range(N)], axis = 0), axis = None))
################################################################################
#Q
################################################################################
# np-min-and-max-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N, M = map(int,input().split())
# NxM = np.array([input().split()[:M] for _ in range(N)], int)
# print(np.max(np.min(NxM, axis = 1)))
################################################################################
#Q
################################################################################
# np-mean-var-and-std-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# np.set_printoptions(legacy='1.13')
# N, M = map(int,input().split())
# NxM = np.array([input().split()[:M] for _ in range(N)], int)
# print(np.mean(NxM, axis = 1), np.var(NxM, axis = 0), np.std(NxM), sep = '\n')
################################################################################
#Q
################################################################################
# np-dot-and-cross-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N = int(input())
# A = np.array([input().split()[:N] for _ in range(N)], int)
# B = np.array([input().split()[:N] for _ in range(N)], int)
# print(A.dot(B)) # print(np.dot(A, B))
################################################################################
#Q
################################################################################
# np-inner-and-outter-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# A = np.array(input().split(), int)
# B = np.array(input().split(), int)
# print(np.inner(A, B), np.outer(A, B),sep = '\n')
################################################################################
#Q
################################################################################
# np-polynomials-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# P = np.array(input().split(), float)
# X = float(input())
# print(np.polyval(P, X))
################################################################################
# print(np.polyval(list(map(float,input().split())), float(input())))
################################################################################
#Q
################################################################################
# np-linear-algebra-English
################################################################################
################################################################################
#A
################################################################################
# import numpy as np
# N = int(input())
# A = np.array([input().split() for _ in range(N)], float)
# print(round(np.linalg.det(A), 2))
################################################################################
#Q
################################################################################
# standardize-mobile-number-using-decorators-English
################################################################################
################################################################################
#A
################################################################################
# def wrapper(f):
# def fun(l):
# f([f'+91 {i[-10:-5]} {i[-5:]}' for i in l])
# return fun
#
# @wrapper
# def sort_phone(l):
# print(*sorted(l), sep='\n')
#
# if __name__ == '__main__':
# l = [input() for _ in range(int(input()))]
# sort_phone(l)
################################################################################
#Q
################################################################################
# the-minion-game-English
################################################################################
################################################################################
#A
################################################################################
# def minion_game(string):
# s = string.upper()
# S, K = 0, 0
# text_length = len(s)
# for idx, element in enumerate(s):
#         if element in 'AEIOU':  # element is already a single character
# K += text_length - idx
# else:
# S += text_length - idx
# if S > K:
# print('Stuart {}'.format(S))
# elif S < K:
# print('Kevin {}'.format(K))
# elif S == K:
# print('Draw')
# if __name__ == '__main__':
# s = input()
# minion_game(s)
# https://codereview.stackexchange.com/questions/106238/the-minion-game-challenge
################################################################################
#Q
################################################################################
# time-delta
################################################################################
#A
################################################################################
# from datetime import datetime, timedelta
# def time_delta(t1, t2):
# d1 = datetime.strptime(t1, "%a %d %b %Y %H:%M:%S %z")
# d2 = datetime.strptime(t2, "%a %d %b %Y %H:%M:%S %z")
# return str(round((abs(d1 -d2)).total_seconds()))
################################################################################
#Q
################################################################################
# validating-credit-card-number
################################################################################
#A
################################################################################
# import re
# def valid_card_num(card_num):
# return (
# "Invalid"
#         if not re.search(r"^[456]\d{3}(-?\d{4}){3}$", card_num)
# or re.search(r"(\d)\1{3,}", card_num.replace("-", ""))
# else "Valid"
# )
# print(*[valid_card_num(input()) for _ in range(int(input()))], sep="\n")
################################################################################
#Q
################################################################################
# re-sub-regex-substitution-important
################################################################################
#A
################################################################################
# import re
# # not efficient: str.replace needs two passes, because consecutive groups
# # like " && && " share the middle space and one pass misses every other one
# def and_or_replacement(line):
#     return (
#         line.replace(" && ", " and ")
#         .replace(" && ", " and ")
#         .replace(" || ", " or ")
#         .replace(" || ", " or ")
#     )
# for i in range(int(input())):
# print(
# re.sub(
# r"(?<= )(&&|\|\|)(?= )",
# lambda x: "and" if x.group() == "&&" else "or",
# input(),
# )
# )
# https://docs.python.org/3/library/re.html#regular-expression-syntax
# (?=...) lookahead assertion
# (?<=...) positive lookbehind assertion
# When two operator groups appear consecutively with only one space between
# them (e.g. " && || "), both should match. Without lookbehind/lookahead the
# pattern consumes the shared space, so in a run like "n && && && && && && n"
# only every other group gets replaced.
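# # an illustrative mini-demo of consuming vs. asserting the shared space:
# import re
# s = 'a && && b'
# print(re.sub(r' && ', ' and ', s))         # 'a and && b' -- space consumed
# print(re.sub(r'(?<= )&&(?= )', 'and', s))  # 'a and and b' -- space asserted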
################################################################################
#Q
################################################################################
# validate-list-of-email-address-with-filter
################################################################################
#A
################################################################################
# def fun(s):
# import re
# # return True if s is a valid email, else return False
# return re.match(r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}$", s)
# def filter_mail(emails):
# return list(filter(fun, emails))
# if __name__ == '__main__':
# n = int(input())
# emails = []
# for _ in range(n):
# emails.append(input())
# filtered_emails = filter_mail(emails)
# filtered_emails.sort()
# print(filtered_emails)
# $ is essential: without it an address ending in "net1" would still match,
# because re.match allows trailing characters after the pattern.
# ^ is redundant with re.match (it already anchors at the start), but it is
# what stops re.search from matching inside learn.point@learningpoint.net
# pay attention to the r prefix before the pattern (raw string)
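# # a hedged illustration with made-up addresses:
# import re
# p = r'[a-zA-Z0-9_-]+@[a-zA-Z0-9]+\.[a-zA-Z]{1,3}'
# print(bool(re.match(p, 'name@mail.net1')))         # True without $ (unwanted)
# print(bool(re.match(p + r'$', 'name@mail.net1')))  # False with $
# print(bool(re.search(p, 'learn.point@learningpoint.net')))              # True (matches inside)
# print(bool(re.search('^' + p + '$', 'learn.point@learningpoint.net')))  # False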
################################################################################
#Q
################################################################################
# ginorts
################################################################################
#A
################################################################################
# lower_case = []
# upper_case = []
# odd_num = []
# even_num = []
# num = []
# s = input()
# for c in s:
# if 48 <= ord(c) <= 57:
#         if ord(c) % 2 == 0:  # digit parity equals ord parity, since ord('0') == 48 is even
# even_num.append(c)
# else:
# odd_num.append(c)
# elif 65 <= ord(c) <= 90:
# upper_case.append(c)
# elif 97 <= ord(c) <= 122:
# lower_case.append(c)
# num = sorted(odd_num) + sorted(even_num)
# print("".join(sorted(lower_case)+sorted(upper_case)+num))
# # #
# import re
# s = input()
# print(
# "".join(
# sorted(re.findall(r"[a-z]", s))
# + sorted(re.findall(r"[A-Z]", s))
# + sorted(re.findall(r"[13579]", s))
# + sorted(re.findall(r"[02468]", s))
# )
# )
# # other nice solutions
# print(*sorted(s, key=lambda c: (c.isdigit() - c.islower(), c in "02468", c)), sep="")
# >>> False - True
# -1
# s = "Sorting1234"
# [
# (0, False, 'S'), (-1, False, 'o'),
# (-1, False, 'r'), (-1, False, 't'),
# (-1, False, 'i'), (-1, False, 'n'),
# (-1, False, 'g'), (1, False, '1'),
# (1, True, '2'), (1, False, '3'),
# (1, True, '4')
# ]
# sorting compares index 0 first: -1 marks a lowercase letter, so it comes
# first. If both tuples tie at index 0, the booleans at index 1 are compared
# (False is smaller, so odd digits precede even ones). If those tie as well,
# the characters themselves are compared, e.g. 'a' before 'b'.
# >>> (False, False, False, True, 's') < (False, False, True, False, 'G')
# True
# Tuples are compared element by element on the basis of index position.
# Here the first two elements are False in both tuples; at index 2 the
# right-hand tuple has True while the left has False, so the right tuple
# is greater and the comparison evaluates to True.
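# # a tiny demo of lexicographic tuple comparison:
# print((0, False, 'S') > (-1, False, 'o'))  # True: decided at index 0
# print((1, False, '1') < (1, True, '2'))    # True: False < True at index 1
# print((1, False, '1') < (1, False, '3'))   # True: decided by '1' < '3'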
# print(*sorted(input(), key=lambda c: (-ord(c) >> 5, c in "02468", c)), sep="")
# 1) Understanding RIGHT SHIFT (>>): right-shifting a number by 5 divides it
# by 2**5 (i.e. 32) and floors the result, e.g.
# 64 >> 5 == 2, 63 >> 5 == 1, 65 >> 5 == 2
# 2)Understanding ord():
# ord gives the ascii values of the specified character
# Ascii value of:
# a-z ==> 97-122
# A-Z ==> 65-90
# 0-9 ==> 48-57
# e.g ord('d') ==> 100
# 3) ord(c) >> 5 buckets the characters:
#    i)   a-z (ascii 97-122) right-shifted by 5 always gives 3
#    ii)  A-Z (ascii 65-90) right-shifted by 5 always gives 2
#    iii) 0-9 (ascii 48-57) right-shifted by 5 always gives 1
# 4) the minus in -ord(c) >> 5 flips the order: a-z has the highest bucket
#    (3) but must come first, so negating gives it the smallest sort key
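# # quick verification of the buckets and the flipped order:
# print(ord('a') >> 5, ord('A') >> 5, ord('0') >> 5)     # 3 2 1
# print(-ord('a') >> 5, -ord('A') >> 5, -ord('0') >> 5)  # -4 -3 -2
# print(*sorted('0Aa', key=lambda c: -ord(c) >> 5))      # a A 0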
# order = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1357902468"
# print(*sorted(input(), key=order.index), sep="")
# import string
# print(*sorted(input(), key=(string.ascii_letters + "1357902468").index), sep="")
################################################################################
#Q
################################################################################
# python-sort-sort-athlete-sort
################################################################################
#A
################################################################################
# if __name__ == "__main__":
# nm = input().split()
# n = int(nm[0])
# m = int(nm[1])
# arr = []
# for _ in range(n):
# arr.append(list(map(int, input().rstrip().split())))
# k = int(input())
# for el in sorted(arr, key=lambda c: c[k]):
# print(*el)
################################################################################
#Q
################################################################################
# class-1-dealing-with-complex-numbers
################################################################################
#A
################################################################################
# class Complex(object):
# def __init__(self, real, imaginary):
# self.real = real
# self.imaginary = imaginary
# def __add__(self, no):
# return Complex((self.real + no.real), (self.imaginary + no.imaginary))
# def __sub__(self, no):
# return Complex((self.real - no.real), (self.imaginary - no.imaginary))
# def __mul__(self, no):
# return Complex(
# (self.real * no.real) - (self.imaginary * no.imaginary),
# (self.real * no.imaginary) + (self.imaginary * no.real),
# )
# def __truediv__(self, no):
# x = self.__mul__(no.conjugate())
# y = no.__mul__(no.conjugate())
# return Complex(
# x.real / (y.real + y.imaginary), x.imaginary / (y.real + y.imaginary)
# )
# def conjugate(self):
# return Complex(self.real, -self.imaginary)
# def mod(self):
# return Complex(((self.real ** 2) + (self.imaginary ** 2)) ** 0.5, 0)
# def __str__(self):
# if self.imaginary == 0:
# result = "%.2f+0.00i" % (self.real)
# elif self.real == 0:
# if self.imaginary >= 0:
# result = "0.00+%.2fi" % (self.imaginary)
# else:
# result = "0.00-%.2fi" % (abs(self.imaginary))
# elif self.imaginary > 0:
# result = "%.2f+%.2fi" % (self.real, self.imaginary)
# else:
# result = "%.2f-%.2fi" % (self.real, abs(self.imaginary))
# return result
# if __name__ == "__main__":
# c = map(float, input().split())
# d = map(float, input().split())
# x = Complex(*c)
# y = Complex(*d)
# print(*map(str, [x + y, x - y, x * y, x / y, x.mod(), y.mod()]), sep="\n")
################################################################################
#Q
################################################################################
# triangle-quest-2
################################################################################
#A
################################################################################
# More than 2 lines of code will score 0; do not leave a blank line either.
# for i in range(1,int(input())+1):
# print(sum(map(lambda x:10**x, range(0,i)))**2)
# better solution
# The output requires the sequence of Demlo numbers. For n<=9,
# the squares of the first few repunits are precisely the Demlo numbers.
# Example: 1^2=1, 11^2=121, 111^2=12321 and so on.
# for i in range(1,int(input())+1):
# print(((10**i)//9)**2)
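# # sanity check of the repunit identity (holds while i <= 9):
# for i in range(1, 5):
#     r = (10**i) // 9   # repunits: 1, 11, 111, 1111
#     print(r, r**2)     # 1 1 / 11 121 / 111 12321 / 1111 1234321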
################################################################################
#Q
################################################################################
# piling-up
################################################################################
#A
################################################################################
# def check(l):
# biggest = None
#     while l:  # 'for _ in l' would stop early while l shrinks from both ends
# if biggest and (l[0]> biggest or l[-1]> biggest):
# return "No"
# elif l[0] >= l[-1]:
# biggest = l[0]
# l.pop(0)
# # l = l[1:]
# elif l[0]<l[-1]:
# biggest = l[-1]
# l.pop()
# # l = l[:-1]
# return "Yes"
# l = []
# for _ in range(int(input())):
# input()
# l.append(check(list(map(int, input().split()))))
# print(*l, sep="\n")
# note: using the commented slicing instead of pop makes the solution too
# slow to pass the test cases (each slice copies the remaining list)
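# # a rough timing sketch (numbers are machine-dependent): both forms are
# # O(n) per step, but pop(0) shifts memory in C while slicing rebuilds the
# # whole list in Python each time; deque.popleft() below is the O(1) option
# from timeit import timeit
# print(timeit('while l: l.pop(0)', setup='l = list(range(10**4))', number=1))
# print(timeit('while l: l = l[1:]', setup='l = list(range(10**4))', number=1))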
# other solutions
# possible inputs:
# https://hr-testcases-us-east-1.s3.amazonaws.com/8380/input04.txt?AWSAccessKeyId=AKIAR6O7GJNX5DNFO3PV&Expires=1644617360&Signature=4%2BeOi7YA43ddCbPAHtDq6rYLEk8%3D&response-content-type=text%2Fplain
# for t in range(int(input())):
#     input()
#     lst = list(map(int, input().split()))
#     l = len(lst)
#     i = 0
#     while i < l - 1 and lst[i] >= lst[i+1]:
#         i += 1
#     while i < l - 1 and lst[i] <= lst[i+1]:
#         i += 1
#     print("Yes" if i == l - 1 else "No")
# faster
# from collections import deque
# for _ in range(int(input())):
# _, queue =input(), deque(map(int, input().split()))
# for cube in reversed(sorted(queue)):
# if queue[-1] == cube: queue.pop()
# elif queue[0] == cube: queue.popleft()
# else:
# print('No')
# break
# else: print('Yes')
################################################################################
#Q
################################################################################
# word-order
################################################################################
#A
################################################################################
# from collections import Counter
# z = Counter([input() for _ in range(int(input()))])
# print(len(z))
# print(*z.values())
################################################################################
#Q
################################################################################
# no-idea
################################################################################
#A
################################################################################
# from collections import Counter
# n, m = input().split()
# arr = input().split()
# a = input().split()
# b = input().split()
# # The reason for count - 1: set_arr already accounts for one occurrence of
# # each element, so repeated_arr should hold only the extra copies beyond
# # the one counted inside len(set_arr.intersection(set(a)))
# repeated_arr = [(item, count - 1) for item, count in Counter(arr).items() if count > 1]
# set_arr = set(arr)
# # here we get just the random numbers and not their counts
# # we should convert it to set, that let us use the intersection
# set_repeated_arr = set([el[0] for el in repeated_arr])
# # without xa and xb the loop below runs too long to pass the tests;
# # we only need the values common to both sets, not all of them
# xa = set_repeated_arr.intersection(set(a))
# xb = set_repeated_arr.intersection(set(b))
# ha = []
# hb = []
# for el, rep in repeated_arr:
# if el in xa:
# ha.append(rep)
# elif el in xb:
# hb.append(rep)
# print(
# len(set_arr.intersection(set(a)))
# + sum(ha)
# - (len(set_arr.intersection(set(b))) + sum(hb))
# )
# example links
# https://hr-testcases-us-east-1.s3.amazonaws.com/8382/input07.txt?AWSAccessKeyId=AKIAR6O7GJNX5DNFO3PV&Expires=1645184229&Signature=BbdhIxhg0zm2L2DHUFKIaZcNh00%3D&response-content-type=text%2Fplain
# https://hr-testcases-us-east-1.s3.amazonaws.com/8382/input03.txt?AWSAccessKeyId=AKIAR6O7GJNX5DNFO3PV&Expires=1645184415&Signature=zF3F8e25iF6LEiNo3n0ex0SWuwo%3D&response-content-type=text%2Fplain
# other solution
# n, m = input().split()
# sc_ar = input().split()
# A = set(input().split())
# B = set(input().split())
# print(sum((i in A) - (i in B) for i in sc_ar))
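# # the one-liner leans on bool arithmetic; a tiny demo:
# A, B = {'1', '2'}, {'3'}
# print(('1' in A) - ('1' in B))  # 1  -> happiness +1
# print(('3' in A) - ('3' in B))  # -1 -> happiness -1
# print(('9' in A) - ('9' in B))  # 0  -> no change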
################################################################################
#Q
################################################################################
# find-angle
################################################################################
#A
################################################################################
# import math
# AB, BC = int(input()) , int(input())
# M = (AB ** 2 + BC ** 2) ** 0.5
# MBC = math.acos(BC / M) * 180 / math.pi
# print(f"{round(MBC)}\N{DEGREE SIGN}")
# print(str(int(round(math.degrees(math.atan(AB/BC)),0)))+'°')
# hypotenuse = math.hypot(AB,BC)
# degree = chr(176)
################################################################################
#Q
################################################################################
# default-arguments
################################################################################
#A
################################################################################
# class EvenStream(object):
# def __init__(self):
# self.current = 0
# def get_next(self):
# to_return = self.current
# self.current += 2
# return to_return
# class OddStream(object):
# def __init__(self):
# self.current = 1
# def get_next(self):
# to_return = self.current
# self.current += 2
# return to_return
# # here
# def print_from_stream(n, stream=None):
# stream = stream or EvenStream() # here
# for _ in range(n):
# print(stream.get_next())
# queries = int(input())
# for _ in range(queries):
# stream_name, n = input().split()
# n = int(n)
# if stream_name == "even":
# print_from_stream(n)
# else:
# print_from_stream(n, OddStream())
# This is a somewhat contrived example of a subtle but important
# aspect of python that leads to bugs:
# Default argument values are initialized once,
# not every time the function is called.
# This doesn't matter for immutable objects,
# but for mutable objects the object retains
# its state between function calls.
# In the example problem, the initial configuration has
# the EvenStream object as a default parameter,
# so if multiple print_from_stream(n) calls are made,
# the latter calls will use the same object,
# and pick the stream where the previous call finished
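# # a minimal sketch of the pitfall with a hypothetical helper (not part of
# # the challenge): the default list is created once, at definition time
# def append_to(item, bucket=[]):
#     bucket.append(item)
#     return bucket
# print(append_to(1))  # [1]
# print(append_to(2))  # [1, 2] -- the same list object survived the calls
# # the usual fix mirrors print_from_stream above:
# def append_to_fixed(item, bucket=None):
#     bucket = bucket if bucket is not None else []
#     bucket.append(item)
#     return bucket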
################################################################################
#Q
################################################################################
# words-score
################################################################################
#A
################################################################################
# def is_vowel(letter):
# return letter in ['a', 'e', 'i', 'o', 'u', 'y']
# def score_words(words):
# score = 0
# for word in words:
# num_vowels = 0
# for letter in word:
# if is_vowel(letter):
# num_vowels += 1
# if num_vowels % 2 == 0:
# score += 2
# else:
# score+=1
# return score
# n = int(input())
# words = input().split()
# print(score_words(words))
# I just changed ++score to score += 1;
# Python has no ++ increment operator
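# # note: ++score is not even a syntax error -- it parses as +(+score), a
# # double unary plus that silently leaves the value unchanged:
# score = 3
# score = ++score
# print(score)  # still 3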
################################################################################
#Q
################################################################################
# reduce-function
################################################################################
#A
################################################################################
# from fractions import Fraction
# from functools import reduce
# def product(fracs):
# t = reduce(lambda x, y : x * y,fracs, 1)
# return t.numerator, t.denominator
# if __name__ == '__main__':
# fracs = []
# for _ in range(int(input())):
# fracs.append(Fraction(*map(int, input().split())))
# result = product(fracs)
# print(*result)
################################################################################
#Q
################################################################################
# most-commons
################################################################################
#A
################################################################################
# import math
# import os
# import random
# import re
# import sys
# from collections import Counter
# if __name__ == '__main__':
# s = input()
# for x,y in sorted(Counter(s).most_common(), key=lambda tup:(-tup[1], tup[0]))[:3]:
# print(x,y)
# # other solutions
# class OrderedCounter(Counter):
# pass
# [print(*c) for c in OrderedCounter(sorted(input())).most_common(3)]
# Dict={}
# for x in sorted(s):
# Dict[x]=Dict.get(x,0)+1
# #Sorting Dict by value & storing sorted keys in Dict_keys.
# Dict_keys=sorted(Dict, key=Dict.get, reverse=True)
# for key in Dict_keys[:3]:
# print(key,Dict[key])
################################################################################
#Q
################################################################################
# merge-the-tools
################################################################################
#A
################################################################################
# def merge_the_tools(string, k):
# z = {}
# for i in range(len(string)):
# if i%k == 0:
# z[i] = []
# for el in string[i:i+k]:
# if el not in z[i]:
# z[i].append(el)
# for v in z.values():
# print("".join(v))
# if __name__ == '__main__':
# string, k = input(), int(input())
# merge_the_tools(string, k)
# other solutions
# def merge_the_tools(string, k):
# for i in range(0, len(string), k):
# uniq = ''
# for c in string[i : i+k]:
# if (c not in uniq):
# uniq+=c
# print(uniq)
# S, N = input(), int(input())
# for part in zip(*[iter(S)] * N):
# d = dict()
# print(''.join([ d.setdefault(c, c) for c in part if c not in d ]))
# setdefault method returns the key value available in the dictionary and if
# given key is not available then it will provided default value and adds it to the dictionary.
# [iter(s)]*n builds a list that repeats the *same* iterator over s, n times.
# Example: it = iter(s); [it]*3 == [it, it, it]  # one shared iterator object
# for part in zip(*[iter(S)] * N):
# It is equivalent to:
# it = iter(s)
# for part in zip(it, it, it):
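# A tiny demonstration of why this works (string chosen arbitrarily):
# s = 'abcdefgh'
# it = iter(s)
# list(zip(it, it, it))       # [('a','b','c'), ('d','e','f')] - 'g','h' dropped
# list(zip(*[iter(s)] * 4))   # [('a','b','c','d'), ('e','f','g','h')]
# All n positions advance the *same* iterator, so zip consumes the
# string in non-overlapping chunks of n.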
################################################################################
#Q
################################################################################
# matrix-script
################################################################################
#A
################################################################################
# import math
# import os
# import random
# import re
# import sys
# from collections import defaultdict
# first_multiple_input = input().rstrip().split()
# n = int(first_multiple_input[0])
# m = int(first_multiple_input[1])
# matrix = []
# y = []
# for _ in range(n):
# matrix_item = input()
# matrix.append(matrix_item)
# for el in matrix:
# y.append(list(iter(el)))
# zz = defaultdict(list)
# for idx, val in enumerate(y):
# for i in range(len(val)):
# zz[i].append(y[idx][i])
# sss = ''
# for k, v in zz.items():
# sss+= "".join(v)
# print(re.sub(r'(\w)(\W)+(\w)', r'\1 \3', sss))
# other solution
# print(re.sub(r"(?<=\w)([^\w]+)(?=\w)", " ", sss))
# print(re.sub(r"(\w)(\W)+(\w)", r"\1 \3", "".join([u for t in zip(*matrix) for u in t])))
################################################################################
#Q
################################################################################
# validating-postalcode
################################################################################
#A
################################################################################
# regex_integer_in_range = r"^[1-9]\d{5}$"
# regex_alternating_repetitive_digit_pair = r"(\d)(?=\d\1)"
# import re
# P = input()
# print (bool(re.match(regex_integer_in_range, P))
# and len(re.findall(regex_alternating_repetitive_digit_pair, P)) < 2)
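# A quick illustration of the second regex (sample inputs invented):
# import re
# re.findall(r"(\d)(?=\d\1)", "121426")  # ['1'] - one pair, still valid
# re.findall(r"(\d)(?=\d\1)", "110101")  # ['1', '0', '1'] - three pairs
# The zero-width lookahead lets overlapping pairs such as '101' and '010'
# all be counted, which a plain consuming match would miss.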
################################################################################
#Q
################################################################################
# maximize-it
################################################################################
#A
################################################################################
from itertools import product
K, M = list(map(int, input().split()))
ll = [list(map(lambda x: int(x) ** 2, input().split()))[1:] for i in range(K)]
print(max(map(lambda x: sum(x) % M, product(*ll))))
# why [1:] ?
# because on every line after the first, the leading number is just the
# count of the values that follow,
# i.e. '3 7 8 9' means three values: 7, 8, 9
# 3 1000
# 2 5 4
# 3 7 8 9
# 5 5 7 8 9 10
# product(*ll) ?
# the * unpacks the list of lists, so product receives each inner list
# as a separate argument (the Cartesian product of the rows)
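# A small worked example of the unpacking (toy values, already squared):
# from itertools import product
# ll = [[25, 16], [49, 64, 81]]
# list(product(*ll))  # same as product([25, 16], [49, 64, 81])
# # -> [(25, 49), (25, 64), (25, 81), (16, 49), (16, 64), (16, 81)]
# max(sum(t) % 1000 for t in product(*ll))  # 106 for these toy values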
# Initially, when I was blocked on this question, I did not try the
# Cartesian product approach (the math operation, not just the function).
# I thought its algorithmic complexity was too high and my answer would
# not be accepted, so I skipped it. When I couldn't find any other
# solution, I went with the one I thought was the worst. However, it worked!
# First solve the problem, then improve it!
# It took me a long time, but I finally figured it out!
# Wed 19 May 2022 00:12
################################################################################
# THE END
################################################################################
| 44.810179
| 272
| 0.285682
|
cd5a52bde50b515d4303f750ecfba5bad979a355
| 11,815
|
py
|
Python
|
nislmigrate/facades/file_system_facade.py
|
ni/NI-SystemLink-Migration
|
dbce27627a2ea9b0121478ef64d9acfa2940a20d
|
[
"MIT"
] | null | null | null |
nislmigrate/facades/file_system_facade.py
|
ni/NI-SystemLink-Migration
|
dbce27627a2ea9b0121478ef64d9acfa2940a20d
|
[
"MIT"
] | 9
|
2021-11-08T21:47:50.000Z
|
2022-03-30T20:06:52.000Z
|
nislmigrate/facades/file_system_facade.py
|
ni/NI-SystemLink-Migration
|
dbce27627a2ea9b0121478ef64d9acfa2940a20d
|
[
"MIT"
] | null | null | null |
"""Handle file and directory operations."""
import json
import os
import shutil
import stat
import base64
from nislmigrate.logs.migration_error import MigrationError
from nislmigrate.migration_action import MigrationAction
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
COMPRESSION_FORMAT = 'tar'
class FileSystemFacade:
"""
Handles operations that act on the real file system.
"""
def determine_migration_directory_for_service(self,
migration_directory_root: str,
service_name: str) -> str:
"""
Generates the migration directory for a particular service.
:param migration_directory_root: The root directory migrations take place in.
:param service_name: The name of the service to determine the migration directory for.
:return: The migration directory for the service.
"""
return os.path.join(migration_directory_root, service_name)
def does_directory_exist(self, directory: str) -> bool:
"""
Determines whether a directory exists.
:param directory: The directory path to check.
:return: True if the given directory path is a directory and exists.
"""
return os.path.isdir(directory)
def does_file_exist_in_directory(self,
directory: str,
file_name: str) -> bool:
"""
Determines whether a file with the given name exists in a directory
:param directory: The directory to check.
:param file_name: The file to check.
:return: True if the file exists in the given directory.
"""
path = os.path.join(directory, file_name)
return self.does_file_exist(path)
def does_file_exist(self, file_path: str) -> bool:
"""
Determines whether a file exists on disk.
:param file_path: The path to check.
:return: True if the file exists.
"""
return os.path.isfile(file_path)
def remove_directory(self, directory: str):
"""
Deletes the given directory and its children.
:param directory: The directory to remove.
:return: None.
"""
if os.path.isdir(directory):
shutil.rmtree(directory, onerror=self.__on_error_remove_readonly_and_retry)
def migrate_singlefile(self,
migration_directory_root: str,
service_name: str,
single_file_source_directory: str,
single_file_name: str,
action: MigrationAction):
"""
Perform a capture or restore for the given service.
:param migration_directory_root: The root directory migration is taking place from.
:param service_name: The name of the service being captured or restored.
:param single_file_source_directory: The directory the file lives in on the live system.
:param single_file_name: The name of the file to capture or restore.
:param action: Whether to capture or restore.
:return: None.
"""
root = migration_directory_root
migration_dir = self.determine_migration_directory_for_service(root, service_name)
if action == MigrationAction.CAPTURE:
self.remove_directory(migration_dir)
os.mkdir(migration_dir)
singlefile_full_path = os.path.join(
single_file_source_directory,
single_file_name,
)
shutil.copy(singlefile_full_path, migration_dir)
elif action == MigrationAction.RESTORE:
singlefile_full_path = os.path.join(migration_dir, single_file_name)
shutil.copy(singlefile_full_path, single_file_source_directory)
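# Hypothetical usage sketch (paths and service name invented; this
# comment is not part of the original module):
# facade = FileSystemFacade()
# facade.migrate_singlefile('C:/migration', 'MyService',
#                           'C:/ProgramData/MyService', 'config.json',
#                           MigrationAction.CAPTURE)
# The same call with MigrationAction.RESTORE copies the captured file back.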
def capture_single_file(self,
migration_directory_root: str,
service_name: str,
restore_directory: str,
file: str):
root = migration_directory_root
migration_dir = self.determine_migration_directory_for_service(root, service_name)
self.remove_directory(migration_dir)
os.mkdir(migration_dir)
singlefile_full_path = os.path.join(
restore_directory,
file,
)
shutil.copy(singlefile_full_path, migration_dir)
def restore_single_file(self,
migration_directory_root: str,
service_name: str,
restore_directory: str,
file: str):
root = migration_directory_root
migration_dir = self.determine_migration_directory_for_service(root, service_name)
singlefile_full_path = os.path.join(migration_dir, file)
shutil.copy(singlefile_full_path, restore_directory)
def read_json_file(self, path: str) -> dict:
"""
Reads json from a file.
:param path: The path to the json file to read.
:return: The parsed json from the file.
"""
with open(path, encoding='utf-8-sig') as json_file:
return json.load(json_file)
@staticmethod
def copy_file(from_directory: str, to_directory: str, file_name: str):
"""
Copy a single file from one directory to another.
:param from_directory: The directory the file to copy exists in.
:param to_directory: The directory to copy the file into.
:param file_name: The name of the file to copy.
"""
if not os.path.exists(to_directory):
os.mkdir(to_directory)
file_path = os.path.join(from_directory, file_name)
shutil.copy(file_path, to_directory)
def copy_directory(self, from_directory: str, to_directory: str, force: bool):
"""
Copy an entire directory from one location to another.
:param from_directory: The directory whose contents to copy.
:param to_directory: The directory to put the copied contents.
:param force: Whether to delete existing content in to_directory before copying.
"""
if os.path.exists(to_directory) and os.listdir(to_directory) and not force:
error = "The tool can not copy to the non empty directory: '%s'" % to_directory
raise MigrationError(error)
if not os.path.exists(from_directory):
raise MigrationError("No data found at: '%s'" % from_directory)
self.remove_directory(to_directory)
shutil.copytree(from_directory, to_directory)
def copy_directory_to_encrypted_file(self, from_directory: str, encrypted_file_path: str, secret: str):
"""
Compress an entire directory and encrypt it into a single file.
:param from_directory: The directory whose contents to copy.
:param encrypted_file_path: The file path to write the encrypted archive to.
:param secret: A password to use when encrypting the directory.
"""
if self.does_file_exist(encrypted_file_path):
raise FileExistsError("Captured data already exists: '%s'" % encrypted_file_path)
if not self.does_directory_exist(from_directory):
raise FileExistsError("No data found at: '%s'" % from_directory)
extension = f'.{COMPRESSION_FORMAT}'
if self.does_file_exist(from_directory + extension):
raise FileExistsError(f'Data not cleaned up from previous migration: {from_directory + extension}')
# shutil.make_archive automatically appends the compression formats file extension to the output path.
shutil.make_archive(from_directory, COMPRESSION_FORMAT, from_directory)
self.__encrypt_tar(secret, from_directory + extension, encrypted_file_path)
os.remove(from_directory + extension)
def copy_directory_from_encrypted_file(self, encrypted_file_path: str, to_directory: str, secret: str):
"""
Decrypt and unpack an encrypted archive into a directory.
:param encrypted_file_path: The encrypted archive to restore from.
:param to_directory: The directory to put the unpacked contents.
:param secret: The password that was used when encrypting the archive.
"""
if not self.does_file_exist(encrypted_file_path):
raise MigrationError("No data found at: '%s'" % encrypted_file_path)
extension = f'.{COMPRESSION_FORMAT}'
if self.does_file_exist(encrypted_file_path + extension):
raise MigrationError(f'Data not cleaned up from previous migration: {encrypted_file_path + extension}')
self.__decrypt_tar(secret, encrypted_file_path, encrypted_file_path + extension)
shutil.unpack_archive(encrypted_file_path + extension, to_directory, COMPRESSION_FORMAT)
os.remove(encrypted_file_path + extension)
def write_file(self, path: str, content: str) -> None:
"""
Writes a file to the indicated path with the given content.
:param path: The path to the file to write.
:param content: The contents to write in the file.
"""
with open(path, 'w') as file:
file.write(content)
def read_file(self, path: str) -> str:
"""
Reads the contents from a file at the indicated path.
:param path: The path to the file to read.
"""
if not self.does_file_exist(path):
raise MigrationError(f'Unable to read file at {path} because it does not exist.')
with open(path, 'r') as file:
return file.read()
def __encrypt_tar(self, secret: str, tar_path: str, encrypted_path: str):
with open(tar_path, 'rb') as file:
text = file.read()
encrypter = self.__get_encrypter(secret)
encrypted_text = encrypter.encrypt(text)
with open(encrypted_path, 'wb') as file:
file.write(encrypted_text)
def __decrypt_tar(self, secret: str, encrypted_path: str, tar_path: str):
with open(encrypted_path, 'rb') as file:
encrypted_text = file.read()
encrypter = self.__get_encrypter(secret)
text = encrypter.decrypt(encrypted_text)
with open(tar_path, 'wb') as file:
file.write(text)
@staticmethod
def __get_encrypter(secret: str):
password = bytes(secret, 'utf-8')
if not password:
raise MigrationError('Secret not provided via the --secret flag for encryption.')
key_derivation_function = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, iterations=320000, salt=b'0'*16)
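# Editor's note (inference, not from the original source): the salt is a
# fixed constant so that the same secret deterministically derives the
# same Fernet key during both capture and restore; a random salt would
# have to be persisted alongside the encrypted file to allow decryption.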
key = base64.urlsafe_b64encode(key_derivation_function.derive(password))
return Fernet(key)
def copy_directory_if_exists(self, from_directory: str, to_directory: str, force: bool) -> bool:
"""
Calls copy_directory only if the source directory exists. See copy_directory for parameter descriptions.
:return True if a copy happened, otherwise false.
"""
if os.path.exists(from_directory):
self.copy_directory(from_directory, to_directory, force)
return True
else:
return False
def __on_error_remove_readonly_and_retry(self, func, path, execinfo):
"""
Error handler that removes the readonly attribute from a file path
and then retries the previous operation.
:param func: A continuation to run with the path.
:param path: The path to remove the readonly attribute from.
:param execinfo: Will be the exception information returned by sys.exc_info()
:return: None.
"""
self.__remove_readonly(path)
func(path)
def __remove_readonly(self, path):
"""
Removes the read-only attribute from a file or directory.
:param path: The path to remove the readonly attribute from.
"""
os.chmod(path, stat.S_IWRITE)
| 41.311189
| 115
| 0.644943
|
1fbf8e54a4f2000d696e17d5779e175fefa293ef
| 82
|
py
|
Python
|
ros_catkin_ws/src/causalrobot/scripts/NREM_cortex.py
|
Hockey86/causalrobot
|
3871fe4431b3eaa79f7a1d1540334b99cb8ec769
|
[
"MIT"
] | null | null | null |
ros_catkin_ws/src/causalrobot/scripts/NREM_cortex.py
|
Hockey86/causalrobot
|
3871fe4431b3eaa79f7a1d1540334b99cb8ec769
|
[
"MIT"
] | null | null | null |
ros_catkin_ws/src/causalrobot/scripts/NREM_cortex.py
|
Hockey86/causalrobot
|
3871fe4431b3eaa79f7a1d1540334b99cb8ec769
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
## Causal discovery and inference
# red LED
| 13.666667
| 33
| 0.719512
|
489ce9bff9e6b9188a02965614cecd92a42d88a0
| 204
|
py
|
Python
|
apps/product/admin.py
|
phdevs1/CyberCaffe
|
bee989a6d8d59205ee2645e986b4b0f16d00bf05
|
[
"Apache-2.0"
] | null | null | null |
apps/product/admin.py
|
phdevs1/CyberCaffe
|
bee989a6d8d59205ee2645e986b4b0f16d00bf05
|
[
"Apache-2.0"
] | 7
|
2021-03-19T08:39:34.000Z
|
2022-03-12T00:15:38.000Z
|
apps/product/admin.py
|
pioh123/CyberCaffe
|
bee989a6d8d59205ee2645e986b4b0f16d00bf05
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Product, Advertise, Promotion
# Register your models here.
admin.site.register(Product)
admin.site.register(Advertise)
admin.site.register(Promotion)
| 25.5
| 49
| 0.818627
|
8422ebe1532c7d8c1b412eb97914d35baad5815f
| 2,985
|
py
|
Python
|
integration/test/__main__.py
|
dianarg/geopm
|
846604c164e3f8fc50551e888297843701dec087
|
[
"BSD-3-Clause"
] | null | null | null |
integration/test/__main__.py
|
dianarg/geopm
|
846604c164e3f8fc50551e888297843701dec087
|
[
"BSD-3-Clause"
] | null | null | null |
integration/test/__main__.py
|
dianarg/geopm
|
846604c164e3f8fc50551e888297843701dec087
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import sys
import os
import unittest
from test_omp_outer_loop import *
from test_enforce_policy import *
from test_profile_policy import *
from test_plugin_static_policy import *
from test_tutorial_base import *
from test_frequency_hint_usage import *
from test_profile_overflow import *
from test_trace import *
from test_monitor import *
from test_geopmio import *
from test_ompt import *
from test_launch_application import *
from test_launch_pthread import *
from test_geopmagent import *
from test_environment import *
from test_frequency_map import *
from test_hint_time import *
from test_progress import *
if 'GEOPM_RUN_LONG_TESTS' in os.environ:
from test_ee_timed_scaling_mix import *
from test_power_balancer import *
from test_power_governor import *
from test_scaling_region import *
from test_timed_scaling_region import *
else:
skipped_modules = ['test_ee_timed_scaling_mix',
'test_power_balancer',
'test_power_governor',
'test_scaling_region',
'test_timed_scaling_region',
]
for sm in skipped_modules:
sys.stderr.write("* ({}.*) ... skipped 'Requires GEOPM_RUN_LONG_TESTS environment variable'\n".format(sm))
if __name__ == '__main__':
unittest.main()
| 38.766234
| 114
| 0.740704
|
8f46fb8862eaa7d772baf1916d0707016f47397c
| 5,842
|
py
|
Python
|
tuna_service_sdk/model/ops_automation/jobs_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
tuna_service_sdk/model/ops_automation/jobs_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
tuna_service_sdk/model/ops_automation/jobs_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jobs.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tuna_service_sdk.model.ops_automation import bind_resource_pb2 as tuna__service__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2
from tuna_service_sdk.model.ops_automation import mail_info_pb2 as tuna__service__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='jobs.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\njobs.proto\x12\x0eops_automation\x1a\x39tuna_service_sdk/model/ops_automation/bind_resource.proto\x1a\x35tuna_service_sdk/model/ops_automation/mail_info.proto\"\xc1\x01\n\x04Jobs\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x02 \x01(\t\x12\x0e\n\x06menuId\x18\x03 \x01(\t\x12\x32\n\x0c\x62indResource\x18\x04 \x01(\x0b\x32\x1c.ops_automation.BindResource\x12\x0c\n\x04\x64\x65sc\x18\x05 \x01(\t\x12\x13\n\x0b\x61llowModify\x18\x06 \x01(\x08\x12&\n\x04mail\x18\x07 \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\n\n\x02id\x18\x08 \x01(\tBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[tuna__service__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2.DESCRIPTOR,tuna__service__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBS = _descriptor.Descriptor(
name='Jobs',
full_name='ops_automation.Jobs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='ops_automation.Jobs.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='ops_automation.Jobs.category', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuId', full_name='ops_automation.Jobs.menuId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bindResource', full_name='ops_automation.Jobs.bindResource', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='ops_automation.Jobs.desc', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowModify', full_name='ops_automation.Jobs.allowModify', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.Jobs.mail', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.Jobs.id', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=145,
serialized_end=338,
)
_JOBS.fields_by_name['bindResource'].message_type = tuna__service__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2._BINDRESOURCE
_JOBS.fields_by_name['mail'].message_type = tuna__service__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['Jobs'] = _JOBS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Jobs = _reflection.GeneratedProtocolMessageType('Jobs', (_message.Message,), {
'DESCRIPTOR' : _JOBS,
'__module__' : 'jobs_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.Jobs)
})
_sym_db.RegisterMessage(Jobs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.365079
| 669
| 0.763608
|
82cf4a09b0f60b8e05747db36b0d8255c97ebe55
| 51,619
|
py
|
Python
|
python/ccxt/vcc.py
|
bifot/ccxt
|
ad4ae3cf79c315719b5b362443059782e0903152
|
[
"MIT"
] | 1
|
2021-08-02T08:08:52.000Z
|
2021-08-02T08:08:52.000Z
|
python/ccxt/vcc.py
|
bifot/ccxt
|
ad4ae3cf79c315719b5b362443059782e0903152
|
[
"MIT"
] | null | null | null |
python/ccxt/vcc.py
|
bifot/ccxt
|
ad4ae3cf79c315719b5b362443059782e0903152
|
[
"MIT"
] | 1
|
2021-07-23T05:45:00.000Z
|
2021-07-23T05:45:00.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import AddressPending
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.precise import Precise
class vcc(Exchange):
def describe(self):
return self.deep_extend(super(vcc, self).describe(), {
'id': 'vcc',
'name': 'VCC Exchange',
'countries': ['VN'], # Vietnam
'rateLimit': 1000,
'version': 'v3',
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'editOrder': False,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchTicker': 'emulated',
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingFees': False,
'fetchTransactions': True,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '60000',
'5m': '300000',
'15m': '900000',
'30m': '1800000',
'1h': '3600000',
'2h': '7200000',
'4h': '14400000',
'6h': '21600000',
'12h': '43200000',
'1d': '86400000',
'1w': '604800000',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/100545356-8427f500-326c-11eb-9539-7d338242d61b.jpg',
'api': {
'public': 'https://api.vcc.exchange',
'private': 'https://api.vcc.exchange',
},
'www': 'https://vcc.exchange',
'doc': [
'https://vcc.exchange/api',
],
'fees': 'https://support.vcc.exchange/hc/en-us/articles/360016401754',
'referral': 'https://vcc.exchange?ref=l4xhrH',
},
'api': {
'public': {
'get': [
'summary',
'exchange_info',
'assets', # Available Currencies
'ticker', # Ticker list for all symbols
'trades/{market_pair}', # Recent trades
'orderbook/{market_pair}', # Orderbook
'chart/bars', # Candles
'tick_sizes',
],
},
'private': {
'get': [
'user',
'balance', # Get trading balance
'orders/{order_id}', # Get a single order by order_id
'orders/open', # Get open orders
'orders', # Get closed orders
'orders/trades', # Get trades history
'deposit-address', # Generate or get deposit address
'transactions', # Get deposit/withdrawal history
],
'post': [
'orders', # Create new order
],
'put': [
'orders/{order_id}/cancel', # Cancel order
'orders/cancel-by-type',
'orders/cancel-all',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
},
'exceptions': {
'exact': {},
'broad': {
'limit may not be greater than': BadRequest, # {"message":"The given data was invalid.","errors":{"limit":["The limit may not be greater than 1000."]}}
'Insufficient balance': InsufficientFunds, # {"message":"Insufficient balance."}
'Unauthenticated': AuthenticationError, # {"message":"Unauthenticated."} # wrong api key
'signature is invalid': AuthenticationError, # {"message":"The given data was invalid.","errors":{"signature":["HMAC signature is invalid"]}}
'Timeout': RequestTimeout, # {"code":504,"message":"Gateway Timeout","description":""}
'Too many requests': RateLimitExceeded, # {"code":429,"message":"Too many requests","description":"Too many requests"}
'quantity field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"quantity":["The quantity field is required when type is market."]}}
'price field is required': InvalidOrder, # {"message":"The given data was invalid.","errors":{"price":["The price field is required when type is limit."]}}
'error_security_level': PermissionDenied, # {"message":"error_security_level"}
'pair is invalid': BadSymbol, # {"message":"The given data was invalid.","errors":{"coin":["Trading pair is invalid","Trading pair is offline"]}}
# {"message":"The given data was invalid.","errors":{"type":["The selected type is invalid."]}}
# {"message":"The given data was invalid.","errors":{"trade_type":["The selected trade type is invalid."]}}
'type is invalid': InvalidOrder,
'Data not found': OrderNotFound, # {"message":"Data not found"}
},
},
})
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfo(params)
#
# {
# "message":null,
# "dataVersion":"4677e56a42f0c29872f3a6e75f5d39d2f07c748c",
# "data":{
# "timezone":"UTC",
# "serverTime":1605821914333,
# "symbols":[
# {
# "id":"btcvnd",
# "symbol":"BTC\/VND",
# "coin":"btc",
# "currency":"vnd",
# "baseId":1,
# "quoteId":0,
# "active":true,
# "base_precision":"0.0000010000",
# "quote_precision":"1.0000000000",
# "minimum_quantity":"0.0000010000",
# "minimum_amount":"250000.0000000000",
# "precision":{"price":0,"amount":6,"cost":6},
# "limits":{
# "amount":{"min":"0.0000010000"},
# "price":{"min":"1.0000000000"},
# "cost":{"min":"250000.0000000000"},
# },
# },
# ],
# },
# }
#
data = self.safe_value(response, 'data')
markets = self.safe_value(data, 'symbols')
result = []
for i in range(0, len(markets)):
market = self.safe_value(markets, i)
symbol = self.safe_string(market, 'symbol')
id = symbol.replace('/', '_')
baseId = self.safe_string(market, 'coin')
quoteId = self.safe_string(market, 'currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
active = self.safe_value(market, 'active')
precision = self.safe_value(market, 'precision', {})
limits = self.safe_value(market, 'limits', {})
amountLimits = self.safe_value(limits, 'amount', {})
priceLimits = self.safe_value(limits, 'price', {})
costLimits = self.safe_value(limits, 'cost', {})
entry = {
'info': market,
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': {
'price': self.safe_integer(precision, 'price'),
'amount': self.safe_integer(precision, 'amount'),
'cost': self.safe_integer(precision, 'cost'),
},
'limits': {
'amount': {
'min': self.safe_number(amountLimits, 'min'),
'max': None,
},
'price': {
'min': self.safe_number(priceLimits, 'min'),
'max': None,
},
'cost': {
'min': self.safe_number(costLimits, 'min'),
'max': None,
},
},
}
result.append(entry)
return result
def fetch_currencies(self, params={}):
response = self.publicGetAssets(params)
#
# {
# "message":null,
# "dataVersion":"2514c8012d94ea375018fc13e0b5d4d896e435df",
# "data":{
# "BTC":{
# "name":"Bitcoin",
# "unified_cryptoasset_id":1,
# "can_withdraw":1,
# "can_deposit":1,
# "min_withdraw":"0.0011250000",
# "max_withdraw":"100.0000000000",
# "maker_fee":"0.002",
# "taker_fee":"0.002",
# "decimal":8,
# "withdrawal_fee":"0.0006250000",
# },
# },
# }
#
result = {}
data = self.safe_value(response, 'data')
ids = list(data.keys())
for i in range(0, len(ids)):
id = self.safe_string_lower(ids, i)
currency = self.safe_value(data, ids[i])
code = self.safe_currency_code(id)
canDeposit = self.safe_value(currency, 'can_deposit')
canWithdraw = self.safe_value(currency, 'can_withdraw')
active = (canDeposit and canWithdraw)
result[code] = {
'id': id,
'code': code,
'name': self.safe_string(currency, 'name'),
'active': active,
'fee': self.safe_number(currency, 'withdrawal_fee'),
'precision': self.safe_integer(currency, 'decimal'),
'limits': {
'withdraw': {
'min': self.safe_number(currency, 'min_withdraw'),
'max': self.safe_number(currency, 'max_withdraw'),
},
},
}
return result
def fetch_trading_fee(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = self.extend({
'symbol': market['id'],
}, self.omit(params, 'symbol'))
response = self.privateGetTradingFeeSymbol(request)
#
# {
# takeLiquidityRate: '0.001',
# provideLiquidityRate: '-0.0001'
# }
#
return {
'info': response,
'maker': self.safe_number(response, 'provideLiquidityRate'),
'taker': self.safe_number(response, 'takeLiquidityRate'),
}
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalance(params)
#
# {
# "message":null,
# "dataVersion":"7168e6c99e90f60673070944d987988eef7d91fa",
# "data":{
# "vnd":{"balance":0,"available_balance":0},
# "btc":{"balance":0,"available_balance":0},
# "eth":{"balance":0,"available_balance":0},
# },
# }
#
data = self.safe_value(response, 'data')
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
currencyIds = list(data.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
balance = self.safe_value(data, currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available_balance')
account['total'] = self.safe_string(balance, 'balance')
result[code] = account
return self.parse_balance(result)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "low":"415805323.0000000000",
# "high":"415805323.0000000000",
# "open":"415805323.0000000000",
# "close":"415805323.0000000000",
# "time":"1605845940000",
# "volume":"0.0065930000",
# "opening_time":1605845963263,
# "closing_time":1605845963263
# }
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'coin': market['baseId'],
'currency': market['quoteId'],
'resolution': self.timeframes[timeframe],
}
limit = 100 if (limit is None) else limit
limit = min(100, limit)
duration = self.parse_timeframe(timeframe)
if since is None:
end = self.seconds()
request['to'] = end
request['from'] = end - limit * duration
else:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, limit * duration)
response = self.publicGetChartBars(self.extend(request, params))
#
# [
# {"low":"415805323.0000000000","high":"415805323.0000000000","open":"415805323.0000000000","close":"415805323.0000000000","time":"1605845940000","volume":"0.0065930000","opening_time":1605845963263,"closing_time":1605845963263},
# {"low":"416344148.0000000000","high":"416344148.0000000000","open":"415805323.0000000000","close":"416344148.0000000000","time":"1605846000000","volume":"0.0052810000","opening_time":1605846011490,"closing_time":1605846011490},
# {"low":"416299269.0000000000","high":"417278376.0000000000","open":"416344148.0000000000","close":"417278376.0000000000","time":"1605846060000","volume":"0.0136750000","opening_time":1605846070727,"closing_time":1605846102282},
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_pair': market['id'],
# 'depth': 0, # 0 = full orderbook, 5, 10, 20, 50, 100, 500
'level': 2, # 1 = best bidask, 2 = aggregated by price, 3 = no aggregation
}
if limit is not None:
if (limit != 0) and (limit != 5) and (limit != 10) and (limit != 20) and (limit != 50) and (limit != 100) and (limit != 500):
raise BadRequest(self.id + ' fetchOrderBook limit must be 0, 5, 10, 20, 50, 100, 500 if specified')
request['depth'] = limit
response = self.publicGetOrderbookMarketPair(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"376cee43af26deabcd3762ab11a876b6e7a71e82",
# "data":{
# "bids":[
# ["413342637.0000000000","0.165089"],
# ["413274576.0000000000","0.03"],
# ["413274574.0000000000","0.03"],
# ],
# "asks":[
# ["416979125.0000000000","0.122835"],
# ["417248934.0000000000","0.030006"],
# ["417458879.0000000000","0.1517"],
# ],
# "timestamp":"1605841619147"
# }
# }
#
data = self.safe_value(response, 'data')
timestamp = self.safe_value(data, 'timestamp')
return self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 0, 1)
def parse_ticker(self, ticker, market=None):
#
# {
# "base_id":1,
# "quote_id":0,
# "last_price":"411119457",
# "max_price":"419893173.0000000000",
# "min_price":"401292577.0000000000",
# "open_price":null,
# "base_volume":"10.5915050000",
# "quote_volume":"4367495977.4484430060",
# "isFrozen":0
# }
#
timestamp = self.milliseconds()
baseVolume = self.safe_number(ticker, 'base_volume')
quoteVolume = self.safe_number(ticker, 'quote_volume')
open = self.safe_number(ticker, 'open_price')
last = self.safe_number(ticker, 'last_price')
change = None
percentage = None
average = None
if last is not None and open is not None:
change = last - open
average = self.sum(last, open) / 2
if open > 0:
percentage = change / open * 100
vwap = self.vwap(baseVolume, quoteVolume)
symbol = None if (market is None) else market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'max_price'),
'low': self.safe_number(ticker, 'min_price'),
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(params)
#
# {
# "message":null,
# "dataVersion":"fc521161aebe506178b8588cd2adb598eaf1018e",
# "data":{
# "BTC_VND":{
# "base_id":1,
# "quote_id":0,
# "last_price":"411119457",
# "max_price":"419893173.0000000000",
# "min_price":"401292577.0000000000",
# "open_price":null,
# "base_volume":"10.5915050000",
# "quote_volume":"4367495977.4484430060",
# "isFrozen":0
# },
# }
# }
#
result = {}
data = self.safe_value(response, 'data')
marketIds = list(data.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
market = self.safe_market(marketId, None, '_')
symbol = market['symbol']
result[symbol] = self.parse_ticker(data[marketId], market)
return self.filter_by_array(result, 'symbol', symbols)
def parse_trade(self, trade, market=None):
#
# public fetchTrades
#
# {
# "trade_id":181509285,
# "price":"415933022.0000000000",
# "base_volume":"0.0022080000",
# "quote_volume":"918380.1125760000",
# "trade_timestamp":1605842150357,
# "type":"buy",
# }
#
# private fetchMyTrades
#
# {
# "trade_type":"sell",
# "fee":"0.0610578086",
# "id":1483372,
# "created_at":1606581578368,
# "currency":"usdt",
# "coin":"btc",
# "price":"17667.1900000000",
# "quantity":"0.0017280000",
# "amount":"30.5289043200",
# }
#
timestamp = self.safe_integer_2(trade, 'trade_timestamp', 'created_at')
baseId = self.safe_string_upper(trade, 'coin')
quoteId = self.safe_string_upper(trade, 'currency')
marketId = None
if (baseId is not None) and (quoteId is not None):
marketId = baseId + '_' + quoteId
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'base_volume', 'quantity')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.safe_number_2(trade, 'quote_volume', 'amount')
if cost is None:
cost = self.parse_number(Precise.string_mul(priceString, amountString))
side = self.safe_string_2(trade, 'type', 'trade_type')
id = self.safe_string_2(trade, 'trade_id', 'id')
feeCost = self.safe_number(trade, 'fee')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': market['quote'],
}
return {
'info': trade,
'id': id,
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market_pair': market['id'],
# 'type': 'buy', # 'sell'
# 'count': limit, # default 500, max 1000
}
if limit is not None:
request['count'] = min(1000, limit)
response = self.publicGetTradesMarketPair(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"1f811b533143f739008a3e4ecaaab2ec82ea50d4",
# "data":[
# {
# "trade_id":181509285,
# "price":"415933022.0000000000",
# "base_volume":"0.0022080000",
# "quote_volume":"918380.1125760000",
# "trade_timestamp":1605842150357,
# "type":"buy",
# },
# ],
# }
#
data = self.safe_value(response, 'data')
return self.parse_trades(data, market, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'type': type, # 'deposit', 'withdraw'
# 'start': int(since / 1000),
# 'end': self.seconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = min(1000, limit)
if since is not None:
request['start'] = since
response = self.privateGetTransactions(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"1fdfb0ec85b666871d62fe59d098d01839b05e97",
# "data":{
# "current_page":1,
# "data":[
# {
# "id":85391,
# "user_id":253063,
# "transaction_id":"0x885719cee5910ca509a223d208797510e80eb27a2f1d51a71bb4ccb82d538131",
# "internal_transaction_id":null,
# "temp_transaction_id":"2367",
# "currency":"usdt",
# "amount":"30.0000000000",
# "btc_amount":"0.0000000000",
# "usdt_amount":"0.0000000000",
# "fee":"0.0000000000",
# "tx_cost":"0.0000000000",
# "confirmation":0,
# "deposit_code":null,
# "status":"success",
# "bank_name":null,
# "foreign_bank_account":null,
# "foreign_bank_account_holder":null,
# "blockchain_address":"0xd54b84AD27E4c4a8C9E0b2b53701DeFc728f6E44",
# "destination_tag":null,
# "error_detail":null,
# "refunded":"0.0000000000",
# "transaction_date":"2020-11-28",
# "transaction_timestamp":"1606563143.959",
# "created_at":1606563143959,
# "updated_at":1606563143959,
# "transaction_email_timestamp":0,
# "network":null,
# "collect_tx_id":null,
# "collect_id":null
# }
# ],
# "first_page_url":"http:\/\/api.vcc.exchange\/v3\/transactions?page=1",
# "from":1,
# "last_page":1,
# "last_page_url":"http:\/\/api.vcc.exchange\/v3\/transactions?page=1",
# "next_page_url":null,
# "path":"http:\/\/api.vcc.exchange\/v3\/transactions",
# "per_page":10,
# "prev_page_url":null,
# "to":1,
# "total":1
# }
# }
#
data = self.safe_value(response, 'data', {})
data = self.safe_value(data, 'data', [])
return self.parse_transactions(data, currency, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
request = {'type': 'deposit'}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
request = {'type': 'withdraw'}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def parse_transaction(self, transaction, currency=None):
#
# fetchTransactions, fetchDeposits, fetchWithdrawals
#
# {
# "id":85391,
# "user_id":253063,
# "transaction_id":"0x885719cee5910ca509a223d208797510e80eb27a2f1d51a71bb4ccb82d538131",
# "internal_transaction_id":null,
# "temp_transaction_id":"2367",
# "currency":"usdt",
# "amount":"30.0000000000",
# "btc_amount":"0.0000000000",
# "usdt_amount":"0.0000000000",
# "fee":"0.0000000000",
# "tx_cost":"0.0000000000",
# "confirmation":0,
# "deposit_code":null,
# "status":"success",
# "bank_name":null,
# "foreign_bank_account":null,
# "foreign_bank_account_holder":null,
# "blockchain_address":"0xd54b84AD27E4c4a8C9E0b2b53701DeFc728f6E44",
# "destination_tag":null,
# "error_detail":null,
# "refunded":"0.0000000000",
# "transaction_date":"2020-11-28",
# "transaction_timestamp":"1606563143.959",
# "created_at":1606563143959,
# "updated_at":1606563143959,
# "transaction_email_timestamp":0,
# "network":null,
# "collect_tx_id":null,
# "collect_id":null
# }
#
id = self.safe_string(transaction, 'id')
timestamp = self.safe_integer(transaction, 'created_at')
updated = self.safe_integer(transaction, 'updated_at')
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_number(transaction, 'amount')
if amount is not None:
amount = abs(amount)
address = self.safe_string(transaction, 'blockchain_address')
txid = self.safe_string(transaction, 'transaction_id')
tag = self.safe_string(transaction, 'destination_tag')
fee = None
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
type = 'deposit' if (amount > 0) else 'withdrawal'
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def parse_transaction_status(self, status):
statuses = {
'pending': 'pending',
'error': 'failed',
'success': 'ok',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction_type(self, type):
types = {
'deposit': 'deposit',
'withdraw': 'withdrawal',
}
return self.safe_string(types, type, type)
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['cost'], self.precisionMode, self.paddingMode)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'coin': market['baseId'],
'currency': market['quoteId'],
'trade_type': side,
'type': type,
}
if type == 'ceiling_market':
ceiling = self.safe_value(params, 'ceiling')
if ceiling is not None:
request['ceiling'] = self.cost_to_precision(symbol, ceiling)
elif price is not None:
request['ceiling'] = self.cost_to_precision(symbol, amount * price)
else:
raise InvalidOrder(self.id + ' createOrder() requires a price argument or a ceiling parameter for ' + type + ' orders')
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
stopPrice = self.safe_value_2(params, 'stop_price', 'stopPrice')
if stopPrice is not None:
request['is_stop'] = 1
request['stop_condition'] = 'le' if (side == 'buy') else 'ge' # ge = greater than or equal, le = less than or equal
request['stop_price'] = self.price_to_precision(symbol, stopPrice)
params = self.omit(params, ['stop_price', 'stopPrice'])
response = self.privatePostOrders(self.extend(request, params))
#
# ceiling_market order
#
# {
# "message":null,
# "dataVersion":"213fc0d433f38307f736cae1cbda4cc310469b7a",
# "data":{
# "coin":"btc",
# "currency":"usdt",
# "trade_type":"buy",
# "type":"ceiling_market",
# "ceiling":"30",
# "user_id":253063,
# "email":"igor.kroitor@gmail.com",
# "side":"buy",
# "quantity":"0.00172800",
# "status":"pending",
# "fee":0,
# "created_at":1606571333035,
# "updated_at":1606571333035,
# "instrument_symbol":"BTCUSDT",
# "remaining":"0.00172800",
# "fee_rate":"0.002",
# "id":88214435
# }
# }
#
# limit order
#
# {
# "message":null,
# "dataVersion":"d9b1159d2bcefa2388be156e32ddc7cc324400ee",
# "data":{
# "id":41230,
# "trade_type":"sell",
# "type":"limit",
# "quantity":"1",
# "price":"14.99",
# "currency":"usdt",
# "coin":"neo",
# "status":"pending",
# "is_stop": "1",
# "stop_price": "13",
# "stop_condition": "ge",
# "fee":0,
# "created_at":1560244052168,
# "updated_at":1560244052168
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': id,
}
response = self.privatePutOrdersOrderIdCancel(self.extend(request, params))
return self.parse_order(response)
def cancel_all_orders(self, symbol=None, params={}):
type = self.safe_string(params, 'type')
method = 'privatePutOrdersCancelAll' if (type is None) else 'privatePutOrdersCancelByType'
request = {}
if type is not None:
request['type'] = type
self.load_markets()
response = getattr(self, method)(self.extend(request, params))
#
# {
# "dataVersion":"6d72fb82a9c613c8166581a887e1723ce5a937ff",
# "data":{
# "data":[
# {
# "id":410,
# "trade_type":"sell",
# "currency":"usdt",
# "coin":"neo",
# "type":"limit",
# "quantity":"1.0000000000",
# "price":"14.9900000000",
# "executed_quantity":"0.0000000000",
# "executed_price":"0.0000000000",
# "fee":"0.0000000000",
# "status":"canceled",
# "created_at":1560244052168,
# "updated_at":1560244052168,
# },
# ],
# },
# }
#
data = self.safe_value(response, 'data', {})
data = self.safe_value(data, 'data', [])
return self.parse_orders(data)
def parse_order_status(self, status):
statuses = {
'pending': 'open',
'stopping': 'open',
'executing': 'open',
'executed': 'closed',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# ceiling_market
#
# {
# "coin":"btc",
# "currency":"usdt",
# "trade_type":"buy",
# "type":"ceiling_market",
# "ceiling":"30",
# "user_id":253063,
# "email":"igor.kroitor@gmail.com",
# "side":"buy",
# "quantity":"0.00172800",
# "status":"pending",
# "fee":0,
# "created_at":1606571333035,
# "updated_at":1606571333035,
# "instrument_symbol":"BTCUSDT",
# "remaining":"0.00172800",
# "fee_rate":"0.002",
# "id":88214435
# }
#
# limit order
#
# {
# "id":41230,
# "trade_type":"sell",
# "type":"limit",
# "quantity":"1",
# "price":"14.99",
# "currency":"usdt",
# "coin":"neo",
# "status":"pending",
# "is_stop": "1",
# "stop_price": "13",
# "stop_condition": "ge",
# "fee":0,
# "created_at":1560244052168,
# "updated_at":1560244052168
# }
#
created = self.safe_value(order, 'created_at')
updated = self.safe_value(order, 'updated_at')
baseId = self.safe_string_upper(order, 'coin')
quoteId = self.safe_string_upper(order, 'currency')
marketId = baseId + '_' + quoteId
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
amount = self.safe_number(order, 'quantity')
filled = self.safe_number(order, 'executed_quantity')
status = self.parse_order_status(self.safe_string(order, 'status'))
cost = self.safe_number(order, 'ceiling')
id = self.safe_string(order, 'id')
price = self.safe_number(order, 'price')
average = self.safe_number(order, 'executed_price')
remaining = self.safe_number(order, 'remaining')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'trade_type')
fee = {
'currency': market['quote'],
'cost': self.safe_number(order, 'fee'),
'rate': self.safe_number(order, 'fee_rate'),
}
lastTradeTimestamp = None
if updated != created:
lastTradeTimestamp = updated
stopPrice = self.safe_number(order, 'stop_price')
return self.safe_order({
'id': id,
'clientOrderId': id,
'timestamp': created,
'datetime': self.iso8601(created),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'amount': amount,
'cost': cost,
'filled': filled,
'remaining': remaining,
'fee': fee,
'trades': None,
'info': order,
})
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'order_id': id,
}
response = self.privateGetOrdersOrderId(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"57448aa1fb8f227254e8e2e925b3ade8e1e5bbef",
# "data":{
# "id":88265741,
# "user_id":253063,
# "email":"igor.kroitor@gmail.com",
# "updated_at":1606581578141,
# "created_at":1606581578141,
# "coin":"btc",
# "currency":"usdt",
# "type":"market",
# "trade_type":"sell",
# "executed_price":"17667.1900000000",
# "price":null,
# "executed_quantity":"0.0017280000",
# "quantity":"0.0017280000",
# "fee":"0.0610578086",
# "status":"executed",
# "is_stop":0,
# "stop_condition":null,
# "stop_price":null,
# "ceiling":null
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(data)
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'page': 1,
# 'limit': limit, # max 1000
# 'start_date': since,
# 'end_date': self.milliseconds(),
# 'currency': market['quoteId'],
# 'coin': market['baseId'],
# 'trade_type': 'buy', # or 'sell'
# 'hide_canceled': 0, # 1 to exclude canceled orders
}
market = None
if symbol is not None:
market = self.market(symbol)
request['coin'] = market['baseId']
request['currency'] = market['quoteId']
if since is not None:
request['start_date'] = since
if limit is not None:
request['limit'] = min(1000, limit) # max 1000
response = getattr(self, method)(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"89aa11497f23fdd34cf9de9c55acfad863c78780",
# "data":{
# "current_page":1,
# "data":[
# {
# "id":88489678,
# "email":"igor.kroitor@gmail.com",
# "updated_at":1606628593567,
# "created_at":1606628593567,
# "coin":"btc",
# "currency":"usdt",
# "type":"limit",
# "trade_type":"buy",
# "executed_price":"0.0000000000",
# "price":"10000.0000000000",
# "executed_quantity":"0.0000000000",
# "quantity":"0.0010000000",
# "fee":"0.0000000000",
# "status":"pending",
# "is_stop":0,
# "stop_condition":null,
# "stop_price":null,
# "ceiling":null,
# },
# ],
# "first_page_url":"http:\/\/api.vcc.exchange\/v3\/orders\/open?page=1",
# "from":1,
# "last_page":1,
# "last_page_url":"http:\/\/api.vcc.exchange\/v3\/orders\/open?page=1",
# "next_page_url":null,
# "path":"http:\/\/api.vcc.exchange\/v3\/orders\/open",
# "per_page":10,
# "prev_page_url":null,
# "to":1,
# "total":1,
# },
# }
#
data = self.safe_value(response, 'data', {})
data = self.safe_value(data, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetOrdersOpen', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('privateGetOrders', symbol, since, limit, params)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'page': 1,
# 'limit': limit, # max 1000
# 'start_date': since,
# 'end_date': self.milliseconds(),
# 'currency': market['quoteId'],
# 'coin': market['baseId'],
# 'trade_type': 'buy', # or 'sell'
}
market = None
if symbol is not None:
market = self.market(symbol)
request['coin'] = market['baseId']
request['currency'] = market['quoteId']
if since is not None:
request['start_date'] = since
if limit is not None:
request['limit'] = min(1000, limit) # max 1000
response = self.privateGetOrdersTrades(self.extend(request, params))
#
# {
# "message":null,
# "dataVersion":"eb890af684cf84e20044e9a9771b96302e7b8dec",
# "data":{
# "current_page":1,
# "data":[
# {
# "trade_type":"sell",
# "fee":"0.0610578086",
# "id":1483372,
# "created_at":1606581578368,
# "currency":"usdt",
# "coin":"btc",
# "price":"17667.1900000000",
# "quantity":"0.0017280000",
# "amount":"30.5289043200",
# },
# ],
# "first_page_url":"http:\/\/api.vcc.exchange\/v3\/orders\/trades?page=1",
# "from":1,
# "last_page":1,
# "last_page_url":"http:\/\/api.vcc.exchange\/v3\/orders\/trades?page=1",
# "next_page_url":null,
# "path":"http:\/\/api.vcc.exchange\/v3\/orders\/trades",
# "per_page":10,
# "prev_page_url":null,
# "to":2,
# "total":2,
# },
# }
#
data = self.safe_value(response, 'data', {})
data = self.safe_value(data, 'data', [])
return self.parse_trades(data, market, since, limit)
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
#
# {
# "dataVersion":"6d72fb82a9c613c8166581a887e1723ce5a937ff",
# "data":{
# "status": "REQUESTED",
# "blockchain_address": "",
# "currency": "btc"
# }
# }
#
# {
# "dataVersion":"6d72fb82a9c613c8166581a887e1723ce5a937ff",
# "data":{
# "status": "PROVISIONED",
# "blockchain_address": "rPVMhWBsfF9iMXYj3aAzJVkPDTFNSyWdKy",
# "blockchain_tag": "920396135",
# "currency": "xrp"
# }
# }
#
data = self.safe_value(response, 'data')
status = self.safe_string(data, 'status')
if status == 'REQUESTED':
raise AddressPending(self.id + ' is generating ' + code + ' deposit address, call fetchDepositAddress one more time later to retrieve the generated address')
address = self.safe_string(data, 'blockchain_address')
self.check_address(address)
tag = self.safe_string(data, 'blockchain_tag')
currencyId = self.safe_string(data, 'currency')
return {
'currency': self.safe_currency_code(currencyId),
'address': address,
'tag': tag,
'info': data,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
url += '?' + self.urlencode(query)
if api == 'private':
self.check_required_credentials()
timestamp = str(self.milliseconds())
if method != 'GET':
body = self.json(query)
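            # the signature is HMAC-SHA256 (keyed with the API secret) over "<HTTP METHOD> <versioned path incl. query string>"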
auth = method + ' ' + url
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
headers = {
'Authorization': 'Bearer ' + self.apiKey,
'Content-Type': 'application/json',
'timestamp': timestamp,
'signature': signature,
}
url = self.urls['api'][api] + '/' + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
#
# {"message":"Insufficient balance."}
# {"message":"Unauthenticated."} # wrong api key
# {"message":"The given data was invalid.","errors":{"signature":["HMAC signature is invalid"]}}
# {"code":504,"message":"Gateway Timeout","description":""}
# {"code":429,"message":"Too many requests","description":"Too many requests"}
#
message = self.safe_string(response, 'message')
if message is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback)
| 41.594682
| 245
| 0.465759
|
47a8543b438944ff8edf4d9866bd563805a2a7db
| 1,307
|
py
|
Python
|
toThingspeak.py
|
ichsankurnia/BAMS.BBTA3
|
2c7aed514b66d8700f7f33a4b959a0aaf897868d
|
[
"MIT"
] | null | null | null |
toThingspeak.py
|
ichsankurnia/BAMS.BBTA3
|
2c7aed514b66d8700f7f33a4b959a0aaf897868d
|
[
"MIT"
] | null | null | null |
toThingspeak.py
|
ichsankurnia/BAMS.BBTA3
|
2c7aed514b66d8700f7f33a4b959a0aaf897868d
|
[
"MIT"
] | null | null | null |
import time
import os
import sys
import urllib # URL functions
import urllib2 # URL functions
import random
################# Default Constants #################
# These can be changed if required
THINGSPEAKKEY = 'HCRR3NJG1RWYZ6CY'
THINGSPEAKURL = 'https://api.thingspeak.com/update'
def sendData(url,key,field1,field2,temp,pres):
    """
    Send one temperature/pressure update to ThingSpeak.
    """
    # field1/field2 hold the ThingSpeak field names passed in by the caller
    values = {'api_key' : key, field1 : temp, field2 : pres}
    postdata = urllib.urlencode(values)
    req = urllib2.Request(url, postdata)
    log = ''
    try:
        # Send data to Thingspeak (5 second timeout)
        response = urllib2.urlopen(req, None, 5)
        html_string = response.read()
        response.close()
        log = log + 'Update ' + html_string
    except urllib2.HTTPError, e:
        log = log + 'Server could not fulfill the request. Error code: ' + str(e.code)
    except urllib2.URLError, e:
        log = log + 'Failed to reach server. Reason: ' + str(e.reason)
    except:
        log = log + 'Unknown error'
    print log
def main():
    while True:
        temperature = random.randint(0,100)
        pressure = random.randint(0,1000)
        sendData(THINGSPEAKURL,THINGSPEAKKEY,'field1','field2',temperature,pressure)
        sys.stdout.flush()
        # throttle updates: ThingSpeak's free tier accepts at most ~one update per 15 seconds
        time.sleep(15)
if __name__=="__main__":
main()
| 25.134615
| 82
| 0.664116
|
ff4bb5601f14c3fa833a1bb836cfdcc459c7375a
| 1,178
|
py
|
Python
|
wallet/mywallet/forms.py
|
Shpilevskiy/automatic-couscous
|
e0ebbe9338884f017fbeae495a083bd27f6c9354
|
[
"MIT"
] | null | null | null |
wallet/mywallet/forms.py
|
Shpilevskiy/automatic-couscous
|
e0ebbe9338884f017fbeae495a083bd27f6c9354
|
[
"MIT"
] | null | null | null |
wallet/mywallet/forms.py
|
Shpilevskiy/automatic-couscous
|
e0ebbe9338884f017fbeae495a083bd27f6c9354
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Wallet
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Button
from crispy_forms.bootstrap import Field, FormActions
class AddOperationForm(forms.Form):
title = forms.CharField(label='Title', required=True)
sum = forms.FloatField(label='Sum', required=True)
wallets = forms.ChoiceField(label='Wallets', required=True)
code = forms.ChoiceField(label='Codes', required=True)
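    # NOTE: 'wallets' and 'code' declare no choices here; they are presumably populated at runtime (e.g. in the view)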
date = forms.DateField(widget=forms.TextInput(attrs={'type': 'date'}))
helper = FormHelper()
helper.form_id = 'id-add-operation-form'
helper.form_method = 'POST'
    helper.form_class = 'form-horizontal form-group'  # CSS classes are space-separated
helper.form_show_labels = False
# helper.form_action = '/add-operation/'
helper.layout = Layout(
Field('title', placeholder='Title', css_class='form-control'),
Field('sum', placeholder='Sum', css_class='form-control'),
Field('wallets', css_class='form-control'),
Field('code', css_class='form-control'),
Field('date', css_class='form-control'),
        FormActions(Button('Add', 'add', css_class='btn btn-primary'))
)
| 40.62069
| 74
| 0.696944
|
d8bfe09d7b0185aebd009c98380c574651bc8ad3
| 1,098
|
py
|
Python
|
bootstrapvz/providers/ec2/tasks/host.py
|
null0000/bootstrap-vz
|
003cdd9808bac90383b4c46738507bd7e1daa268
|
[
"Apache-2.0"
] | null | null | null |
bootstrapvz/providers/ec2/tasks/host.py
|
null0000/bootstrap-vz
|
003cdd9808bac90383b4c46738507bd7e1daa268
|
[
"Apache-2.0"
] | null | null | null |
bootstrapvz/providers/ec2/tasks/host.py
|
null0000/bootstrap-vz
|
003cdd9808bac90383b4c46738507bd7e1daa268
|
[
"Apache-2.0"
] | null | null | null |
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import host
class AddExternalCommands(Task):
description = 'Determining required external commands for EC2 bootstrapping'
phase = phases.preparation
successors = [host.CheckExternalCommands]
@classmethod
def run(cls, info):
if info.manifest.volume['backing'] == 's3':
info.host_dependencies['euca-bundle-image'] = 'euca2ools'
info.host_dependencies['euca-upload-bundle'] = 'euca2ools'
class GetInstanceMetadata(Task):
description = 'Retrieving instance metadata'
phase = phases.preparation
@classmethod
def run(cls, info):
import urllib2
import json
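        # 169.254.169.254 is the link-local address of the EC2 instance metadata service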
metadata_url = 'http://169.254.169.254/latest/dynamic/instance-identity/document'
response = urllib2.urlopen(url=metadata_url, timeout=5)
info._ec2['host'] = json.load(response)
info._ec2['region'] = info._ec2['host']['region']
class SetRegion(Task):
description = 'Setting the AWS region'
phase = phases.preparation
@classmethod
def run(cls, info):
info._ec2['region'] = info.manifest.image['region']
| 28.153846
| 83
| 0.753188
|
f65842ddc7191f645c3ba495f2af225ae1296db1
| 1,006
|
py
|
Python
|
model/base/enc_dec_network.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | 2
|
2018-03-06T13:00:33.000Z
|
2018-05-29T00:27:01.000Z
|
model/base/enc_dec_network.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | null | null | null |
model/base/enc_dec_network.py
|
tiagopms/fast-conversational-banking
|
b9d3ddfe3adb78522fafab91c2d20495db063dda
|
[
"MIT"
] | null | null | null |
import random
import pickle
import torch
from torch import nn
class EncDecNetwork(nn.Module):
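    """Base wrapper around an encoder/decoder pair; subclasses must implement
    full_forward, translate and get_checkpoint_data."""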
def __init__(self, encoder, decoder):
super(EncDecNetwork, self).__init__()
self.encoder = encoder
self.decoder = decoder
self._cuda = False
def full_forward(self):
raise NotImplementedError
def translate(self):
raise NotImplementedError
def cuda(self):
super(EncDecNetwork, self).cuda()
self.encoder.cuda()
self.decoder.cuda()
self._cuda = True
def initialize_params(self, init_range):
for p in self.parameters():
p.data.uniform_(-init_range, init_range)
def save_config_data(self, path):
checkpoint_data = self.get_checkpoint_data()
with open(path, 'wb') as f:
pickle.dump(checkpoint_data, f, -1)
def get_checkpoint_data(self):
raise NotImplementedError('get_checkpoint_data should be implemented by class that inherits EncDecNetwork')
| 25.794872
| 115
| 0.662028
|
c602888b73b2a998d5762c4a47995b91cebfc2b5
| 802
|
py
|
Python
|
tests/test_status.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_status.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_status.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from setup_teardown import start_db, stop_db
from nose.tools import *
from pykt import KyotoTycoon, KTException
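# status() on a client that was never opened should fail with IOError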
@raises(IOError)
def test_err_status():
db = KyotoTycoon()
db.status()
@with_setup(setup=start_db,teardown=stop_db)
def test_status():
db = KyotoTycoon()
db = db.open()
ret = db.status()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_status_with_db():
db = KyotoTycoon("test")
db = db.open()
ret = db.status()
ok_(ret)
ok_(isinstance(ret, dict))
db.close()
@with_setup(setup=start_db,teardown=stop_db)
def test_status_loop():
db = KyotoTycoon()
db = db.open()
for i in xrange(100):
ret = db.status()
ok_(ret)
db.close()
| 21.675676
| 44
| 0.647132
|