hexsha stringlengths 40..40 | size int64 4..1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4..209 | max_stars_repo_name stringlengths 5..121 | max_stars_repo_head_hexsha stringlengths 40..40 | max_stars_repo_licenses listlengths 1..10 | max_stars_count int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24..24 ⌀ | max_issues_repo_path stringlengths 4..209 | max_issues_repo_name stringlengths 5..121 | max_issues_repo_head_hexsha stringlengths 40..40 | max_issues_repo_licenses listlengths 1..10 | max_issues_count int64 1..67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24..24 ⌀ | max_forks_repo_path stringlengths 4..209 | max_forks_repo_name stringlengths 5..121 | max_forks_repo_head_hexsha stringlengths 40..40 | max_forks_repo_licenses listlengths 1..10 | max_forks_count int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24..24 ⌀ | content stringlengths 4..1.02M | avg_line_length float64 1.07..66.1k | max_line_length int64 4..266k | alphanum_fraction float64 0.01..1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c307c01322f202fd16db183f5cc4916c31fb584e | 12,205 | py | Python | holoviews/plotting/bokeh/annotation.py | cocoaaa/holoviews | bb3dc4975c2604f38d141ccad1c931ed5d9b1322 | ["BSD-3-Clause"] | null | null | null | holoviews/plotting/bokeh/annotation.py | cocoaaa/holoviews | bb3dc4975c2604f38d141ccad1c931ed5d9b1322 | ["BSD-3-Clause"] | null | null | null | holoviews/plotting/bokeh/annotation.py | cocoaaa/holoviews | bb3dc4975c2604f38d141ccad1c931ed5d9b1322 | ["BSD-3-Clause"] | null | null | null |
from __future__ import absolute_import, division, unicode_literals
from collections import defaultdict
import param
import numpy as np
from bokeh.models import Span, Arrow, Div as BkDiv
try:
from bokeh.models.arrow_heads import TeeHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': TeeHead, '-|>': NormalHead,
'-': None}
except:
from bokeh.models.arrow_heads import OpenHead, NormalHead
arrow_start = {'<->': NormalHead, '<|-|>': NormalHead}
arrow_end = {'->': NormalHead, '-[': OpenHead, '-|>': NormalHead,
'-': None}
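# Illustrative note (not part of the original module): the two dicts above resolve a
# HoloViews arrowstyle string to an optional Bokeh ArrowHead class, for example:
#   arrow_end.get('-|>')  # -> NormalHead
#   arrow_end.get('-[')   # -> TeeHead (or OpenHead when TeeHead is unavailable in older Bokeh)
#   arrow_end.get('-')    # -> None, i.e. no head is drawn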
from bokeh.transform import dodge
from ...core.util import datetime_types, dimension_sanitizer, basestring
from ...element import HLine, VLine
from ..plot import GenericElementPlot
from .element import AnnotationPlot, ElementPlot, CompositeElementPlot, ColorbarPlot
from .styles import text_properties, line_properties
from .plot import BokehPlot
from .util import date_to_integer
class TextPlot(ElementPlot, AnnotationPlot):
style_opts = text_properties+['color', 'angle']
_plot_methods = dict(single='text', batched='text')
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y', text='text')
if self.static_source:
return dict(x=[], y=[], text=[]), mapping, style
if self.invert_axes:
data = dict(x=[element.y], y=[element.x])
else:
data = dict(x=[element.x], y=[element.y])
self._categorize_data(data, ('x', 'y'), element.dimensions())
data['text'] = [element.text]
if 'text_align' not in style:
style['text_align'] = element.halign
baseline = 'middle' if element.valign == 'center' else element.valign
if 'text_baseline' not in style:
style['text_baseline'] = baseline
if 'text_font_size' not in style:
style['text_font_size'] = '%dPt' % element.fontsize
if 'color' in style:
style['text_color'] = style.pop('color')
style['angle'] = np.deg2rad(style.get('angle', element.rotation))
return (data, mapping, style)
def get_batched_data(self, element, ranges=None):
data = defaultdict(list)
zorders = self._updated_zorders(element)
for (key, el), zorder in zip(element.data.items(), zorders):
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
for k, eld in eldata.items():
data[k].extend(eld)
return data, elmapping, style
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class LabelsPlot(ColorbarPlot, AnnotationPlot):
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
xoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along x-axis.""")
yoffset = param.Number(default=None, doc="""
Amount of offset to apply to labels along y-axis.""")
# Deprecated options
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
style_opts = text_properties + ['cmap', 'angle']
_nonvectorized_styles = ['cmap']
_plot_methods = dict(single='text', batched='text')
_batched_style_opts = text_properties
def get_data(self, element, ranges, style):
style = self.style[self.cyclic_index]
if 'angle' in style and isinstance(style['angle'], (int, float)):
style['angle'] = np.deg2rad(style.get('angle', 0))
dims = element.dimensions()
coords = (1, 0) if self.invert_axes else (0, 1)
xdim, ydim, tdim = (dimension_sanitizer(dims[i].name) for i in coords+(2,))
mapping = dict(x=xdim, y=ydim, text=tdim)
data = {d: element.dimension_values(d) for d in (xdim, ydim)}
if self.xoffset is not None:
mapping['x'] = dodge(xdim, self.xoffset)
if self.yoffset is not None:
mapping['y'] = dodge(ydim, self.yoffset)
data[tdim] = [dims[2].pprint_value(v) for v in element.dimension_values(2)]
self._categorize_data(data, (xdim, ydim), element.dimensions())
cdim = element.get_dimension(self.color_index)
if cdim is None:
return data, mapping, style
cdata, cmapping = self._get_color_data(element, ranges, style, name='text_color')
if dims[2] is cdim and cdata:
# If color dim is same as text dim, rename color column
data['text_color'] = cdata[tdim]
mapping['text_color'] = dict(cmapping['text_color'], field='text_color')
else:
data.update(cdata)
mapping.update(cmapping)
return data, mapping, style
class LineAnnotationPlot(ElementPlot, AnnotationPlot):
style_opts = line_properties + ['level']
apply_ranges = param.Boolean(default=False, doc="""
Whether to include the annotation in axis range calculations.""")
_plot_methods = dict(single='Span')
def get_data(self, element, ranges, style):
data, mapping = {}, {}
dim = 'width' if isinstance(element, HLine) else 'height'
if self.invert_axes:
dim = 'width' if dim == 'height' else 'height'
mapping['dimension'] = dim
loc = element.data
if isinstance(loc, datetime_types):
loc = date_to_integer(loc)
mapping['location'] = loc
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
box = Span(level=properties.get('level', 'glyph'), **mapping)
plot.renderers.append(box)
return None, box
def get_extents(self, element, ranges=None, range_type='combined'):
loc = element.data
if isinstance(element, VLine):
dim = 'x'
elif isinstance(element, HLine):
dim = 'y'
if self.invert_axes:
dim = 'x' if dim == 'y' else 'y'
ranges[dim]['soft'] = loc, loc
return super(LineAnnotationPlot, self).get_extents(element, ranges, range_type)
class SplinePlot(ElementPlot, AnnotationPlot):
"""
Draw the supplied Spline annotation (see Spline docstring).
Does not support matplotlib Path codes.
"""
style_opts = line_properties
_plot_methods = dict(single='bezier')
def get_data(self, element, ranges, style):
if self.invert_axes:
data_attrs = ['y0', 'x0', 'cy0', 'cx0', 'cy1', 'cx1', 'y1', 'x1']
else:
data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1']
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs}
skipped = False
for vs in np.split(verts, inds[1:]):
if len(vs) != 4:
skipped = len(vs) > 1
continue
for x, y, xl, yl in zip(vs[:, 0], vs[:, 1], data_attrs[::2], data_attrs[1::2]):
data[xl].append(x)
data[yl].append(y)
if skipped:
self.param.warning(
'Bokeh SplinePlot only supports cubic splines; unsupported '
'splines were skipped during plotting.')
data = {da: data[da] for da in data_attrs}
return (data, dict(zip(data_attrs, data_attrs)), style)
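# Illustrative note (not part of the original module): each group of four vertices above
# maps onto Bokeh's cubic `bezier` glyph columns, i.e. (x0, y0) start point, (cx0, cy0) and
# (cx1, cy1) control points, and (x1, y1) end point; groups that do not contain exactly
# four vertices are skipped and trigger the warning above.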
class ArrowPlot(CompositeElementPlot, AnnotationPlot):
style_opts = (['arrow_%s' % p for p in line_properties+['size']] + text_properties)
_style_groups = {'arrow': 'arrow', 'label': 'text'}
def get_data(self, element, ranges, style):
plot = self.state
label_mapping = dict(x='x', y='y', text='text')
# Compute arrow
x1, y1 = element.x, element.y
axrange = plot.x_range if self.invert_axes else plot.y_range
span = (axrange.end - axrange.start) / 6.
if element.direction == '^':
x2, y2 = x1, y1-span
label_mapping['text_baseline'] = 'top'
elif element.direction == '<':
x2, y2 = x1+span, y1
label_mapping['text_align'] = 'left'
label_mapping['text_baseline'] = 'middle'
elif element.direction == '>':
x2, y2 = x1-span, y1
label_mapping['text_align'] = 'right'
label_mapping['text_baseline'] = 'middle'
else:
x2, y2 = x1, y1+span
label_mapping['text_baseline'] = 'bottom'
arrow_opts = {'x_end': x1, 'y_end': y1,
'x_start': x2, 'y_start': y2}
# Define arrowhead
arrow_opts['arrow_start'] = arrow_start.get(element.arrowstyle, None)
arrow_opts['arrow_end'] = arrow_end.get(element.arrowstyle, NormalHead)
# Compute label
if self.invert_axes:
label_data = dict(x=[y2], y=[x2])
else:
label_data = dict(x=[x2], y=[y2])
label_data['text'] = [element.text]
return ({'label': label_data},
{'arrow': arrow_opts, 'label': label_mapping}, style)
def _init_glyph(self, plot, mapping, properties, key):
"""
Returns a Bokeh glyph object.
"""
properties.pop('legend', None)
if key == 'arrow':
properties.pop('source')
arrow_end = mapping.pop('arrow_end')
arrow_start = mapping.pop('arrow_start')
start = arrow_start(**properties) if arrow_start else None
end = arrow_end(**properties) if arrow_end else None
renderer = Arrow(start=start, end=end, **dict(**mapping))
glyph = renderer
else:
properties = {p if p == 'source' else 'text_'+p: v
for p, v in properties.items()}
renderer, glyph = super(ArrowPlot, self)._init_glyph(
plot, mapping, properties, 'text_1')
plot.renderers.append(renderer)
return renderer, glyph
def get_extents(self, element, ranges=None, range_type='combined'):
return None, None, None, None
class DivPlot(BokehPlot, GenericElementPlot, AnnotationPlot):
height = param.Number(default=300)
width = param.Number(default=300)
finalize_hooks = param.HookList(default=[], doc="""
Deprecated; use hooks options instead.""")
hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a plot. The
hook is passed the plot object and the displayed element, and
other plotting handles can be accessed via plot.handles.""")
_stream_data = False
def __init__(self, element, plot=None, **params):
super(DivPlot, self).__init__(element, **params)
self.callbacks = []
self.handles = {} if plot is None else self.handles['plot']
self.static = len(self.hmap) == 1 and len(self.keys) == len(self.hmap)
def get_data(self, element, ranges, style):
return element.data, {}, style
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
self.current_frame = element
self.current_key = key
data, _, _ = self.get_data(element, ranges, {})
div = BkDiv(text=data, width=self.width, height=self.height)
self.handles['plot'] = div
self._execute_hooks(element)
self.drawn = True
return div
def update_frame(self, key, ranges=None, plot=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
text, _, _ = self.get_data(element, ranges, {})
self.handles['plot'].text = text
| 37.210366 | 91 | 0.599754 |
f01ebb21b59e2cb82f71dd187737dc49d06cf708 | 41,038 | py | Python | setup.py | hubert-pietron/airflow | 3f9073c46940ef1f25a9f46b447d9cf84435c3ed | ["Apache-2.0"] | 8,092 | 2016-04-27T20:32:29.000Z | 2019-01-05T07:39:33.000Z | setup.py | hubert-pietron/airflow | 3f9073c46940ef1f25a9f46b447d9cf84435c3ed | ["Apache-2.0"] | 2,961 | 2016-05-05T07:16:16.000Z | 2019-01-05T08:47:59.000Z | setup.py | hubert-pietron/airflow | 3f9073c46940ef1f25a9f46b447d9cf84435c3ed | ["Apache-2.0"] | 3,546 | 2016-05-04T20:33:16.000Z | 2019-01-05T05:14:26.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Setup.py for the Airflow project."""
import glob
import logging
import os
import subprocess
import sys
import unittest
from copy import deepcopy
from os.path import dirname, relpath
from textwrap import wrap
from typing import Dict, List
from setuptools import Command, Distribution, find_namespace_packages, setup
from setuptools.command.develop import develop as develop_orig
from setuptools.command.install import install as install_orig
# Setuptools patches this import to point to a vendored copy instead of the
# stdlib, which is deprecated in Python 3.10 and will be removed in 3.12.
from distutils import log # isort: skip
# Controls whether providers are installed from packages or directly from sources
# It is turned on by default in case of development environments such as Breeze
# And it is particularly useful when you add a new provider and there is no
# PyPI version to install the provider package from
INSTALL_PROVIDERS_FROM_SOURCES = 'INSTALL_PROVIDERS_FROM_SOURCES'
PY39 = sys.version_info >= (3, 9)
logger = logging.getLogger(__name__)
version = '2.4.0.dev0'
my_dir = dirname(__file__)
def airflow_test_suite() -> unittest.TestSuite:
"""Test suite for Airflow tests"""
test_loader = unittest.TestLoader()
test_suite = test_loader.discover(os.path.join(my_dir, 'tests'), pattern='test_*.py')
return test_suite
class CleanCommand(Command):
"""
Command to tidy up the project root.
Registered as cmdclass in setup() so it can be called with ``python setup.py extra_clean``.
"""
description = "Tidy up the project root"
user_options: List[str] = []
def initialize_options(self) -> None:
"""Set default values for options."""
def finalize_options(self) -> None:
"""Set final values for options."""
@staticmethod
def rm_all_files(files: List[str]) -> None:
"""Remove all files from the list"""
for file in files:
try:
os.remove(file)
except Exception as e:
logger.warning("Error when removing %s: %s", file, e)
def run(self) -> None:
"""Remove temporary files and directories."""
os.chdir(my_dir)
self.rm_all_files(glob.glob('./build/*'))
self.rm_all_files(glob.glob('./**/__pycache__/*', recursive=True))
self.rm_all_files(glob.glob('./**/*.pyc', recursive=True))
self.rm_all_files(glob.glob('./dist/*'))
self.rm_all_files(glob.glob('./*.egg-info'))
self.rm_all_files(glob.glob('./docker-context-files/*.whl'))
self.rm_all_files(glob.glob('./docker-context-files/*.tgz'))
class CompileAssets(Command):
"""
Compile and build the frontend assets using yarn and webpack.
Registered as cmdclass in setup() so it can be called with ``python setup.py compile_assets``.
"""
description = "Compile and build the frontend assets"
user_options: List[str] = []
def initialize_options(self) -> None:
"""Set default values for options."""
def finalize_options(self) -> None:
"""Set final values for options."""
def run(self) -> None:
"""Run a command to compile and build assets."""
subprocess.check_call('./airflow/www/compile_assets.sh')
class ListExtras(Command):
"""
List all available extras
Registered as cmdclass in setup() so it can be called with ``python setup.py list_extras``.
"""
description = "List available extras"
user_options: List[str] = []
def initialize_options(self) -> None:
"""Set default values for options."""
def finalize_options(self) -> None:
"""Set final values for options."""
def run(self) -> None:
"""List extras."""
print("\n".join(wrap(", ".join(EXTRAS_REQUIREMENTS.keys()), 100)))
def git_version(version_: str) -> str:
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a '.dev0' suffix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
:param str version_: Semver version
:return: Found Airflow version in Git repo
:rtype: str
"""
try:
import git
try:
repo = git.Repo(os.path.join(*[my_dir, '.git']))
except git.NoSuchPathError:
logger.warning('.git directory not found: Cannot compute the git version')
return ''
except git.InvalidGitRepositoryError:
logger.warning('Invalid .git directory found: Cannot compute the git version')
return ''
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return f'.dev0+{sha}.dirty'
# commit is clean
return f'.release:{version_}+{sha}'
return 'no_git_version'
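# Illustrative examples of the values returned above (not part of the original file):
#   clean checkout            -> '.release:2.4.0.dev0+<sha>'   (<sha> is the HEAD commit hash)
#   uncommitted local changes -> '.dev0+<sha>.dirty'
#   missing .git or gitpython -> ''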
def write_version(filename: str = os.path.join(*[my_dir, "airflow", "git_version"])) -> None:
"""
Write the Semver version + git hash to file, e.g. ".dev0+2f635dc265e78db6708f59f68e8009abb92c1e65".
:param str filename: Destination file to write
"""
text = f"{git_version(version)}"
with open(filename, 'w') as file:
file.write(text)
pandas_requirement = 'pandas>=0.17.1'
# 'Start dependencies group' and 'End dependencies group' are marks for ./scripts/ci/check_order_setup.py
# If you change this mark you should also change ./scripts/ci/check_order_setup.py
# Start dependencies group
alibaba = [
'oss2>=2.14.0',
]
amazon = [
'boto3>=1.15.0',
# watchtower 3 was released at the end of January 2022 and introduced breaking changes across the board that might
# change logging behaviour:
# https://github.com/kislyuk/watchtower/blob/develop/Changes.rst#changes-for-v300-2022-01-26
# TODO: update to watchtower >3
'watchtower~=2.0.1',
'jsonpath_ng>=1.5.3',
'redshift_connector>=2.0.888',
'sqlalchemy_redshift>=0.8.6',
pandas_requirement,
'mypy-boto3-rds>=1.21.0',
'mypy-boto3-redshift-data>=1.21.0',
]
apache_beam = [
'apache-beam>=2.33.0',
]
arangodb = ['python-arango>=7.3.2']
asana = ['asana>=0.10']
async_packages = [
'eventlet>=0.9.7',
'gevent>=0.13',
'greenlet>=0.4.9',
]
atlas = [
'atlasclient>=0.1.2',
]
azure = [
'azure-batch>=8.0.0',
'azure-cosmos>=4.0.0',
'azure-datalake-store>=0.0.45',
'azure-identity>=1.3.1',
'azure-keyvault-secrets>=4.1.0,<5.0',
'azure-kusto-data>=0.0.43,<0.1',
# Azure integration uses old libraries and the limits below reflect that
# TODO: upgrade to newer versions of all the below libraries
'azure-mgmt-containerinstance>=1.5.0,<2.0',
'azure-mgmt-datafactory>=1.0.0,<2.0',
'azure-mgmt-datalake-store>=0.5.0',
'azure-mgmt-resource>=2.2.0',
# limited due to https://github.com/Azure/azure-sdk-for-python/pull/18801 implementation released in 12.9
'azure-storage-blob>=12.7.0,<12.9.0',
'azure-storage-common>=2.1.0',
'azure-storage-file>=2.1.0',
]
cassandra = [
'cassandra-driver>=3.13.0',
]
celery = [
# Celery is known to introduce problems when upgraded to a MAJOR version. Airflow Core
# uses Celery for CeleryExecutor, and we also know that Celery follows SemVer
# (https://docs.celeryq.dev/en/stable/contributing.html?highlight=semver#versions).
# This is a crucial component of Airflow, so we should limit it to the next MAJOR version and only
# deliberately bump the version when we tested it, and we know it can be bumped.
# Bumping this version should also be connected with
# limiting minimum airflow version supported in cncf.kubernetes provider, due to the
# potential breaking changes in Airflow Core as well (celery is added as extra, so Airflow
# core is not hard-limited via install-requirements, only by extra).
'celery>=5.2.3,<6',
'flower>=1.0.0',
]
cgroups = [
# Cgroupspy 0.2.2 added Python 3.10 compatibility
'cgroupspy>=0.2.2',
]
cloudant = [
'cloudant>=2.0',
]
dask = [
# Dask support is limited; we need the Dask team to upgrade support for dask if we were to continue
# supporting it in the future
'cloudpickle>=1.4.1',
'dask>=2.9.0',
'distributed>=2.11.1',
]
databricks = [
'requests>=2.26.0, <3',
'databricks-sql-connector>=2.0.0, <3.0.0',
]
datadog = [
'datadog>=0.14.0',
]
deprecated_api = [
'requests>=2.26.0',
]
doc = [
'click>=8.0',
'sphinx>=4.4.0',
# Docutils 0.17.0 converts generated <div class="section"> into <section> and breaks our doc formatting
# By adding a lot of whitespace separation. This limit can be lifted when we update our doc to handle
# <section> tags for sections
'docutils<0.17.0',
# Without this, Sphinx goes into a _very_ large backtrack on Python 3.7,
# even though Sphinx 4.4.0 has this but with python_version<3.10.
'importlib-metadata>=4.4; python_version < "3.8"',
'sphinx-airflow-theme',
'sphinx-argparse>=0.1.13',
'sphinx-autoapi>=1.8.0',
'sphinx-copybutton',
'sphinx-jinja>=2.0',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
'sphinxcontrib-redoc>=1.6.0',
'sphinxcontrib-spelling>=7.3',
]
docker = [
'docker>=5.0.3',
]
drill = ['sqlalchemy-drill>=1.1.0', 'sqlparse>=0.4.1']
druid = [
'pydruid>=0.4.1',
]
elasticsearch = [
'elasticsearch>7',
'elasticsearch-dbapi',
'elasticsearch-dsl>=5.0.0',
]
exasol = ['pyexasol>=0.5.1', pandas_requirement]
facebook = [
'facebook-business>=6.0.2',
]
flask_appbuilder_authlib = [
'authlib',
]
github = [
'pygithub',
]
google = [
# Google has very clear rules on what dependencies should be used. All the limits below
# follow strict guidelines of Google Libraries as quoted here:
# While this issue is open, dependents of google-api-core, google-cloud-core, and google-auth
# should preserve >1, <3 pins on these packages.
# https://github.com/googleapis/google-cloud-python/issues/10566
# Some of Google Packages are limited to <2.0.0 because 2.0.0 releases of the libraries
# Introduced breaking changes across the board. Those libraries should be upgraded soon
# TODO: Upgrade all Google libraries that are limited to <2.0.0
'PyOpenSSL',
'google-ads>=15.1.1',
'google-api-core>=2.7.0,<3.0.0',
'google-api-python-client>=1.6.0,<2.0.0',
'google-auth>=1.0.0',
'google-auth-httplib2>=0.0.1',
'google-cloud-aiplatform>=1.7.1,<2.0.0',
'google-cloud-automl>=2.1.0',
'google-cloud-bigquery-datatransfer>=3.0.0',
'google-cloud-bigtable>=1.0.0,<2.0.0',
'google-cloud-build>=3.0.0',
'google-cloud-container>=2.2.0,<3.0.0',
'google-cloud-datacatalog>=3.0.0',
'google-cloud-dataplex>=0.1.0',
'google-cloud-dataproc>=3.1.0',
'google-cloud-dataproc-metastore>=1.2.0,<2.0.0',
'google-cloud-dlp>=0.11.0,<2.0.0',
'google-cloud-kms>=2.0.0',
'google-cloud-language>=1.1.1,<2.0.0',
'google-cloud-logging>=2.1.1',
'google-cloud-memcache>=0.2.0',
'google-cloud-monitoring>=2.0.0',
'google-cloud-os-login>=2.0.0',
'google-cloud-orchestration-airflow>=1.0.0,<2.0.0',
'google-cloud-pubsub>=2.0.0',
'google-cloud-redis>=2.0.0',
'google-cloud-secret-manager>=0.2.0,<2.0.0',
'google-cloud-spanner>=1.10.0,<2.0.0',
'google-cloud-speech>=0.36.3,<2.0.0',
'google-cloud-storage>=1.30,<2.0.0',
'google-cloud-tasks>=2.0.0',
'google-cloud-texttospeech>=0.4.0,<2.0.0',
'google-cloud-translate>=1.5.0,<2.0.0',
'google-cloud-videointelligence>=1.7.0,<2.0.0',
'google-cloud-vision>=0.35.2,<2.0.0',
'google-cloud-workflows>=0.1.0,<2.0.0',
'grpcio-gcp>=0.2.2',
'httpx',
'json-merge-patch>=0.2',
'looker-sdk>=22.2.0',
'pandas-gbq',
pandas_requirement,
'sqlalchemy-bigquery>=1.2.1',
]
grpc = [
# Google has very clear rules on what dependencies should be used. All the limits below
# follow strict guidelines of Google Libraries as quoted here:
# While this issue is open, dependents of google-api-core, google-cloud-core, and google-auth
# should preserve >1, <3 pins on these packages.
# https://github.com/googleapis/google-cloud-python/issues/10566
'google-auth>=1.0.0, <3.0.0',
'google-auth-httplib2>=0.0.1',
'grpcio>=1.15.0',
]
hashicorp = [
'hvac>=0.10',
]
hdfs = [
'snakebite-py3',
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
hive = [
'hmsclient>=0.1.0',
'pyhive[hive]>=0.6.0',
# In case of Python 3.9 the sasl library needs to be installed in version 0.3.1 or higher,
# because only that version supports Python 3.9. For other Python versions pyhive[hive] pulls
# the sasl library anyway (and there the sasl library version is not relevant)
'sasl>=0.3.1; python_version>="3.9"',
'thrift>=0.9.2',
pandas_requirement,
]
http = [
# The 2.26.0 release of requests got rid of the chardet LGPL mandatory dependency, allowing us to
# release it as a requirement for airflow
'requests>=2.26.0',
]
http_provider = [
'apache-airflow-providers-http',
]
influxdb = [
'influxdb-client>=1.19.0',
pandas_requirement,
]
jdbc = [
'jaydebeapi>=1.1.1',
]
jenkins = [
'python-jenkins>=1.0.0',
]
jira = [
'JIRA>1.0.7',
]
kerberos = [
'pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
]
kubernetes = [
'cryptography>=2.0.0',
# The Kubernetes API is known to introduce problems when upgraded to a MAJOR version. Airflow Core
# Uses Kubernetes for Kubernetes executor, and we also know that Kubernetes Python client follows SemVer
# (https://github.com/kubernetes-client/python#compatibility). This is a crucial component of Airflow
# So we should limit it to the next MAJOR version and only deliberately bump the version when we
# tested it, and we know it can be bumped. Bumping this version should also be connected with
# limiting minimum airflow version supported in cncf.kubernetes provider, due to the
# potential breaking changes in Airflow Core as well (kubernetes is added as extra, so Airflow
# core is not hard-limited via install-requirements, only by extra).
'kubernetes>=21.7.0,<24',
]
kylin = ['kylinpy>=2.6']
ldap = [
'ldap3>=2.5.1',
'python-ldap',
]
leveldb = ['plyvel; platform_machine != "aarch64"']
mongo = [
'dnspython>=1.13.0',
# pymongo 4.0.0 removes connection option `ssl_cert_reqs` which is used in providers-mongo/2.2.0
# TODO: Upgrade to pymongo 4.0.0+
'pymongo>=3.6.0,<4.0.0',
]
mssql = [
'pymssql>=2.1.5; platform_machine != "aarch64"',
]
mysql = [
'mysql-connector-python>=8.0.11; platform_machine != "aarch64"',
'mysqlclient>=1.3.6; platform_machine != "aarch64"',
]
neo4j = ['neo4j>=4.2.1']
odbc = [
'pyodbc',
]
opsgenie = [
'opsgenie-sdk>=2.1.5',
]
oracle = [
'cx_Oracle>=5.1.2',
]
pagerduty = [
'pdpyras>=4.1.2',
]
pandas = [
pandas_requirement,
]
papermill = [
'papermill[all]>=1.2.1',
'scrapbook[all]',
]
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = [
# pinotdb v0.1.1 may still work with older versions of Apache Pinot, but we've confirmed that it
# causes a problem with newer versions.
'pinotdb>0.1.2',
]
plexus = [
'arrow>=0.16.0',
]
postgres = [
'psycopg2-binary>=2.7.4',
]
presto = [
# The limit to Presto 0.8 is there for an unknown reason
# TODO: Remove the limit
'presto-python-client>=0.7.0,<0.8',
pandas_requirement,
]
psrp = [
'pypsrp>=0.8',
]
qubole = [
'qds-sdk>=1.10.4',
]
rabbitmq = [
'amqp',
]
redis = [
# Redis 4 introduced a number of changes that likely need testing including mixins in redis commands
# as well as unquoting URLs with `urllib.parse.unquote`:
# https://github.com/redis/redis-py/blob/master/CHANGES
# TODO: upgrade to support redis package >=4
'redis~=3.2',
]
salesforce = ['simple-salesforce>=1.0.0', 'tableauserverclient', pandas_requirement]
samba = [
'smbprotocol>=1.5.0',
]
segment = [
'analytics-python>=1.2.9',
]
sendgrid = [
'sendgrid>=6.0.0',
]
sentry = [
'blinker>=1.1',
'sentry-sdk>=0.8.0',
]
singularity = ['spython>=0.0.56']
slack = [
'slack_sdk>=3.0.0',
]
snowflake = [
'snowflake-connector-python>=2.4.1',
'snowflake-sqlalchemy>=1.1.0',
]
spark = [
'pyspark',
]
ssh = [
'paramiko>=2.6.0',
'pysftp>=0.2.9',
'sshtunnel>=0.3.2',
]
statsd = [
'statsd>=3.3.0',
]
tableau = [
'tableauserverclient',
]
telegram = [
'python-telegram-bot>=13.0',
]
trino = [
'trino>=0.301.0',
pandas_requirement,
]
vertica = [
'vertica-python>=0.5.1',
]
virtualenv = [
'virtualenv',
]
webhdfs = [
'hdfs[avro,dataframe,kerberos]>=2.0.4',
]
winrm = [
'pywinrm>=0.4',
]
yandex = [
'yandexcloud>=0.146.0',
]
zendesk = [
'zenpy>=2.0.24',
]
# End dependencies group
# Mypy 0.900 and above ships only with stubs from stdlib so if we need other stubs, we need to install them
# manually as `types-*`. See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports
# for details. We want to install them explicitly because we want to eventually move to
# mypyd which does not support installing the types dynamically with --install-types
mypy_dependencies = [
# TODO: upgrade to newer versions of MyPy continuously as they are released
'mypy==0.910',
'types-boto',
'types-certifi',
'types-croniter',
'types-Deprecated',
'types-docutils',
'types-freezegun',
'types-paramiko',
'types-protobuf',
'types-python-dateutil',
'types-python-slugify',
'types-pytz',
'types-redis',
'types-requests',
'types-setuptools',
'types-termcolor',
'types-tabulate',
'types-toml',
'types-Markdown',
'types-PyMySQL',
'types-PyYAML',
]
# Dependencies needed for development only
devel_only = [
'aws_xray_sdk',
'beautifulsoup4>=4.7.1',
'black',
'blinker',
'bowler',
'click>=8.0',
'coverage',
'filelock',
'flake8>=3.6.0',
'flake8-colors',
'flaky',
'freezegun',
# Github3 version 3.1.2 requires PyJWT>=2.3.0 which clashes with Flask App Builder where PyJWT is <2.0.0
# Actually GitHub3.1.0 already introduced PyJWT>=2.3.0 but so far `pip` was able to resolve it without
# getting into a long backtracking loop and figure out that github3 3.0.0 version is the right version
# similarly limiting it to 3.1.2 causes pip not to enter the backtracking loop. Apparently when there
# are 3 versions with PyJWT>=2.3.0 (3.1.0, 3.1.1 an 3.1.2) pip enters into backtrack loop and fails
# to resolve that github3 3.0.0 is the right version to use.
# This limitation could be removed if PyJWT limitation < 2.0.0 is dropped from FAB or when
# pip resolution is improved to handle the case. The issue which describes this PIP behaviour
# and hopefully allowing to improve it is tracked in https://github.com/pypa/pip/issues/10924
'github3.py<3.1.0',
'gitpython',
'ipdb',
'jira',
'jsondiff',
'mongomock',
'moto[glue]>=3.1.6',
'parameterized',
'paramiko',
'pipdeptree',
'pre-commit',
'pypsrp',
'pygithub',
'pysftp',
# Pytest 7 has been released in February 2022 and we should attempt to upgrade and remove the limit
# It contains a number of potential breaking changes but none of them looks breaking our use
# https://docs.pytest.org/en/latest/changelog.html#pytest-7-0-0-2022-02-03
# TODO: upgrade it and remove the limit
'pytest~=6.0',
'pytest-asyncio',
'pytest-cov',
'pytest-instafail',
# We should attempt to remove the limit when we upgrade Pytest
# TODO: remove the limit when we upgrade pytest
'pytest-rerunfailures~=9.1',
'pytest-timeouts',
'pytest-xdist',
'python-jose',
'pywinrm',
'qds-sdk>=1.9.6',
'pytest-httpx',
'requests_mock',
'rich_click',
'semver',
'towncrier',
'twine',
'wheel',
'yamllint',
]
devel = cgroups + devel_only + doc + kubernetes + mypy_dependencies + mysql + pandas + password
devel_hadoop = devel + hdfs + hive + kerberos + presto + webhdfs
# Dict of all providers which are part of the Apache Airflow repository together with their requirements
PROVIDERS_REQUIREMENTS: Dict[str, List[str]] = {
'airbyte': http_provider,
'alibaba': alibaba,
'amazon': amazon,
'apache.beam': apache_beam,
'apache.cassandra': cassandra,
'apache.drill': drill,
'apache.druid': druid,
'apache.hdfs': hdfs,
'apache.hive': hive,
'apache.kylin': kylin,
'apache.livy': http_provider,
'apache.pig': [],
'apache.pinot': pinot,
'apache.spark': spark,
'apache.sqoop': [],
'arangodb': arangodb,
'asana': asana,
'celery': celery,
'cloudant': cloudant,
'cncf.kubernetes': kubernetes,
'databricks': databricks,
'datadog': datadog,
'dbt.cloud': http_provider,
'dingding': [],
'discord': [],
'docker': docker,
'elasticsearch': elasticsearch,
'exasol': exasol,
'facebook': facebook,
'ftp': [],
'github': github,
'google': google,
'grpc': grpc,
'hashicorp': hashicorp,
'http': http,
'imap': [],
'influxdb': influxdb,
'jdbc': jdbc,
'jenkins': jenkins,
'jira': jira,
'microsoft.azure': azure,
'microsoft.mssql': mssql,
'microsoft.psrp': psrp,
'microsoft.winrm': winrm,
'mongo': mongo,
'mysql': mysql,
'neo4j': neo4j,
'odbc': odbc,
'openfaas': [],
'opsgenie': opsgenie,
'oracle': oracle,
'pagerduty': pagerduty,
'papermill': papermill,
'plexus': plexus,
'postgres': postgres,
'presto': presto,
'qubole': qubole,
'redis': redis,
'salesforce': salesforce,
'samba': samba,
'segment': segment,
'sendgrid': sendgrid,
'sftp': ssh,
'singularity': singularity,
'slack': slack,
'snowflake': snowflake,
'sqlite': [],
'ssh': ssh,
'tableau': tableau,
'telegram': telegram,
'trino': trino,
'vertica': vertica,
'yandex': yandex,
'zendesk': zendesk,
}
# Those are all additional extras which do not have their own 'providers'
# The 'apache.atlas' and 'apache.webhdfs' are extras that provide additional libraries
# but they do not have separate providers (yet?), they are merely there to add extra libraries
# That can be used in custom python/bash operators.
ADDITIONAL_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {
'apache.atlas': atlas,
'apache.webhdfs': webhdfs,
}
# Those are extras that are extensions of the 'core' Airflow. They provide additional features
# To airflow core. They do not have separate providers because they do not have any operators/hooks etc.
CORE_EXTRAS_REQUIREMENTS: Dict[str, List[str]] = {
'async': async_packages,
'celery': celery, # also has provider, but it extends the core with the CeleryExecutor
'cgroups': cgroups,
'cncf.kubernetes': kubernetes, # also has provider, but it extends the core with the KubernetesExecutor
'dask': dask,
'deprecated_api': deprecated_api,
'github_enterprise': flask_appbuilder_authlib,
'google_auth': flask_appbuilder_authlib,
'kerberos': kerberos,
'ldap': ldap,
'leveldb': leveldb,
'pandas': pandas,
'password': password,
'rabbitmq': rabbitmq,
'sentry': sentry,
'statsd': statsd,
'virtualenv': virtualenv,
}
EXTRAS_REQUIREMENTS: Dict[str, List[str]] = deepcopy(CORE_EXTRAS_REQUIREMENTS)
def add_extras_for_all_providers() -> None:
"""
Adds extras for all providers.
By default all providers have the same extra name as provider id, for example
'apache.hive' extra has 'apache.hive' provider requirement.
"""
for provider_name, provider_requirement in PROVIDERS_REQUIREMENTS.items():
EXTRAS_REQUIREMENTS[provider_name] = provider_requirement
def add_additional_extras() -> None:
"""Adds extras for all additional extras."""
for extra_name, extra_requirement in ADDITIONAL_EXTRAS_REQUIREMENTS.items():
EXTRAS_REQUIREMENTS[extra_name] = extra_requirement
add_extras_for_all_providers()
add_additional_extras()
#############################################################################################################
# The whole section can be removed in Airflow 3.0 as those old aliases are deprecated in 2.* series
#############################################################################################################
# Dictionary of aliases from 1.10 - deprecated in Airflow 2.*
EXTRAS_DEPRECATED_ALIASES: Dict[str, str] = {
'atlas': 'apache.atlas',
'aws': 'amazon',
'azure': 'microsoft.azure',
'cassandra': 'apache.cassandra',
'crypto': '', # All crypto requirements are installation requirements of core Airflow
'druid': 'apache.druid',
'gcp': 'google',
'gcp_api': 'google',
'hdfs': 'apache.hdfs',
'hive': 'apache.hive',
'kubernetes': 'cncf.kubernetes',
'mssql': 'microsoft.mssql',
'pinot': 'apache.pinot',
'qds': 'qubole',
's3': 'amazon',
'spark': 'apache.spark',
'webhdfs': 'apache.webhdfs',
'winrm': 'microsoft.winrm',
}
EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS: List[str] = [
"crypto",
"webhdfs",
]
def add_extras_for_all_deprecated_aliases() -> None:
"""
Add extras for all deprecated aliases. Requirements for those deprecated aliases are the same
as the extras they are replaced with.
The requirements are not copies - those are the same lists as for the new extras. This is intended.
Thanks to that if the original extras are later extended with providers, aliases are extended as well.
"""
for alias, extra in EXTRAS_DEPRECATED_ALIASES.items():
requirements = EXTRAS_REQUIREMENTS.get(extra) if extra != '' else []
if requirements is None:
raise Exception(f"The extra {extra} is missing for deprecated alias {alias}")
EXTRAS_REQUIREMENTS[alias] = requirements
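# Illustrative example (not part of the original file): after this runs,
# EXTRAS_REQUIREMENTS['aws'] is the very same list object as EXTRAS_REQUIREMENTS['amazon'],
# while the 'crypto' alias (mapped to '') resolves to an empty list.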
def add_all_deprecated_provider_packages() -> None:
"""
For deprecated aliases that are providers, we will swap the providers requirements to instead
be the provider itself.
e.g. {"kubernetes": ["kubernetes>=3.0.0, <12.0.0", ...]} becomes
{"kubernetes": ["apache-airflow-provider-cncf-kubernetes"]}
"""
for alias, provider in EXTRAS_DEPRECATED_ALIASES.items():
if alias in EXTRAS_DEPRECATED_ALIASES_NOT_PROVIDERS:
continue
replace_extra_requirement_with_provider_packages(alias, [provider])
add_extras_for_all_deprecated_aliases()
#############################################################################################################
# End of deprecated section
#############################################################################################################
# This is list of all providers. It's a shortcut for anyone who would like to easily get list of
# All providers. It is used by pre-commits.
ALL_PROVIDERS = list(PROVIDERS_REQUIREMENTS.keys())
ALL_DB_PROVIDERS = [
'apache.cassandra',
'apache.drill',
'apache.druid',
'apache.hdfs',
'apache.hive',
'apache.pinot',
'arangodb',
'cloudant',
'databricks',
'exasol',
'influxdb',
'microsoft.mssql',
'mongo',
'mysql',
'neo4j',
'postgres',
'presto',
'trino',
'vertica',
]
# Special requirements for all database-related providers. They are de-duplicated.
all_dbs = list({req for db_provider in ALL_DB_PROVIDERS for req in PROVIDERS_REQUIREMENTS[db_provider]})
# Requirements for all "user" extras (no devel). They are de-duplicated. Note that we do not need
# to separately add providers requirements - they have been already added as 'providers' extras above
_all_requirements = list({req for extras_reqs in EXTRAS_REQUIREMENTS.values() for req in extras_reqs})
# All user extras here
EXTRAS_REQUIREMENTS["all"] = _all_requirements
# All db user extras here
EXTRAS_REQUIREMENTS["all_dbs"] = all_dbs + pandas
# This can be simplified to devel_hadoop + _all_requirements due to inclusions
# but we keep it for explicit sake. We are de-duplicating it anyway.
devel_all = list(set(_all_requirements + doc + devel + devel_hadoop))
# Those are packages excluded for "all" dependencies
PACKAGES_EXCLUDED_FOR_ALL = []
PACKAGES_EXCLUDED_FOR_ALL.extend(
[
'snakebite',
]
)
def is_package_excluded(package: str, exclusion_list: List[str]) -> bool:
"""
Checks if package should be excluded.
:param package: package name (beginning of it)
:param exclusion_list: list of excluded packages
:return: true if package should be excluded
"""
return any(package.startswith(excluded_package) for excluded_package in exclusion_list)
devel_all = [
package
for package in devel_all
if not is_package_excluded(package=package, exclusion_list=PACKAGES_EXCLUDED_FOR_ALL)
]
devel_ci = devel_all
# Those are extras that we have to add for development purposes
# They can be use to install some predefined set of dependencies.
EXTRAS_REQUIREMENTS["doc"] = doc
EXTRAS_REQUIREMENTS["devel"] = devel # devel already includes doc
EXTRAS_REQUIREMENTS["devel_hadoop"] = devel_hadoop # devel_hadoop already includes devel
EXTRAS_REQUIREMENTS["devel_all"] = devel_all
EXTRAS_REQUIREMENTS["devel_ci"] = devel_ci
def sort_extras_requirements() -> Dict[str, List[str]]:
"""
The dictionary order is preserved when keys() are retrieved.
Sort both the extras and their lists of dependencies to make it easier to analyse problems:
external packages come first, and if providers are added they are appended at the end of the lists.
"""
sorted_requirements = dict(sorted(EXTRAS_REQUIREMENTS.items()))
for extra_list in sorted_requirements.values():
extra_list.sort()
return sorted_requirements
EXTRAS_REQUIREMENTS = sort_extras_requirements()
# Those providers are pre-installed always when airflow is installed.
# Those providers do not have dependency on airflow2.0 because that would lead to circular dependencies.
# This is not a problem for PIP but some tools (pipdeptree) show those as a warning.
PREINSTALLED_PROVIDERS = [
'ftp',
'http',
'imap',
'sqlite',
]
def get_provider_package_from_package_id(package_id: str) -> str:
"""
Builds the name of the provider package out of the package id provided.
:param package_id: id of the package (like amazon or microsoft.azure)
:return: full name of package in PyPI
"""
package_suffix = package_id.replace(".", "-")
return f"apache-airflow-providers-{package_suffix}"
def get_excluded_providers() -> List[str]:
"""Returns packages excluded for the current python version."""
return []
def get_all_provider_packages() -> str:
"""Returns all provider packages configured in setup.py"""
excluded_providers = get_excluded_providers()
return " ".join(
get_provider_package_from_package_id(package)
for package in PROVIDERS_REQUIREMENTS
if package not in excluded_providers
)
class AirflowDistribution(Distribution):
"""The setuptools.Distribution subclass with Airflow specific behaviour"""
def __init__(self, attrs=None):
super().__init__(attrs)
self.install_requires = None
def parse_config_files(self, *args, **kwargs) -> None:
"""
Ensure that when we have been asked to install providers from sources
that we don't *also* try to install those providers from PyPI.
Also we should make sure that in this case we copy provider.yaml files so that
Providers manager can find package information.
"""
super().parse_config_files(*args, **kwargs)
if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':
self.install_requires = [
req for req in self.install_requires if not req.startswith('apache-airflow-providers-')
]
provider_yaml_files = glob.glob("airflow/providers/**/provider.yaml", recursive=True)
for provider_yaml_file in provider_yaml_files:
provider_relative_path = relpath(provider_yaml_file, os.path.join(my_dir, "airflow"))
self.package_data['airflow'].append(provider_relative_path)
else:
self.install_requires.extend(
[get_provider_package_from_package_id(package_id) for package_id in PREINSTALLED_PROVIDERS]
)
def replace_extra_requirement_with_provider_packages(extra: str, providers: List[str]) -> None:
"""
Replaces extra requirement with provider package. The intention here is that when
the provider is added as dependency of extra, there is no need to add the dependencies
separately. This is not needed and even harmful, because in case of future versions of
the provider, the requirements might change, so hard-coding requirements from the version
that was available at the release time might cause dependency conflicts in the future.
Say for example that you have salesforce provider with those deps:
{ 'salesforce': ['simple-salesforce>=1.0.0', 'tableauserverclient'] }
Initially ['salesforce'] extra has those requirements and it works like that when you install
it when INSTALL_PROVIDERS_FROM_SOURCES is set to `true` (during the development). However, when
the production installation is used, The dependencies are changed:
{ 'salesforce': ['apache-airflow-providers-salesforce'] }
And then, 'apache-airflow-providers-salesforce' package has those 'install_requires' dependencies:
['simple-salesforce>=1.0.0', 'tableauserverclient']
So transitively 'salesforce' extra has all the requirements it needs and in case the provider
changes its dependencies, they will transitively change as well.
In the constraint mechanism we save both the provider versions and their dependencies'
versions, which means that installation using constraints is repeatable.
For K8s and Celery which are both "Core executors" and "Providers" we have to
add the base dependencies to core as well, in order to mitigate problems where
a newer version of the provider will have less strict limits. This should be done for both
extras and their deprecated aliases. This is not full protection, however: the way
extras work, this will not add "hard" limits for Airflow or for users who do not use
constraints.
:param extra: Name of the extra to add providers to
:param providers: list of provider ids
"""
if extra in ['cncf.kubernetes', 'kubernetes', 'celery']:
EXTRAS_REQUIREMENTS[extra].extend(
[get_provider_package_from_package_id(package_name) for package_name in providers]
)
else:
EXTRAS_REQUIREMENTS[extra] = [
get_provider_package_from_package_id(package_name) for package_name in providers
]
def add_provider_packages_to_extra_requirements(extra: str, providers: List[str]) -> None:
"""
Adds provider packages as requirements to extra. This is used to add provider packages as requirements
to the "bulk" kind of extras. Those bulk extras do not have the detailed 'extra' requirements as
initial values, so instead of replacing them (see previous function) we can extend them.
:param extra: Name of the extra to add providers to
:param providers: list of provider ids
"""
EXTRAS_REQUIREMENTS[extra].extend(
[get_provider_package_from_package_id(package_name) for package_name in providers]
)
def add_all_provider_packages() -> None:
"""
In case of regular installation (providers installed from packages), we should add extra dependencies to
Airflow - to get the providers automatically installed when those extras are installed.
For providers installed from sources we skip that step. That helps to test and install airflow with
all packages in CI - for example when new providers are added, otherwise the installation would fail
as the new provider is not yet in PyPI.
"""
for provider in ALL_PROVIDERS:
replace_extra_requirement_with_provider_packages(provider, [provider])
add_provider_packages_to_extra_requirements("all", ALL_PROVIDERS)
add_provider_packages_to_extra_requirements("devel_ci", ALL_PROVIDERS)
add_provider_packages_to_extra_requirements("devel_all", ALL_PROVIDERS)
add_provider_packages_to_extra_requirements("all_dbs", ALL_DB_PROVIDERS)
add_provider_packages_to_extra_requirements(
"devel_hadoop", ["apache.hdfs", "apache.hive", "presto", "trino"]
)
add_all_deprecated_provider_packages()
class Develop(develop_orig):
"""Forces removal of providers in editable mode."""
def run(self) -> None: # type: ignore
self.announce('Installing in editable mode. Uninstalling provider packages!', level=log.INFO)
# We need to run "python3 -m pip" because it might be that older PIP binary is in the path
# And it results with an error when running pip directly (cannot import pip module)
# also PIP does not have a stable API so we have to run subprocesses ¯\_(ツ)_/¯
try:
installed_packages = (
subprocess.check_output(["python3", "-m", "pip", "freeze"]).decode().splitlines()
)
airflow_provider_packages = [
package_line.split("=")[0]
for package_line in installed_packages
if package_line.startswith("apache-airflow-providers")
]
self.announce(f'Uninstalling {airflow_provider_packages}!', level=log.INFO)
subprocess.check_call(["python3", "-m", "pip", "uninstall", "--yes", *airflow_provider_packages])
except subprocess.CalledProcessError as e:
self.announce(f'Error when uninstalling airflow provider packages: {e}!', level=log.WARN)
super().run()
class Install(install_orig):
"""Forces installation of providers from sources in editable mode."""
def run(self) -> None:
self.announce('Standard installation. Providers are installed from packages', level=log.INFO)
super().run()
def do_setup() -> None:
"""
Perform the Airflow package setup.
Most values come from setup.cfg, only the dynamically calculated ones are passed to setup
function call. See https://setuptools.readthedocs.io/en/latest/userguide/declarative_config.html
"""
setup_kwargs = {}
def include_provider_namespace_packages_when_installing_from_sources() -> None:
"""
When installing providers from sources we install all namespace packages found below airflow,
including airflow and provider packages, otherwise defaults from setup.cfg control this.
The kwargs in setup() call override those that are specified in setup.cfg.
"""
if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':
setup_kwargs['packages'] = find_namespace_packages(include=['airflow*'])
include_provider_namespace_packages_when_installing_from_sources()
if os.getenv(INSTALL_PROVIDERS_FROM_SOURCES) == 'true':
print("Installing providers from sources. Skip adding providers as dependencies")
else:
add_all_provider_packages()
write_version()
setup(
distclass=AirflowDistribution,
version=version,
extras_require=EXTRAS_REQUIREMENTS,
download_url=('https://archive.apache.org/dist/airflow/' + version),
cmdclass={
'extra_clean': CleanCommand,
'compile_assets': CompileAssets,
'list_extras': ListExtras,
'install': Install, # type: ignore
'develop': Develop,
},
test_suite='setup.airflow_test_suite',
**setup_kwargs, # type: ignore
)
if __name__ == "__main__":
do_setup() # comment
| 34.572873 | 110 | 0.669428 |
024796d43fb3318444b50d74f16942094bda2f6c | 235 | py | Python | src/fractal/world/fractum/placement/placement/analysis.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | ["MIT"] | null | null | null | src/fractal/world/fractum/placement/placement/analysis.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | ["MIT"] | null | null | null | src/fractal/world/fractum/placement/placement/analysis.py | jedhsu/fractal | 97833ddc5063fae72352cf590738fef508c02f0c | ["MIT"] | null | null | null |
from dataclasses import dataclass
@dataclass
class PlacementAnalysis(Placement):
prior_probability: float # Prior probability as given by the oracle
energy: float # Cumulated Q-value for the action (Q = W/N)
excites: int
| 26.111111 | 70 | 0.744681 |
93c15870bd3d644ee75092f5a428bdda4835f206 | 462 | py | Python | Day 7/day_7.py | loveyourpassion/comm | 8295a72557d928ccb9a9d37fad73692ea82f3035 | ["MIT"] | 149 | 2020-03-18T17:46:09.000Z | 2022-03-17T09:30:18.000Z | Day 7/day_7.py | loveyourpassion/comm | 8295a72557d928ccb9a9d37fad73692ea82f3035 | ["MIT"] | 2 | 2020-10-15T17:17:13.000Z | 2021-04-30T10:34:52.000Z | Day 7/day_7.py | loveyourpassion/comm | 8295a72557d928ccb9a9d37fad73692ea82f3035 | ["MIT"] | 90 | 2020-03-23T19:06:01.000Z | 2022-03-10T11:08:04.000Z |
class Animal():
name = 'Amy'
noise = "Grunt"
size = "Large"
color = "Brown"
hair = 'Covers body'
def get_color(self):
return self.color
def make_noise(self):
return self.noise
dog = Animal()
dog.make_noise()
dog.size = "small"
dog.color = "black"
dog.hair = "hairless"
class Dog(Animal):
name = 'Jon'
size = "small"
color = "black"
age = 19
jon = Dog()
jon.color = 'white'
jon.name = 'Jon Snow'
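# Illustrative note (not part of the original file): Dog inherits Animal's methods,
# so jon.make_noise() returns "Grunt" and, after the override above, jon.get_color()
# returns 'white'.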
| 14.903226 | 25 | 0.571429 |
37a69527dd77246a5e343319211c833e9987164b | 8,708 | py | Python | main.py | JibbyJames/coursera-data-scraper | 7b012576795e7724b2495ac51f3eb15dfbbdaae1 | ["Apache-2.0"] | null | null | null | main.py | JibbyJames/coursera-data-scraper | 7b012576795e7724b2495ac51f3eb15dfbbdaae1 | ["Apache-2.0"] | null | null | null | main.py | JibbyJames/coursera-data-scraper | 7b012576795e7724b2495ac51f3eb15dfbbdaae1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
import json
import time
import urllib.request
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
def expand_list(df, list_column, new_column):
lens_of_lists = df[list_column].apply(len)
origin_rows = range(df.shape[0])
destination_rows = np.repeat(origin_rows, lens_of_lists)
non_list_cols = (
[idx for idx, col in enumerate(df.columns)
if col != list_column]
)
expanded_df = df.iloc[destination_rows, non_list_cols].copy()
expanded_df[new_column] = (
[item for items in df[list_column] for item in items]
)
expanded_df.reset_index(inplace=True, drop=True)
return expanded_df
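# Illustrative example of expand_list (not part of the original script); the frame below is a made-up input:
#   df = pd.DataFrame({'id': [1], 'tags': [['a', 'b']]})
#   expand_list(df, 'tags', 'tag')
#   -> two rows sharing id=1, with 'tag' values 'a' and 'b'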
run_locally = True
wait_length = 0.5
algolia_url = 'https://lua9b20g37-dsn.algolia.net/1/indexes/test_products?x-algolia-application-id=LUA9B20G37&x-algolia-api-key=dcc55281ffd7ba6f24c3a9b18288499b&hitsPerPage=1000&page='
product_pages = 4
## Fetch the data ##########################################################
if(run_locally):
print(f"Reading courses from local file courses.json.")
courses_df = pd.read_json('courses.json', orient='records')
print(f"Reading courses from local file specializations.json.")
specs_df = pd.read_json('specializations.json', orient='records')
else:
all_products_list = []
# Loop through each of the pages returned for the all products request
for i in range(0, product_pages + 1):
# Request data from algolia for current page
with urllib.request.urlopen(f'{algolia_url}{i}') as url:
print(f"Fetching coursera program data on page {i}.")
page_data = json.loads(url.read().decode())
# Save page data to local json file.
with open(f'all-products-{i}.json', 'w') as outfile:
json.dump(page_data, outfile)
# Merge all products data into single list.
all_products_list = all_products_list + page_data['hits']
# Wait before scraping next data
time.sleep(wait_length)
# Convert raw products json data into datframe
all_products_df = pd.DataFrame.from_dict(all_products_list)
# Group Courses, and clean data before creating dict
courses_df = all_products_df.loc[all_products_df['entityType'] == 'COURSE'].reset_index(drop=True)
courses_df['id'] = courses_df.apply(lambda row: row['objectID'].replace('course~',''), axis=1)
courses_df = courses_df.set_index('id')
courses = courses_df.to_dict('index')
# Group Specializations, and clean data before creating dict
specs_df = all_products_df.loc[all_products_df['entityType'] == 'SPECIALIZATION'].reset_index(drop=True)
specs_df['id'] = specs_df.apply(lambda row: row['objectID'].replace('s12n~',''), axis=1)
specs_df = specs_df.set_index('id')
specs = specs_df.to_dict('index')
# Loop through all specializations to collect their courses
loop_length = len(specs.keys())
for index, spec_id in enumerate(list(specs.keys())[:loop_length]):
# Get specialization URL
specs[spec_id]['courses'] = []
spec_row = specs[spec_id]
slug = spec_row['objectUrl'].replace("/specializations/", "")
print(f"[{index+1}/{loop_length}] - Fetching course data for \"{slug}\"")
spec_url = f"https://www.coursera.org/api/onDemandSpecializations.v1?q=slug&slug={slug}&fields=courseIds,id"
# Make a request to that URL
with urllib.request.urlopen(spec_url) as url:
# Parse the JSON response.
spec_data = json.loads(url.read().decode())
course_ids = spec_data['elements'][0]['courseIds']
# Loop through each course
for course_id in course_ids:
# Check that we have a record of this course already from Algolia
if course_id not in courses:
print(f" - {course_id} - 404")
else:
# Initialize specs array for course if required.
if 'specializations' not in courses[course_id].keys():
courses[course_id]['specializations'] = []
print(f" - {courses[course_id]['name']}")
# Add Specialization to Course, and vice versa
if spec_id not in courses[course_id]['specializations']:
courses[course_id]['specializations'].append(spec_id)
if course_id not in specs[spec_id]['courses']:
specs[spec_id]['courses'].append(course_id)
# Wait before scraping next data
time.sleep(wait_length)
# Convert back to DF and save to local JSON
specs_df = pd.DataFrame.from_dict(specs, orient='index')
specs_df.to_json('specializations.json')
# Pricing Data for courses
loop_length = len(courses.keys())
for index, course_id in enumerate(list(courses.keys())[:loop_length]):
print(f"[{index+1}/{loop_length}] - Fetching price data for \"{courses[course_id]['name']}\"")
courses[course_id]['price'] = 0
price_url = f"https://www.coursera.org/api/productPrices.v3/VerifiedCertificate~{course_id}~GBP~GB"
try:
with urllib.request.urlopen(price_url) as url:
price_data = json.loads(url.read().decode())
courses[course_id]['price'] = price_data['elements'][0]['amount']
print(f'{courses[course_id]["name"]}: £{courses[course_id]["price"]}')
except:
print(f'{courses[course_id]["name"]}: ERROR - Not found')
# Wait before scraping next data
time.sleep(wait_length)
# Convert back to DF and save to JSON
courses_df = pd.DataFrame.from_dict(courses, orient='index')
courses_df.to_json('courses.json')
## Aggregate metrics ###########################################
# Add some fields for later use
specs_df['partners_str'] = specs_df.apply(lambda x : 'Offered by ' + ' & '.join(x['partners']),axis=1)
specs_df['specialization'] = specs_df['name'] + '\n' + specs_df['partners_str']
courses_df['partners_str'] = courses_df.apply(lambda x : 'Offered by ' + ' & '.join(x['partners']),axis=1)
courses_df['course'] = courses_df['name'] + '\n' + courses_df['partners_str']
# Expand the lists we want to aggregate in the specializations table
specs_df['courses'] = specs_df['courses'].apply(lambda d: d if isinstance(d, list) else [])
specs_with_expanded_courses_df = expand_list(specs_df, 'courses', 'course_id')
specs_with_expanded_partners_df = expand_list(specs_df, 'partners', 'partner_name')
# Join to the courses dataframe for additional metrics and clean columns names.
merged_specs_df = pd.merge(specs_with_expanded_courses_df, courses_df, left_on='course_id', right_index=True)
aggd_specs_df = merged_specs_df.groupby('specialization', as_index=False).sum()[['specialization','avgLearningHours_y','price']]
aggd_specs_df.rename(columns={'avgLearningHours_y': 'avgLearningHours'}, inplace=True)
## Plot some graphs ############################################
# Init Seaborn style
sns.set(style="whitegrid")
# What are some general stats on all specializations?
fig, axes = plt.subplots(4, 1)
sns.boxplot(x='enrollments', data=specs_df, ax=axes[0])
sns.boxplot(x='avgLearningHours', data=aggd_specs_df, ax=axes[1])
sns.boxplot(x='price', data=aggd_specs_df, ax=axes[2])
sns.boxplot(x='avgProductRating', data=specs_df, ax=axes[3])
# What are the most popular specializations?
top_specs_enrollments = specs_df.nlargest(15,'enrollments')
sns.barplot(x="enrollments", y="specialization", data=top_specs_enrollments)
# What are the most popular courses?
top_courses_enrollments = courses_df.nlargest(15,'enrollments')
sns.barplot(x="enrollments", y="course", data=courses_df)
# Are popular courses generally rated higher? (min number of enrollments)
sns.scatterplot(x="enrollments", y="avgProductRating", data=specs_df)
#courses_df.boxplot()
# Do longer courses have fewer enrollments?
# Scatter
# Do more expensive courses have fewer enrollments?
# Scatter
# Is there an organisation that provides the best courses?
# Does specialization duration have an impact on enrollments or reviews?
# Does price?
# What are the top ten courses by enrollments
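# Hedged sketch (not part of the original script): answering two of the questions
# above with the dataframes already built; all column names are the Algolia fields
# used earlier in this script.
top_ten_courses = courses_df.nlargest(10, 'enrollments')[['name', 'enrollments', 'price']]
print(top_ten_courses)
# Do more expensive courses have fewer enrollments? A quick scatter gives a first look.
sns.scatterplot(x='price', y='enrollments', data=courses_df)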
| 39.944954
| 184
| 0.639757
|
d09da574941d23c9c605c085d32feed9a0aeff78
| 1,545
|
py
|
Python
|
src/compas_fab/utilities/filesystem.py
|
Kathrin3010/compas_fab
|
18230b70479ab57635b24832762c340e41102c10
|
[
"MIT"
] | 1
|
2019-08-15T17:34:43.000Z
|
2019-08-15T17:34:43.000Z
|
src/compas_fab/utilities/filesystem.py
|
Kathrin3010/compas_fab
|
18230b70479ab57635b24832762c340e41102c10
|
[
"MIT"
] | 3
|
2019-07-30T13:50:51.000Z
|
2019-07-30T14:27:27.000Z
|
src/compas_fab/utilities/filesystem.py
|
yijiangh/compas_fab
|
d4e8e630d4daeb8981200456f2ef6b6a09ac88a8
|
[
"MIT"
] | null | null | null |
import os
__all__ = [
'list_files_in_directory'
]
def list_files_in_directory(directory, fullpath=False, extensions=[]):
"""This function lists just the files in a directory, not sub-directories.
Args:
directory (str): the directory to search for files.
        fullpath (:obj:`bool`, optional): specifies whether the returned list of
            strings contains full paths. Defaults to False.
extensions (:obj:`list` of :obj:`str`, optional): a list of allowed
extensions, e.g. ["jpg", "png"] if you just want to list images.
Defaults to empty list.
Returns:
        files (:obj:`list` of :obj:`str`): A list of file names as strings if any
            exist, otherwise an empty list.
"""
directory = os.path.abspath(directory)
files = []
extensions = [".%s" % ext for ext in extensions if ext[0] != "."]
for item in os.listdir(directory):
item_fullpath = os.path.join(directory, item)
if os.path.isfile(item_fullpath):
if len(extensions):
                # Keep the item only if it matches one of the allowed extensions.
                found = any(item.endswith(ext) for ext in extensions)
if not found:
continue
if fullpath:
files.append(item_fullpath)
else:
files.append(item)
return files
if __name__ == "__main__":
path = os.path.join(os.path.dirname(__file__), "..", "robots", "ur", "ur10", "model")
os.listdir(path)
print(list_files_in_directory(path, fullpath=True, extensions=["obj"]))
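    # Hedged extra demo (not in the original file): after the normalization above,
    # extensions may be passed with or without a leading dot; "mtl" is just an
    # illustrative second extension and may match nothing in this directory.
    print(list_files_in_directory(path, fullpath=False, extensions=[".obj", "mtl"]))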
| 34.333333
| 95
| 0.598058
|
def68f6235f4a2b14d62ec0cc6cca1c36cde911a
| 13,973
|
py
|
Python
|
highway_env/road/road.py
|
ma-rapp/highway-env
|
d5ce0c38a615b9e5a111bcaf7183045265cbd190
|
[
"MIT"
] | 1
|
2021-07-05T09:14:39.000Z
|
2021-07-05T09:14:39.000Z
|
highway_env/road/road.py
|
Derekabc/highway-env
|
f26387926be88cf1f9e2e99a9ab99439687d0fd8
|
[
"MIT"
] | null | null | null |
highway_env/road/road.py
|
Derekabc/highway-env
|
f26387926be88cf1f9e2e99a9ab99439687d0fd8
|
[
"MIT"
] | 1
|
2020-11-03T01:10:54.000Z
|
2020-11-03T01:10:54.000Z
|
import numpy as np
import pandas as pd
import logging
from typing import List, Tuple, Dict, TYPE_CHECKING, Optional
from highway_env.road.lane import LineType, StraightLane, AbstractLane
from highway_env.road.objects import Landmark
if TYPE_CHECKING:
from highway_env.vehicle import kinematics
from highway_env.road import objects
logger = logging.getLogger(__name__)
LaneIndex = Tuple[str, str, int]
Route = List[LaneIndex]
class RoadNetwork(object):
graph: Dict[str, Dict[str, List[AbstractLane]]]
def __init__(self):
self.graph = {}
def add_lane(self, _from: str, _to: str, lane: AbstractLane) -> None:
"""
A lane is encoded as an edge in the road network.
:param _from: the node at which the lane starts.
:param _to: the node at which the lane ends.
:param AbstractLane lane: the lane geometry.
"""
if _from not in self.graph:
self.graph[_from] = {}
if _to not in self.graph[_from]:
self.graph[_from][_to] = []
self.graph[_from][_to].append(lane)
def get_lane(self, index: LaneIndex) -> AbstractLane:
"""
Get the lane geometry corresponding to a given index in the road network.
:param index: a tuple (origin node, destination node, lane id on the road).
:return: the corresponding lane geometry.
"""
_from, _to, _id = index
if _id is None and len(self.graph[_from][_to]) == 1:
_id = 0
return self.graph[_from][_to][_id]
def get_closest_lane_index(self, position: np.ndarray) -> LaneIndex:
"""
Get the index of the lane closest to a world position.
:param position: a world position [m].
:return: the index of the closest lane.
"""
indexes, distances = [], []
for _from, to_dict in self.graph.items():
for _to, lanes in to_dict.items():
for _id, l in enumerate(lanes):
distances.append(l.distance(position))
indexes.append((_from, _to, _id))
return indexes[int(np.argmin(distances))]
def next_lane(self, current_index: LaneIndex, route: Route = None, position: np.ndarray = None,
np_random: np.random.RandomState = np.random) -> LaneIndex:
"""
Get the index of the next lane that should be followed after finishing the current lane.
- If a plan is available and matches with current lane, follow it.
- Else, pick next road randomly.
- If it has the same number of lanes as current road, stay in the same lane.
- Else, pick next road's closest lane.
:param current_index: the index of the current lane.
:param route: the planned route, if any.
:param position: the vehicle position.
:param np_random: a source of randomness.
:return: the index of the next lane to be followed when current lane is finished.
"""
_from, _to, _id = current_index
next_to = None
# Pick next road according to planned route
if route:
if route[0][:2] == current_index[:2]: # We just finished the first step of the route, drop it.
route.pop(0)
if route and route[0][0] == _to: # Next road in route is starting at the end of current road.
_, next_to, _ = route[0]
elif route:
logger.warning("Route {} does not start after current road {}.".format(route[0], current_index))
# Randomly pick next road
if not next_to:
try:
next_to = list(self.graph[_to].keys())[np_random.randint(len(self.graph[_to]))]
except KeyError:
# logger.warning("End of lane reached.")
return current_index
# If next road has same number of lane, stay on the same lane
if len(self.graph[_from][_to]) == len(self.graph[_to][next_to]):
next_id = _id
# Else, pick closest lane
else:
lanes = range(len(self.graph[_to][next_to]))
next_id = min(lanes,
key=lambda l: self.get_lane((_to, next_to, l)).distance(position))
return _to, next_to, next_id
def bfs_paths(self, start: str, goal: str) -> List[List[str]]:
"""
Breadth-first search of all routes from start to goal.
:param start: starting node
:param goal: goal node
:return: list of paths from start to goal.
"""
queue = [(start, [start])]
while queue:
(node, path) = queue.pop(0)
if node not in self.graph:
yield []
for _next in set(self.graph[node].keys()) - set(path):
if _next == goal:
yield path + [_next]
elif _next in self.graph:
queue.append((_next, path + [_next]))
def shortest_path(self, start: str, goal: str) -> List[str]:
"""
Breadth-first search of shortest path from start to goal.
:param start: starting node
:param goal: goal node
:return: shortest path from start to goal.
"""
return next(self.bfs_paths(start, goal), [])
def all_side_lanes(self, lane_index: LaneIndex) -> List[LaneIndex]:
"""
:param lane_index: the index of a lane.
:return: all lanes belonging to the same road.
"""
return [(lane_index[0], lane_index[1], i) for i in range(len(self.graph[lane_index[0]][lane_index[1]]))]
def side_lanes(self, lane_index: LaneIndex) -> List[LaneIndex]:
"""
:param lane_index: the index of a lane.
        :return: indexes of lanes next to an input lane, to its right or left.
"""
_from, _to, _id = lane_index
lanes = []
if _id > 0:
lanes.append((_from, _to, _id - 1))
if _id < len(self.graph[_from][_to]) - 1:
lanes.append((_from, _to, _id + 1))
return lanes
@staticmethod
def is_same_road(lane_index_1: LaneIndex, lane_index_2: LaneIndex, same_lane: bool = False) -> bool:
"""Is lane 1 in the same road as lane 2?"""
return lane_index_1[:2] == lane_index_2[:2] and (not same_lane or lane_index_1[2] == lane_index_2[2])
@staticmethod
def is_leading_to_road(lane_index_1: LaneIndex, lane_index_2: LaneIndex, same_lane: bool = False) -> bool:
"""Is lane 1 leading to of lane 2?"""
return lane_index_1[1] == lane_index_2[0] and (not same_lane or lane_index_1[2] == lane_index_2[2])
def is_connected_road(self, lane_index_1: LaneIndex, lane_index_2: LaneIndex, route: Route = None,
same_lane: bool = False, depth: int = 0) -> bool:
"""
        Is lane 2 leading to a road within lane 1's route?
Vehicles on these lanes must be considered for collisions.
:param lane_index_1: origin lane
:param lane_index_2: target lane
:param route: route from origin lane, if any
:param same_lane: compare lane id
:param depth: search depth from lane 1 along its route
:return: whether the roads are connected
"""
if RoadNetwork.is_same_road(lane_index_2, lane_index_1, same_lane) \
or RoadNetwork.is_leading_to_road(lane_index_2, lane_index_1, same_lane):
return True
if depth > 0:
if route and route[0][:2] == lane_index_1[:2]:
# Route is starting at current road, skip it
return self.is_connected_road(lane_index_1, lane_index_2, route[1:], same_lane, depth)
elif route and route[0][0] == lane_index_1[1]:
# Route is continuing from current road, follow it
return self.is_connected_road(route[0], lane_index_2, route[1:], same_lane, depth - 1)
else:
# Recursively search all roads at intersection
_from, _to, _id = lane_index_1
return any([self.is_connected_road((_to, l1_to, _id), lane_index_2, route, same_lane, depth - 1)
for l1_to in self.graph.get(_to, {}).keys()])
return False
def lanes_list(self) -> List[AbstractLane]:
return [lane for to in self.graph.values() for ids in to.values() for lane in ids]
@staticmethod
def straight_road_network(lanes: int = 4, length: float = 10000, angle: float = 0) -> 'RoadNetwork':
net = RoadNetwork()
for lane in range(lanes):
origin = np.array([0, lane * StraightLane.DEFAULT_WIDTH])
end = np.array([length, lane * StraightLane.DEFAULT_WIDTH])
rotation = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
origin = rotation @ origin
end = rotation @ end
line_types = [LineType.CONTINUOUS_LINE if lane == 0 else LineType.STRIPED,
LineType.CONTINUOUS_LINE if lane == lanes - 1 else LineType.NONE]
net.add_lane("0", "1", StraightLane(origin, end, line_types=line_types))
return net
def position_heading_along_route(self, route: Route, longitudinal: float, lateral: float) \
-> Tuple[np.ndarray, float]:
"""
Get the absolute position and heading along a route composed of several lanes at some local coordinates.
:param route: a planned route, list of lane indexes
:param longitudinal: longitudinal position
        :param lateral: lateral position
:return: position, heading
"""
while len(route) > 1 and longitudinal > self.get_lane(route[0]).length:
longitudinal -= self.get_lane(route[0]).length
route = route[1:]
return self.get_lane(route[0]).position(longitudinal, lateral), self.get_lane(route[0]).heading_at(longitudinal)
class Road(object):
"""A road is a set of lanes, and a set of vehicles driving on these lanes."""
def __init__(self,
network: RoadNetwork = None,
vehicles: List['kinematics.Vehicle'] = None,
road_objects: List['objects.RoadObject'] = None,
np_random: np.random.RandomState = None,
record_history: bool = False) -> None:
"""
New road.
:param network: the road network describing the lanes
:param vehicles: the vehicles driving on the road
:param road_objects: the objects on the road including obstacles and landmarks
:param np.random.RandomState np_random: a random number generator for vehicle behaviour
:param record_history: whether the recent trajectories of vehicles should be recorded for display
"""
self.network = network
self.vehicles = vehicles or []
self.objects = road_objects or []
self.np_random = np_random if np_random else np.random.RandomState()
self.record_history = record_history
def close_vehicles_to(self, vehicle: 'kinematics.Vehicle', distance: float, count: int = None,
see_behind: bool = True) -> object:
vehicles = [v for v in self.vehicles
if np.linalg.norm(v.position - vehicle.position) < distance
and v is not vehicle
and (see_behind or -2 * vehicle.LENGTH < vehicle.lane_distance_to(v))]
vehicles = sorted(vehicles, key=lambda v: abs(vehicle.lane_distance_to(v)))
if count:
vehicles = vehicles[:count]
return vehicles
def act(self) -> None:
"""Decide the actions of each entity on the road."""
for vehicle in self.vehicles:
vehicle.act()
def step(self, dt: float) -> None:
"""
Step the dynamics of each entity on the road.
:param dt: timestep [s]
"""
for vehicle in self.vehicles:
vehicle.step(dt)
for vehicle in self.vehicles:
for other in self.vehicles:
vehicle.check_collision(other)
for other in self.objects:
vehicle.check_collision(other)
def neighbour_vehicles(self, vehicle: 'kinematics.Vehicle', lane_index: LaneIndex = None) \
-> Tuple[Optional['kinematics.Vehicle'], Optional['kinematics.Vehicle']]:
"""
Find the preceding and following vehicles of a given vehicle.
:param vehicle: the vehicle whose neighbours must be found
:param lane_index: the lane on which to look for preceding and following vehicles.
It doesn't have to be the current vehicle lane but can also be another lane, in which case the
vehicle is projected on it considering its local coordinates in the lane.
:return: its preceding vehicle, its following vehicle
"""
lane_index = lane_index or vehicle.lane_index
if not lane_index:
return None, None
lane = self.network.get_lane(lane_index)
s = self.network.get_lane(lane_index).local_coordinates(vehicle.position)[0]
s_front = s_rear = None
v_front = v_rear = None
for v in self.vehicles + self.objects:
if v is not vehicle and not isinstance(v, Landmark): # self.network.is_connected_road(v.lane_index,
# lane_index, same_lane=True):
s_v, lat_v = lane.local_coordinates(v.position)
if not lane.on_lane(v.position, s_v, lat_v, margin=1):
continue
if s <= s_v and (s_front is None or s_v <= s_front):
s_front = s_v
v_front = v
if s_v < s and (s_rear is None or s_v > s_rear):
s_rear = s_v
v_rear = v
return v_front, v_rear
def __repr__(self):
return self.vehicles.__repr__()
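# Hedged usage sketch (not part of the original module): building a small straight
# road network and querying its topology; guarded so importing this module is unaffected.
if __name__ == "__main__":
    net = RoadNetwork.straight_road_network(lanes=2, length=100)
    print(list(net.graph.keys()))               # ['0'] -- one road from node "0" to node "1"
    print(net.shortest_path("0", "1"))          # ['0', '1']
    print(net.all_side_lanes(("0", "1", 0)))    # [('0', '1', 0), ('0', '1', 1)]
    print(net.side_lanes(("0", "1", 0)))        # [('0', '1', 1)]
    print(type(net.get_lane(("0", "1", 1))).__name__)  # StraightLane
    road = Road(network=net, vehicles=[], np_random=np.random.RandomState(0))
    print(road)                                 # [] -- no vehicles yet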
| 43.39441
| 120
| 0.600945
|
2be9b52c7ac0476096392703b38a249caa45d610
| 61
|
py
|
Python
|
src/ralph/lib/table/__init__.py
|
angry-tony/cmdb-ralph
|
eb2ad2212a133025b698eb48e379c0bfe14cace0
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/lib/table/__init__.py
|
angry-tony/cmdb-ralph
|
eb2ad2212a133025b698eb48e379c0bfe14cace0
|
[
"Apache-2.0"
] | null | null | null |
src/ralph/lib/table/__init__.py
|
angry-tony/cmdb-ralph
|
eb2ad2212a133025b698eb48e379c0bfe14cace0
|
[
"Apache-2.0"
] | null | null | null |
from ralph.lib.table.table import Table
__all__ = ['Table']
| 15.25
| 39
| 0.737705
|
82ec43434d3779f29f8f0596258d56a540a6011d
| 2,512
|
py
|
Python
|
Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/Serialization/UnitTests/JsonLikeSerialization_UnitTest.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | 1
|
2017-04-25T13:15:10.000Z
|
2017-04-25T13:15:10.000Z
|
Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/Serialization/UnitTests/JsonLikeSerialization_UnitTest.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | null | null | null |
Libraries/Python/CommonEnvironment/v1.0/CommonEnvironment/TypeInfo/FundamentalTypes/Serialization/UnitTests/JsonLikeSerialization_UnitTest.py
|
davidbrownell/Common_Environment
|
4015872aeac8d5da30a6aa7940e1035a6aa6a75d
|
[
"BSL-1.0"
] | null | null | null |
# ----------------------------------------------------------------------
# |
# | JsonLikeSerialization_UnitTest.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2016-09-06 17:31:15
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2016-18.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
import datetime
import os
import sys
import unittest
from CommonEnvironment import Package
# ----------------------------------------------------------------------
_script_fullpath = os.path.abspath(__file__) if "python" in sys.executable.lower() else sys.executable
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with Package.NameInfo(__package__) as ni:
__package__ = ni.created
from ..JsonLikeSerialization import *
from ... import *
__package__ = ni.original
# ----------------------------------------------------------------------
class UnitTest(unittest.TestCase):
# ----------------------------------------------------------------------
def test_SerializeItem(self):
self.assertEqual(JsonLikeSerialization.SerializeItem(BoolTypeInfo(), True), True)
self.assertEqual(JsonLikeSerialization.SerializeItem(FloatTypeInfo(), 1.0), 1.0)
self.assertEqual(JsonLikeSerialization.SerializeItem(IntTypeInfo(), 100), 100)
self.assertEqual(JsonLikeSerialization.SerializeItem(DurationTypeInfo(), datetime.timedelta(seconds=130)), "0:02:10.000000")
# ----------------------------------------------------------------------
def test_DeserializeItem(self):
self.assertEqual(JsonLikeSerialization.DeserializeItem(BoolTypeInfo(), True), True)
self.assertEqual(JsonLikeSerialization.DeserializeItem(FloatTypeInfo(), 1.0), 1.0)
self.assertEqual(JsonLikeSerialization.DeserializeItem(IntTypeInfo(), 100), 100)
self.assertEqual(JsonLikeSerialization.DeserializeItem(DurationTypeInfo(), "0:02:10.0"), datetime.timedelta(seconds=130))
# ---------------------------------------------------------------------------
if __name__ == "__main__":
try: sys.exit(unittest.main(verbosity=2))
except KeyboardInterrupt: pass
| 44.857143
| 133
| 0.52508
|
0d8219d2d4ddf5daacb13d3536d21eb28b1bae58
| 12,928
|
py
|
Python
|
SARSA_Agent.py
|
GrayKS3248/RL_Agents
|
dc84321b18131b811f39bc5491f4714e3554d610
|
[
"MIT"
] | null | null | null |
SARSA_Agent.py
|
GrayKS3248/RL_Agents
|
dc84321b18131b811f39bc5491f4714e3554d610
|
[
"MIT"
] | null | null | null |
SARSA_Agent.py
|
GrayKS3248/RL_Agents
|
dc84321b18131b811f39bc5491f4714e3554d610
|
[
"MIT"
] | null | null | null |
"""
Created on Tuesday Sep 15 10:56:33 CST 2020
@author: Grayson Schaer
"""
import numpy as np
import random
class SARSA_Agent():
# Agent constructor
# @param n_s - integer size of the state space
# @param n_a - integer size of the action space
# @param n_rows - Size of the state space in the x direction
# @param n_cols - Size of the state space in the y direction
# @param alpha - learning rate
# @param gamma - discount ratio
    # @param epsilon - epsilon-greedy action choice parameter
def __init__(self, n_s, n_a, n_rows, n_cols, alpha, gamma, epsilon):
# Sets the size of the state space
self.n_s = n_s
# Sets the size of the action space
self.n_a = n_a
# Sets the size of the x direction in the state space
self.n_rows = n_rows
# Sets the size of the y direction in the state space
self.n_cols = n_cols
# Defines the learning rate for the agent
self.alpha = alpha
# Defines the discounting rate defined by the problem definition
self.gamma = gamma
# Defines the greedy ratio for exploration - exploitation (epsilon-greedy)
self.epsilon = epsilon
# Initializes estimated Value function (updated by TD(0))
self.V = [0.0]*self.n_s
# Initializes estimated Quality function (updated by SARSA algorithm)
self.Q = np.zeros((self.n_s, self.n_a))
# Initializes policy (deterministic based on Quality function)
self.p = np.zeros((self.n_s, self.n_a))
for i in range(self.n_s):
self.p[i][np.argmax(self.Q[i])] = 1.0
# Defines the variables used in the log
self.r_tot = 0
self.r_tot_discount = 0
self.curr_step = 0
# Defines the variables used in the logbook
self.entry_number = 1
self.highest_r_tot = -1
self.last_episode_last_index = -1
# Creates a log that tracks learning during a single episode set
# @entry s - the state at each iteration step
# @entry a - the action taken at each iteration step
# @entry r - the reward given at each iteration step
# @entry r_tot - the sum of all rewards given up to the current iteration step
# @entry r_tot_discount - the sum of all the discounted rewards given up to the current iteration step
# @entry avg_value - the average of the value function at each iteration step
# @curr_step - the iteration step at which all other values are measured
self.log = {
's': [],
'a': [],
'r': [],
'r_tot': [],
'r_tot_discount': [],
'avg_value': [],
}
# Creates a book that tracks results from previous episode sets
# @entry logs - records each log for each set of episodes run by an agent
# @entry avg_Q - tracks the average Quality function generated by each agent
# @entry avg_V - tracks the average Value function generated by each agent
# @entry avg_p - tracks the average policy function generated by each agent
# @entry alpha - learning rate of episode set
# @entry gamma - discount factor of episode set
# @entry epsilon - exploration rate of episode set
self.logbook = {
'best_s': [],
'best_a': [],
'best_r': [],
'r_tot_avg': 0,
'r_tot_discount_avg': 0,
'avg_val_avg': 0,
'avg_Q': 0,
'avg_V': 0,
'avg_p': 0,
'alpha': [],
'gamma': [],
'epsilon': [],
}
# Public getter for the estimated value function
# @return estimated value function in planar state space form
def get_V(self):
# Init planar representation of the Value function
planar_V = np.zeros((self.n_rows, self.n_cols))
# Assign each cell in planar_V its associated Value function output
index = 0
for i in range(self.n_rows):
for j in range(self.n_cols):
planar_V[i][j] = self.V[index]
index += 1
# Return the pretty version of the Value function
return planar_V
# Public getter for the current policy
# @return current policy in planar state space form
def get_p(self):
# Init planar representation of the policy function
planar_p = np.zeros((self.n_rows, self.n_cols))
# Get best policy data
self.update_p()
# Assign each cell in planar_p its associated action based on the policy, action_min, and action_space
index = 0
for i in range(self.n_rows):
for j in range(self.n_cols):
action_index = np.argmax(self.p[index])
planar_p[i][j] = action_index
index += 1
# Return the pretty representation of the policy
return planar_p
# Updates the best trajectory at the end of each episode
def end_episode(self):
# Update the best trajectory log
if self.log['r_tot'][-1] > self.highest_r_tot:
self.highest_r_tot = self.log['r_tot'][-1]
self.logbook['best_s'] = self.log['s'][(self.last_episode_last_index + 1):(len(self.log['r_tot']) - 1)]
self.logbook['best_a'] = self.log['a'][(self.last_episode_last_index + 1):(len(self.log['r_tot']) - 1)]
self.logbook['best_r'] = self.log['r'][(self.last_episode_last_index + 1):(len(self.log['r_tot']) - 1)]
self.last_episode_last_index = len(self.log['r_tot']) - 1
    # Archives the log from the current episode set to the logbook, then resets the
    # agent to its initial conditions.
def terminate_agent(self):
# Update the average learning curves
n = self.entry_number
self.entry_number += 1
r_tot_avg = (1/n) * ((n - 1) * self.logbook['r_tot_avg'] + np.asarray(self.log['r_tot']))
r_tot_discount_avg = (1/n) * ((n - 1) * self.logbook['r_tot_discount_avg'] + np.asarray(self.log['r_tot_discount']))
avg_val_avg = (1/n) * ((n - 1) * self.logbook['avg_val_avg'] + np.asarray(self.log['avg_value']))
avg_Q = (1/n) * ((n - 1) * self.logbook['avg_Q'] + self.Q)
avg_V = (1/n) * ((n - 1) * self.logbook['avg_V'] + self.get_V())
avg_p = (1/n) * ((n - 1) * self.logbook['avg_p'] + self.get_p())
# In the case of the first set, the average value is just the current value
if n==1:
avg_Q = self.Q
avg_V = self.get_V()
avg_p = self.get_p()
r_tot_avg = self.log['r_tot']
r_tot_discount_avg = self.log['r_tot_discount']
avg_val_avg = self.log['avg_value']
# Input the average function values
self.logbook['r_tot_avg'] = r_tot_avg
self.logbook['r_tot_discount_avg'] = r_tot_discount_avg
self.logbook['avg_val_avg'] = avg_val_avg
self.logbook['avg_Q'] = avg_Q
self.logbook['avg_V'] = avg_V
self.logbook['avg_p'] = avg_p
# Input the agent parameters
self.logbook['alpha'].append(self.alpha)
self.logbook['gamma'].append(self.gamma)
self.logbook['epsilon'].append(self.epsilon)
# Reset the log
self.log = {
's': [],
'a': [],
'r': [],
'r_tot': [],
'r_tot_discount': [],
'avg_value': [],
}
# Initializes estimated Value function (updated by TD(0))
self.V = [0.0]*self.n_s
# Initializes estimated Quality function (updated by SARSA algorithm)
self.Q = np.zeros((self.n_s, self.n_a))
# Initializes policy (deterministic based on Quality function)
self.p = np.zeros((self.n_s, self.n_a))
for i in range(self.n_s):
self.p[i][np.argmax(self.Q[i])] = 1.0
# Defines the variables used in the log
self.r_tot = 0
self.r_tot_discount = 0
self.curr_step = 0
# Updates the quality function estimate based on the Q-learning algorithm
# @param s1 - The state before an action is taken
    # @param a1 - The action taken in the first state
    # @param s2 - The state after an action is taken
    # @param a2 - The action taken in the second state
# @param r - The reward returned traveling from s1 to s2
# @return Current estimate of quality function
def update_Q(self, s1, a1, s2, a2, r):
# Implementation of SARSA algorithm
self.Q[s1][a1] = self.Q[s1][a1] + self.alpha*(r + self.gamma*self.Q[s2][a2] - self.Q[s1][a1])
# Update the current Value function estimate
self.update_V(s1, s2, r)
#Update the log
self.r_tot = self.r_tot + r
self.r_tot_discount = self.r_tot_discount + (self.gamma ** (self.curr_step)) * r
avg_value = sum(self.V) / len(self.V)
self.log['s'].append(s1)
self.log['a'].append(a1)
self.log['r'].append(r)
self.log['r_tot'].append(self.r_tot)
self.log['r_tot_discount'].append(self.r_tot_discount)
self.log['avg_value'].append(avg_value)
self.curr_step = self.curr_step + 1
return self.Q
# Updates the policy function based on the Quality function
# @return Current policy
def update_p(self):
# Update the deterministic policy function based on the new Quality function
self.p = np.zeros((self.n_s, self.n_a))
for i in range(self.n_s):
self.p[i][np.argmax(self.Q[i])] = 1.0
return self.p
# Updates the value function estimate based on the TD(0) algorithm
# @param s1 - The state before an action is taken
# @param s2 - The state after an action is taken
# @param r - The reward returned traveling from s1 to s2
# @return Current estimate of value function
def update_V(self, s1, s2, r):
# Implementation of TD(0) algorithm
self.V[s1] = self.V[s1] + self.alpha * (r + self.gamma * self.V[s2] - self.V[s1])
return self.V
# Ask for an action based on the current quality fnc, epsilon-greedy parameters, and the current state
# @param s - The current state
# @return - The calculated epsilon-greedy action
def get_action(self, s):
# Choose a random variable to determine action
choice = random.random()
        # Initialize the action to an invalid value. It is assigned before the
        # if/else block so that it is always defined when returned, and an invalid
        # value makes the environment raise an error if it is somehow never updated.
a = -1
# If the choice variable is smaller than epsilon, take a random action in the action space
if (choice <= self.epsilon):
a = random.randint(0, self.n_a-1)
# Otherwise, choose the best action based on the current Q function
else:
# Get the argmax a from Q
a = np.argmax(self.Q[s])
# Checks if any quality fnc outputs are the same
identical = np.zeros(self.n_a)
for i in range(self.n_a):
if (self.Q[s][i] == self.Q[s][a]):
identical[i] = 1
# If there are identities, randomly sample from the collection of identies
if sum(identical) != 1:
                # Choose a random index of one of the identical actions
                ident_a_index = random.randint(0, int(sum(identical)) - 1)
# Convert the identical array from truth array to identical action index+1 array
identical = np.matmul(identical,np.diag(range(1,self.n_a + 1)))
# Remove all indices that are zero (ie actions do not have identical outcomes)
identical = identical[identical != 0]
# Convert the identical array from identical action index+1 array to identical action index array
identical -= 1
# Select the random sampled action from the space of all identical outcome actions
a = int(identical[ident_a_index])
# Return the selected action
return a
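# Hedged usage sketch (not part of the original file): exercising the agent on a
# toy problem; the transition and reward models below are made up for illustration.
if __name__ == "__main__":
    n_rows, n_cols = 3, 4
    n_s, n_a = n_rows * n_cols, 4
    agent = SARSA_Agent(n_s, n_a, n_rows, n_cols, alpha=0.1, gamma=0.95, epsilon=0.1)
    for episode in range(5):
        s1 = random.randint(0, n_s - 1)
        a1 = agent.get_action(s1)
        for step in range(20):
            s2 = random.randint(0, n_s - 1)       # made-up transition model
            r = 1.0 if s2 == n_s - 1 else 0.0     # made-up reward model
            a2 = agent.get_action(s2)
            agent.update_Q(s1, a1, s2, a2, r)
            s1, a1 = s2, a2
        agent.end_episode()
    print(agent.get_p())       # greedy action per grid cell after a few random episodes
    agent.terminate_agent()    # archives the run into the agent's logbook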
| 41.569132
| 124
| 0.578125
|
a5e7dd2e876c6c04295a7a2a406f14c0bea3f9c2
| 3,568
|
py
|
Python
|
installSynApps/view_model/new_config_screen.py
|
NSLS-II/installSynApps
|
0f8e978939715bbba1a064ead3044fa36215cb09
|
[
"BSD-3-Clause"
] | 3
|
2019-10-16T20:57:31.000Z
|
2019-10-24T15:30:01.000Z
|
installSynApps/view_model/new_config_screen.py
|
jwlodek/installSynApps
|
d1fe1703cdee71be9ee34f62c85ee527d1f9ebcc
|
[
"BSD-3-Clause"
] | 62
|
2019-02-02T22:51:12.000Z
|
2020-12-14T20:32:17.000Z
|
installSynApps/view_model/new_config_screen.py
|
epicsNSLS2-deploy/installSynApps
|
e39307fffb071d25031b4988285f5010e9e2bc29
|
[
"BSD-3-Clause"
] | 3
|
2019-02-13T20:21:03.000Z
|
2019-11-25T20:26:34.000Z
|
"""Module containing view model for creating new configurations
"""
# Tkinter imports
import tkinter as tk
from tkinter import Button, Label, Toplevel, Frame, BooleanVar, Checkbutton
from tkinter import GROOVE, Text, END, INSERT
from tkinter import messagebox
from tkinter import filedialog
from tkinter import font as tkFont
import tkinter.scrolledtext as ScrolledText
# Standard lib imports
import os
class NewConfigGUI:
"""Class for a window that allows editing of loaded macros.
Attributes
----------
root : Tk
Main Tk root window
master : TopLevel
New window
smallFont : ttkFont
Smaller font size
largeFont : ttkFont
larger font size
viewFrame : Frame
main container frame
config_type : IntVar
selector for which config base to use
config_types : dict of int->str
maps config_type to string
dir_box : Text
Textfield for target directory
update_tags_var : booleanVar
variable that stores whether to auto-update tags
update_tags_button : checkbutton
toggles update_tags_var
"""
def __init__(self, root):
"""Initializer for NewConfigGUI
"""
self.root = root
self.master = Toplevel()
self.master.title('New Install Config')
self.master.resizable(False, False)
self.smallFont = tkFont.Font(family = "Helvetica", size = 10)
self.largeFont = tkFont.Font(family = "Helvetica", size = 14)
self.viewFrame = Frame(self.master, relief = GROOVE, padx = 10, pady = 10)
self.viewFrame.pack()
Label(self.viewFrame, text='Install Location:').grid(row=1, column=0, pady=5)
Button(self.viewFrame, text='Browse', command=self.selectInstallLoc).grid(row=1, column=3, padx=5, pady=5)
self.dir_box = Text(self.viewFrame, height=1, width=45, padx=3, pady=3)
self.dir_box.grid(row=1, column=1, columnspan=2)
Label(self.viewFrame, text='').grid(row=4)
self.update_tags_var = BooleanVar()
self.update_tags_var.set(True)
self.update_tags_button = Checkbutton(self.viewFrame, text='Auto-Update Tags', onvalue=True, offvalue=False, variable=self.update_tags_var)
self.update_tags_button.grid(row=4, column=0, padx=3, pady=3)
Button(self.viewFrame, text='Create', command = self.applyChanges, width=12).grid(row = 4, column = 2, columnspan = 1, padx = 5, pady = 5)
Button(self.viewFrame, text='Cancel', command = self.cancel, width=12).grid(row = 4, column = 3, columnspan = 1, padx = 5, pady = 5)
self.master.mainloop()
def reloadPanel(self):
"""Refreshes the current window
"""
self.dir_box.delete('1.0', END)
def applyChanges(self):
"""Creates new install configuration given properties
"""
u = self.update_tags_var.get()
i = self.dir_box.get('1.0', END).strip()
if not os.path.exists(i) or not os.path.isdir(i):
self.root.showErrorMessage('Error', 'ERROR - Path: {} does not exist or is not a directory!'.format(i), force_popup=True)
return
self.root.newConfig(i, update_tags=u)
self.cancel()
def cancel(self):
"""Closes window
"""
self.master.destroy()
def selectInstallLoc(self):
"""Shows file selection popup
"""
        chosen_dir = filedialog.askdirectory(initialdir='.')
        # askdirectory returns an empty string, not None, when the dialog is cancelled.
        if chosen_dir:
            self.dir_box.delete('1.0', END)
            self.dir_box.insert(INSERT, chosen_dir)
| 31.857143
| 147
| 0.642096
|
8f2334028f28b64dc069f820c16b5e879596c6aa
| 1,118
|
py
|
Python
|
aula5.py
|
mauriciopicirillo/Aprendendo-Python
|
fceb5124dc1f0446a364351358b74d37b3c99335
|
[
"MIT"
] | null | null | null |
aula5.py
|
mauriciopicirillo/Aprendendo-Python
|
fceb5124dc1f0446a364351358b74d37b3c99335
|
[
"MIT"
] | null | null | null |
aula5.py
|
mauriciopicirillo/Aprendendo-Python
|
fceb5124dc1f0446a364351358b74d37b3c99335
|
[
"MIT"
] | null | null | null |
lista = [12, 10, 5, 7]
lista_animal = ['cachorro', 'gato', 'elefante']
tupla = (1, 10, 12, 14)
print(len(tupla))
print(len(lista_animal))
tupla_animal = tuple(lista_animal)
print(type(tupla_animal))
print(tupla_animal)
lista_numerica = list(tupla)
print(type(lista_numerica))
lista_numerica[0] = 100
print(lista_numerica)
# lista.sort()
# lista_animal.sort()
# print(lista)
# print(lista_animal)
# lista_animal.reverse()
# print(lista_animal)
# if 'lobo' in lista_animal:
# print('There is a wolf in the list')
# else:
# print('There is no wolf in the list. It will be added.')
# lista_animal.append('lobo')
# print(lista_animal)
# lista_animal.remove('elefante')
# print(lista_animal)
# lista_animal.pop(0)
# print(lista_animal)
# nova_lista = lista_animal * 3
# print(nova_lista)
# if 'lobo' in lista_animal:
# print('there is a wolf in the list')
# else:
# print('there is no wolf in the list')
# print(lista_animal[1])
# print(min(lista))
# print(max(lista))
# print(sum(lista))
# soma = 0
# for x in lista:
# print(x)
# soma += x
# print(soma)
# for x in lista_animal:
# print(x)
| 19.964286
| 58
| 0.677996
|
204235a7acd1f47284d913c8d89e5e0b9784c8f2
| 8,938
|
py
|
Python
|
tests/test_path.py
|
agos-tech/jsons
|
33db6d4c4d186303ddac2a7c3b1a17dcb141e7a8
|
[
"MIT"
] | null | null | null |
tests/test_path.py
|
agos-tech/jsons
|
33db6d4c4d186303ddac2a7c3b1a17dcb141e7a8
|
[
"MIT"
] | null | null | null |
tests/test_path.py
|
agos-tech/jsons
|
33db6d4c4d186303ddac2a7c3b1a17dcb141e7a8
|
[
"MIT"
] | null | null | null |
import os.path
from pathlib import Path, PureWindowsPath, PurePosixPath
from unittest import TestCase
import jsons
class TestPath(TestCase):
def test_dump_singlepart_relative_path(self):
self.assertEqual('abc', jsons.dump(Path('abc')))
def test_dump_singlepart_pure_windows_path(self):
self.assertEqual('abc', jsons.dump(PureWindowsPath('abc')))
def test_dump_singlepart_pure_posix_path(self):
self.assertEqual('abc', jsons.dump(PurePosixPath('abc')))
def test_dump_multipart_relative_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(Path('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(Path('abc/def/ghi'))
)
def test_dump_multipart_pure_windows_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc/def/ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PureWindowsPath('abc\\def\\ghi'))
)
def test_dump_multipart_pure_posix_path(self):
self.assertEqual(
'abc/def/ghi',
jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
)
self.assertEqual(
'abc/def/ghi',
jsons.dump(PurePosixPath('abc/def/ghi'))
)
self.assertEqual(
'abc\\def\\ghi',
jsons.dump(PurePosixPath('abc\\def\\ghi'))
)
def test_dump_multipart_drived_pure_windows_path(self):
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:/abc/def/ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PureWindowsPath('Z:\\abc\\def\\ghi'))
)
def test_dump_multipart_drived_pure_posix_path(self):
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PurePosixPath('Z:', 'abc', 'def', 'ghi'))
)
self.assertEqual(
'Z:/abc/def/ghi',
jsons.dump(PurePosixPath('Z:/abc/def/ghi'))
)
self.assertEqual(
'Z:\\abc\\def\\ghi',
jsons.dump(PurePosixPath('Z:\\abc\\def\\ghi'))
)
#################
def test_load_singlepart_relative_path(self):
self.assertEqual(
Path('abc'),
jsons.load('abc', Path)
)
def test_load_singlepart_pure_windows_path(self):
self.assertEqual(
PureWindowsPath('abc'),
jsons.load('abc', PureWindowsPath)
)
def test_load_singlepart_pure_posix_path(self):
self.assertEqual(
PurePosixPath('abc'),
jsons.load('abc', PurePosixPath)
)
def test_load_multipart_relative_path(self):
self.assertEqual(
Path('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', Path)
)
self.assertEqual(
Path('abc/def/ghi'),
jsons.load('abc/def/ghi', Path)
)
def test_load_multipart_pure_windows_path(self):
# We should be able to load Posix-style paths on Windows.
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc/def/ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc\\def\\ghi'),
jsons.load('abc/def/ghi', PureWindowsPath)
)
# We should be able to load Windows-style paths on Windows.
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc/def/ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('abc\\def\\ghi'),
jsons.load('abc\\def\\ghi', PureWindowsPath)
)
def test_load_multipart_pure_posix_path(self):
# We should be able to load Posix-style paths on Posix systems.
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('abc/def/ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('abc\\def\\ghi'),
jsons.load('abc/def/ghi', PurePosixPath)
)
        # Backslashes on Posix systems are ordinary filename characters, not separators.
self.assertNotEqual(
PurePosixPath('abc', 'def', 'ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('abc/def/ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('abc\\def\\ghi'),
jsons.load('abc\\def\\ghi', PurePosixPath)
)
def test_load_multipart_drived_pure_windows_path(self):
# We should be able to load Posix-style paths on Windows.
self.assertEqual(
PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:/abc/def/ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:/abc/def/ghi', PureWindowsPath)
)
# We should be able to load Windows-style paths on Windows.
self.assertEqual(
PureWindowsPath('Z:\\', 'abc', 'def', 'ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:/abc/def/ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
self.assertEqual(
PureWindowsPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:\\abc\\def\\ghi', PureWindowsPath)
)
def test_load_multipart_drived_pure_posix_path(self):
        # We should be able to load Posix-style paths on Posix systems.
self.assertEqual(
PurePosixPath('Z:', 'abc', 'def', 'ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('Z:/abc/def/ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:/abc/def/ghi', PurePosixPath)
)
        # Backslashes on Posix systems are ordinary filename characters, not separators.
self.assertNotEqual(
PurePosixPath('Z:', 'abc', 'def', 'ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
self.assertNotEqual(
PurePosixPath('Z:/abc/def/ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
self.assertEqual(
PurePosixPath('Z:\\abc\\def\\ghi'),
jsons.load('Z:\\abc\\def\\ghi', PurePosixPath)
)
def test_dump_posix_load_windows(self):
dump_result = jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PureWindowsPath)
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_windows_load_posix(self):
dump_result = jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PurePosixPath)
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_posix_load_posix(self):
dump_result = jsons.dump(PurePosixPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PurePosixPath)
self.assertEqual(
PurePosixPath('abc', 'def', 'ghi'),
load_result
)
def test_dump_windows_load_windows(self):
dump_result = jsons.dump(PureWindowsPath('abc', 'def', 'ghi'))
self.assertEqual(
'abc/def/ghi',
dump_result
)
load_result = jsons.load(dump_result, PureWindowsPath)
self.assertEqual(
PureWindowsPath('abc', 'def', 'ghi'),
load_result
)
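# Hedged illustration (not part of the test suite): the round-trip tests above work
# because jsons dumps every pure path with forward slashes, so a dump made from one
# flavour can be loaded as the other.
def _example_path_roundtrip() -> None:
    original = PureWindowsPath('C:\\', 'data', 'file.txt')
    dumped = jsons.dump(original)                  # 'C:/data/file.txt'
    restored = jsons.load(dumped, PureWindowsPath)
    assert restored == original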
| 32.620438
| 72
| 0.54934
|
642bef74b6b8d741cd4326ef0b9a883e114eaade
| 3,159
|
py
|
Python
|
backend/src/baserow/contrib/database/webhooks/models.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/webhooks/models.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
backend/src/baserow/contrib/database/webhooks/models.py
|
LiuJun666888/baserow
|
bc5b7f8ebe319f90ed1aabdb7f5dfd8916c3dad1
|
[
"MIT"
] | null | null | null |
import uuid
from django.db import models
from baserow.core.models import CreatedAndUpdatedOnMixin
from baserow.contrib.database.table.models import Table
from .validators import header_name_validator, header_value_validator
class WebhookRequestMethods(models.TextChoices):
POST = "POST"
GET = "GET"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
class TableWebhook(CreatedAndUpdatedOnMixin, models.Model):
table = models.ForeignKey(Table, on_delete=models.CASCADE)
active = models.BooleanField(
default=True,
help_text="Indicates whether the web hook is active. When a webhook has "
"failed multiple times, it will automatically be deactivated.",
)
use_user_field_names = models.BooleanField(
default=True,
help_text="Indicates whether the field names must be used as payload key "
"instead of the id.",
)
url = models.URLField(
help_text="The URL that must call when the webhook is " "triggered."
)
request_method = models.CharField(
max_length=10,
choices=WebhookRequestMethods.choices,
default=WebhookRequestMethods.POST,
help_text="The request method that be used when the event occurs.",
)
name = models.CharField(
max_length=255, help_text="An internal name of the webhook."
)
include_all_events = models.BooleanField(
default=True,
help_text="Indicates whether this webhook should listen to all events.",
)
failed_triggers = models.IntegerField(
default=0, help_text="The amount of failed webhook calls."
)
@property
def header_dict(self):
return {header.name: header.value for header in self.headers.all()}
class Meta:
ordering = ("id",)
class TableWebhookEvent(CreatedAndUpdatedOnMixin, models.Model):
webhook = models.ForeignKey(
TableWebhook, related_name="events", on_delete=models.CASCADE
)
event_type = models.CharField(max_length=50)
class TableWebhookHeader(models.Model):
webhook = models.ForeignKey(
TableWebhook, related_name="headers", on_delete=models.CASCADE
)
name = models.TextField(validators=[header_name_validator])
value = models.TextField(validators=[header_value_validator])
class TableWebhookCall(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
webhook = models.ForeignKey(
TableWebhook, related_name="calls", on_delete=models.CASCADE
)
event_type = models.CharField(max_length=50)
called_time = models.DateTimeField(null=True)
called_url = models.URLField()
request = models.TextField(
null=True, help_text="A text copy of the request headers and body."
)
response = models.TextField(
null=True, help_text="A text copy of the response headers and body."
)
response_status = models.IntegerField(
null=True, help_text="The HTTP response status code."
)
error = models.TextField(
null=True, help_text="An internal error reflecting what went wrong."
)
class Meta:
ordering = ("-called_time",)
| 32.56701
| 82
| 0.697056
|
6a35a52fcf7dcf718ba0fa780280b780d1a577b0
| 96
|
py
|
Python
|
apps.py
|
giovanniherdigein/my_first_django
|
ed547cf8802951a6af17c0683a642548e025935f
|
[
"Unlicense"
] | null | null | null |
apps.py
|
giovanniherdigein/my_first_django
|
ed547cf8802951a6af17c0683a642548e025935f
|
[
"Unlicense"
] | null | null | null |
apps.py
|
giovanniherdigein/my_first_django
|
ed547cf8802951a6af17c0683a642548e025935f
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class CrudsiteConfig(AppConfig):
name = 'crudsite'
| 16
| 34
| 0.71875
|
a52cf3c301bea855a7cdc839ddcd30b8d4a0a340
| 1,528
|
py
|
Python
|
fastai/callbacks/rnn.py
|
suleepkumar/fastai
|
728a7154dc120c177fe3499d2e3bf1ba389580fb
|
[
"Apache-2.0"
] | 1
|
2018-12-30T04:12:43.000Z
|
2018-12-30T04:12:43.000Z
|
fastai/callbacks/rnn.py
|
suleepkumar/fastai
|
728a7154dc120c177fe3499d2e3bf1ba389580fb
|
[
"Apache-2.0"
] | 2
|
2021-05-20T23:02:08.000Z
|
2021-09-28T05:48:00.000Z
|
fastai/callbacks/rnn.py
|
Acidburn0zzz/fastai
|
0bc9f78668455af3c0ff8894ea1b2375ec4cd91b
|
[
"Apache-2.0"
] | null | null | null |
"Regroups lr adjustment to seq_len, AR and TAR"
from ..torch_core import *
from ..callback import *
from ..basic_train import Learner, LearnerCallback
__all__ = ['RNNTrainer']
@dataclass
class RNNTrainer(LearnerCallback):
"`Callback` that regroups lr adjustment to seq_len, AR and TAR."
bptt:int=0
alpha:float=0.
beta:float=0.
adjust:bool=True
def on_train_begin(self, **kwargs):
"IN LM, put the training dataloader `first` attribute to `True` to avoid OOM."
if hasattr(self.learn.data.train_dl, 'first'):
self.learn.data.first = True
def on_epoch_begin(self, **kwargs):
"Reset the hidden state of the model."
self.learn.model.reset()
def on_loss_begin(self, last_output:Tuple[Tensor,Tensor,Tensor], **kwargs):
"Save the extra outputs for later and only returns the true output."
self.raw_out,self.out = last_output[1],last_output[2]
return last_output[0]
def on_backward_begin(self, last_loss:Rank0Tensor, last_input:Tensor, **kwargs):
"Adjusts the lr to the sequence length and applies AR and TAR to `last_loss`."
if self.adjust: self.learn.opt.lr *= last_input.size(1) / self.bptt
#AR and TAR
if self.alpha != 0.: last_loss += (self.alpha * self.out[-1].pow(2).mean()).sum().float()
if self.beta != 0.:
h = self.raw_out[-1]
if len(h)>1: last_loss += (self.beta * (h[1:] - h[:-1]).pow(2).mean()).sum().float()
return last_loss
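# Hedged illustration (not part of fastai): how the AR and TAR penalties computed in
# on_backward_begin behave on dummy activations of shape [seq_len, batch, n_hidden];
# guarded so importing this module is unaffected.
if __name__ == "__main__":
    import torch
    alpha, beta = 2., 1.
    out = [torch.randn(10, 4, 8)]       # stands in for self.out (post-dropout activations)
    raw_out = [torch.randn(10, 4, 8)]   # stands in for self.raw_out (raw activations)
    ar = (alpha * out[-1].pow(2).mean()).sum().float()           # activation regularization
    h = raw_out[-1]
    tar = (beta * (h[1:] - h[:-1]).pow(2).mean()).sum().float()  # temporal activation regularization
    print(f"AR penalty: {ar.item():.4f}, TAR penalty: {tar.item():.4f}")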
| 39.179487
| 98
| 0.639398
|
f2250ff3f18b334488053a4837d680aa449442ea
| 5,061
|
py
|
Python
|
examples/decoding/ssd_spatial_filters.py
|
andylikescodes/mne-python
|
79ea57a4318d8d045f5966c26360b079f40a4865
|
[
"BSD-3-Clause"
] | 1
|
2022-02-19T08:13:49.000Z
|
2022-02-19T08:13:49.000Z
|
examples/decoding/ssd_spatial_filters.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | null | null | null |
examples/decoding/ssd_spatial_filters.py
|
LiFeng-SECUC/mne-python
|
732bb1f994e64e41a8e95dcc10dc98c22cac95c0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
===========================================================
Compute Spectro-Spatial Decomposition (SSD) spatial filters
===========================================================
In this example, we will compute spatial filters for retaining
oscillatory brain activity and down-weighting 1/f background signals
as proposed by :footcite:`NikulinEtAl2011`.
The idea is to learn spatial filters that separate oscillatory dynamics
from surrounding non-oscillatory noise based on the covariance in the
frequency band of interest and the noise covariance based on surrounding
frequencies.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Victoria Peterson <victoriapeterson09@gmail.com>
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.datasets.fieldtrip_cmc import data_path
from mne.decoding import SSD
# %%
# Define parameters
fname = data_path() / 'SubjectCMC.ds'
# Prepare data
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 110.).load_data() # crop for memory purposes
raw.resample(sfreq=250)
raw.pick_types(meg=True, eeg=False, ref_meg=False)
freqs_sig = 9, 12
freqs_noise = 8, 13
ssd = SSD(info=raw.info,
reg='oas',
sort_by_spectral_ratio=False, # False for purpose of example.
filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
l_trans_bandwidth=1, h_trans_bandwidth=1),
filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
l_trans_bandwidth=1, h_trans_bandwidth=1))
ssd.fit(X=raw.get_data())
# %%
# Let's investigate spatial filter with max power ratio.
# We will first inspect the topographies.
# According to Nikulin et al. 2011 this is done by either inverting the filters
# (W^{-1}) or by multiplying the noise covariance with the filters, Eq. (22): (C_n W)^t.
# We rely on the inversion approach here.
pattern = mne.EvokedArray(data=ssd.patterns_[:4].T,
info=ssd.info)
pattern.plot_topomap(units=dict(mag='A.U.'), time_format='')
# The topographies suggest that we picked up a parietal alpha generator.
# Transform
ssd_sources = ssd.transform(X=raw.get_data())
# Get psd of SSD-filtered signals.
psd, freqs = mne.time_frequency.psd_array_welch(
ssd_sources, sfreq=raw.info['sfreq'], n_fft=4096)
# Get spec_ratio information (already sorted).
# Note that this is not necessary if sort_by_spectral_ratio=True (default).
spec_ratio, sorter = ssd.get_spectral_ratio(ssd_sources)
# Plot spectral ratio (see Eq. 24 in Nikulin 2011).
fig, ax = plt.subplots(1)
ax.plot(spec_ratio, color='black')
ax.plot(spec_ratio[sorter], color='orange', label='sorted eigenvalues')
ax.set_xlabel("Eigenvalue Index")
ax.set_ylabel(r"Spectral Ratio $\frac{P_f}{P_{sf}}$")
ax.legend()
ax.axhline(1, linestyle='--')
# We can see that the initial sorting based on the eigenvalues
# was already quite good. However, when using few components only
# the sorting might make a difference.
# %%
# Let's also look at the power spectrum of that source and compare it to
# to the power spectrum of the source with lowest SNR.
below50 = freqs < 50
# for highlighting the freq. band of interest
bandfilt = (freqs_sig[0] <= freqs) & (freqs <= freqs_sig[1])
fig, ax = plt.subplots(1)
ax.loglog(freqs[below50], psd[0, below50], label='max SNR')
ax.loglog(freqs[below50], psd[-1, below50], label='min SNR')
ax.loglog(freqs[below50], psd[:, below50].mean(axis=0), label='mean')
ax.fill_between(freqs[bandfilt], 0, 10000, color='green', alpha=0.15)
ax.set_xlabel('log(frequency)')
ax.set_ylabel('log(power)')
ax.legend()
# We can clearly see that the selected component enjoys an SNR that is
# way above the average power spectrum.
# %%
# Epoched data
# ------------
# Although we suggest using this method before epoching, there might be some
# situations in which data can only be treated in chunks.
# Build epochs as sliding windows over the continuous raw file.
events = mne.make_fixed_length_events(raw, id=1, duration=5.0, overlap=0.0)
# Epoch length is 5 seconds.
epochs = Epochs(raw, events, tmin=0., tmax=5,
baseline=None, preload=True)
ssd_epochs = SSD(info=epochs.info,
reg='oas',
filt_params_signal=dict(l_freq=freqs_sig[0],
h_freq=freqs_sig[1],
l_trans_bandwidth=1,
h_trans_bandwidth=1),
filt_params_noise=dict(l_freq=freqs_noise[0],
h_freq=freqs_noise[1],
l_trans_bandwidth=1,
h_trans_bandwidth=1))
ssd_epochs.fit(X=epochs.get_data())
# Plot topographies.
pattern_epochs = mne.EvokedArray(data=ssd_epochs.patterns_[:4].T,
info=ssd_epochs.info)
pattern_epochs.plot_topomap(units=dict(mag='A.U.'), time_format='')
# %%
# References
# ----------
#
# .. footbibliography::
| 35.640845
| 79
| 0.662912
|
cff551e5b5c1f9558d284ea20028f6d72517a187
| 23,069
|
py
|
Python
|
zerver/lib/test_helpers.py
|
guettli/zulip
|
d9431a5e66a97c619aecfe92c4a2cb4acc609431
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/test_helpers.py
|
guettli/zulip
|
d9431a5e66a97c619aecfe92c4a2cb4acc609431
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/test_helpers.py
|
guettli/zulip
|
d9431a5e66a97c619aecfe92c4a2cb4acc609431
|
[
"Apache-2.0"
] | null | null | null |
import collections
import os
import re
import sys
import time
from contextlib import contextmanager
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from unittest import mock
import boto3
import fakeldap
import ldap
import orjson
from boto3.resources.base import ServiceResource
from django.conf import settings
from django.db.migrations.state import StateApps
from django.http import HttpResponse, HttpResponseRedirect
from django.test import override_settings
from django.urls import URLResolver
from moto import mock_s3
import zerver.lib.upload
from zerver.lib import cache
from zerver.lib.actions import do_set_realm_property
from zerver.lib.avatar import avatar_url
from zerver.lib.cache import get_cache_backend
from zerver.lib.db import Params, ParamsT, Query, TimeTrackingCursor
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.upload import LocalUploadBackend, S3UploadBackend
from zerver.models import (
Client,
Message,
Realm,
Subscription,
UserMessage,
UserProfile,
get_realm,
get_stream,
)
from zerver.tornado import django_api as django_tornado_api
from zerver.tornado.handlers import AsyncDjangoHandler, allocate_handler_id
from zerver.worker import queue_processors
from zproject.backends import ExternalAuthDataDict, ExternalAuthResult
if TYPE_CHECKING:
# Avoid an import cycle; we only need these for type annotations.
from zerver.lib.test_classes import MigrationsTestCase, ZulipTestCase
class MockLDAP(fakeldap.MockLDAP):
class LDAPError(ldap.LDAPError):
pass
class INVALID_CREDENTIALS(ldap.INVALID_CREDENTIALS):
pass
class NO_SUCH_OBJECT(ldap.NO_SUCH_OBJECT):
pass
class ALREADY_EXISTS(ldap.ALREADY_EXISTS):
pass
@contextmanager
def stub_event_queue_user_events(event_queue_return: Any, user_events_return: Any) -> Iterator[None]:
with mock.patch('zerver.lib.events.request_event_queue',
return_value=event_queue_return):
with mock.patch('zerver.lib.events.get_user_events',
return_value=user_events_return):
yield
@contextmanager
def simulated_queue_client(client: Callable[..., Any]) -> Iterator[None]:
with mock.patch.object(queue_processors, 'SimpleQueueClient', client):
yield
@contextmanager
def tornado_redirected_to_list(lst: List[Mapping[str, Any]]) -> Iterator[None]:
real_event_queue_process_notification = django_tornado_api.process_notification
django_tornado_api.process_notification = lambda notice: lst.append(notice)
# process_notification takes a single parameter called 'notice'.
# lst.append takes a single argument called 'object'.
# Some code might call process_notification using keyword arguments,
    # so mypy doesn't allow assigning lst.append to process_notification directly.
    # We therefore use a lambda whose parameter is explicitly named 'notice' to work around this.
yield
django_tornado_api.process_notification = real_event_queue_process_notification
class EventInfo:
def populate(self, call_args_list: List[Any]) -> None:
args = call_args_list[0][0]
self.realm_id = args[0]
self.payload = args[1]
self.user_ids = args[2]
@contextmanager
def capture_event(event_info: EventInfo) -> Iterator[None]:
# Use this for simple endpoints that throw a single event
# in zerver.lib.actions.
with mock.patch('zerver.lib.actions.send_event') as m:
yield
if len(m.call_args_list) == 0:
raise AssertionError('No event was sent inside actions.py')
if len(m.call_args_list) > 1:
raise AssertionError('Too many events sent by action')
event_info.populate(m.call_args_list)
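# Illustrative sketch, not from the original module; `do_something` is a
# hypothetical zerver.lib.actions function that sends exactly one event.
#
#     info = EventInfo()
#     with capture_event(info):
#         do_something(user_profile)
#     assert info.user_ids == [user_profile.id]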
@contextmanager
def simulated_empty_cache() -> Iterator[List[Tuple[str, Union[str, List[str]], Optional[str]]]]:
cache_queries: List[Tuple[str, Union[str, List[str]], Optional[str]]] = []
def my_cache_get(key: str, cache_name: Optional[str]=None) -> Optional[Dict[str, Any]]:
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys: List[str], cache_name: Optional[str]=None) -> Dict[str, Any]: # nocoverage -- simulated code doesn't use this
cache_queries.append(('getmany', keys, cache_name))
return {}
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints: bool=False) -> Generator[
List[Dict[str, Union[str, bytes]]], None, None]:
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries: List[Dict[str, Union[str, bytes]]] = []
def wrapper_execute(self: TimeTrackingCursor,
action: Callable[[str, ParamsT], None],
sql: Query,
params: ParamsT) -> None:
cache = get_cache_backend(None)
cache.clear()
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
if include_savepoints or not isinstance(sql, str) or 'SAVEPOINT' not in sql:
queries.append({
'sql': self.mogrify(sql, params).decode('utf-8'),
'time': f"{duration:.3f}",
})
def cursor_execute(self: TimeTrackingCursor, sql: Query,
params: Optional[Params]=None) -> None:
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params)
def cursor_executemany(self: TimeTrackingCursor, sql: Query,
params: Iterable[Params]) -> None:
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params) # nocoverage -- doesn't actually get used in tests
with mock.patch.multiple(TimeTrackingCursor, execute=cursor_execute, executemany=cursor_executemany):
yield queries
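# Illustrative sketch, not from the original module; `fetch_some_rows` is a
# hypothetical database-touching helper used only to show the pattern.
#
#     with queries_captured() as queries:
#         fetch_some_rows()
#     assert len(queries) == 2
#     print(queries[0]['sql'], queries[0]['time'])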
@contextmanager
def stdout_suppressed() -> Iterator[IO[str]]:
"""Redirect stdout to /dev/null."""
with open(os.devnull, 'a') as devnull:
stdout, sys.stdout = sys.stdout, devnull
yield stdout
sys.stdout = stdout
def reset_emails_in_zulip_realm() -> None:
realm = get_realm('zulip')
do_set_realm_property(realm, 'email_address_visibility',
Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE)
def get_test_image_file(filename: str) -> IO[Any]:
test_avatar_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tests/images'))
return open(os.path.join(test_avatar_dir, filename), 'rb')
def avatar_disk_path(user_profile: UserProfile, medium: bool=False, original: bool=False) -> str:
avatar_url_path = avatar_url(user_profile, medium)
assert avatar_url_path is not None
avatar_disk_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_url_path.split("/")[-2],
avatar_url_path.split("/")[-1].split("?")[0])
if original:
return avatar_disk_path.replace(".png", ".original")
return avatar_disk_path
def make_client(name: str) -> Client:
client, _ = Client.objects.get_or_create(name=name)
return client
def find_key_by_email(address: str) -> Optional[str]:
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-z0-9]{24})>")
for message in reversed(outbox):
if address in message.to:
match = key_regex.search(message.body)
assert match is not None
[key] = match.groups()
return key
return None # nocoverage -- in theory a test might want this case, but none do
def message_stream_count(user_profile: UserProfile) -> int:
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile: UserProfile) -> UserMessage:
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile: UserProfile) -> Message:
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_subscription(stream_name: str, user_profile: UserProfile) -> Subscription:
stream = get_stream(stream_name, user_profile.realm)
recipient_id = stream.recipient_id
return Subscription.objects.get(user_profile=user_profile,
recipient_id=recipient_id, active=True)
def get_user_messages(user_profile: UserProfile) -> List[Message]:
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyHandler(AsyncDjangoHandler):
def __init__(self) -> None:
allocate_handler_id(self)
class POSTRequestMock:
method = "POST"
def __init__(self, post_data: Dict[str, Any], user_profile: Optional[UserProfile]) -> None:
self.GET: Dict[str, Any] = {}
# Convert any integer parameters passed into strings, even
# though of course the HTTP API would do so. Ideally, we'd
# get rid of this abstraction entirely and just use the HTTP
# API directly, but while it exists, we need this code.
self.POST: Dict[str, str] = {}
for key in post_data:
self.POST[key] = str(post_data[key])
self.user = user_profile
self._tornado_handler = DummyHandler()
self._log_data: Dict[str, Any] = {}
self.META = {'PATH_INFO': 'test'}
self.path = ''
class HostRequestMock:
"""A mock request object where get_host() works. Useful for testing
routes that use Zulip's subdomains feature"""
def __init__(self, user_profile: Optional[UserProfile]=None, host: str=settings.EXTERNAL_HOST) -> None:
self.host = host
self.GET: Dict[str, Any] = {}
self.POST: Dict[str, Any] = {}
self.META = {'PATH_INFO': 'test'}
self.path = ''
self.user = user_profile
self.method = ''
self.body = ''
self.content_type = ''
def get_host(self) -> str:
return self.host
class MockPythonResponse:
def __init__(self, text: str, status_code: int, headers: Optional[Dict[str, str]]=None) -> None:
self.text = text
self.status_code = status_code
if headers is None:
headers = {'content-type': 'text/html'}
self.headers = headers
@property
def ok(self) -> bool:
return self.status_code == 200
def iter_content(self, n: int) -> Generator[str, Any, None]:
yield self.text[:n]
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS: List[Dict[str, Any]] = []
UrlFuncT = TypeVar("UrlFuncT", bound=Callable[..., HttpResponse]) # TODO: make more specific
def append_instrumentation_data(data: Dict[str, Any]) -> None:
INSTRUMENTED_CALLS.append(data)
def instrument_url(f: UrlFuncT) -> UrlFuncT:
if not INSTRUMENTING: # nocoverage -- option is always enabled; should we remove?
return f
else:
def wrapper(self: 'ZulipTestCase', url: str, info: object = {},
**kwargs: Any) -> HttpResponse:
start = time.time()
result = f(self, url, info, **kwargs)
delay = time.time() - start
test_name = self.id()
if '?' in url:
url, extra_info = url.split('?', 1)
else:
extra_info = ''
if isinstance(info, POSTRequestMock):
info = "<POSTRequestMock>"
elif isinstance(info, bytes):
info = "<bytes>"
elif isinstance(info, dict):
info = {
k: "<file object>" if hasattr(v, "read") and callable(getattr(v, "read")) else v
for k, v in info.items()
}
append_instrumentation_data(dict(
url=url,
status_code=result.status_code,
method=f.__name__,
delay=delay,
extra_info=extra_info,
info=info,
test_name=test_name,
kwargs=kwargs))
return result
return cast(UrlFuncT, wrapper) # https://github.com/python/mypy/issues/1927
def write_instrumentation_reports(full_suite: bool, include_webhooks: bool) -> None:
if INSTRUMENTING:
calls = INSTRUMENTED_CALLS
from zproject.urls import urlpatterns, v1_api_and_json_patterns
# Find our untested urls.
pattern_cnt: Dict[str, int] = collections.defaultdict(int)
def re_strip(r: Any) -> str:
return str(r).lstrip('^').rstrip('$')
def find_patterns(patterns: List[Any], prefixes: List[str]) -> None:
for pattern in patterns:
find_pattern(pattern, prefixes)
def cleanup_url(url: str) -> str:
if url.startswith('/'):
url = url[1:]
if url.startswith('http://testserver/'):
url = url[len('http://testserver/'):]
if url.startswith('http://zulip.testserver/'):
url = url[len('http://zulip.testserver/'):]
if url.startswith('http://testserver:9080/'):
url = url[len('http://testserver:9080/'):]
return url
def find_pattern(pattern: Any, prefixes: List[str]) -> None:
if isinstance(pattern, type(URLResolver)):
return # nocoverage -- shouldn't actually happen
if hasattr(pattern, 'url_patterns'):
return
canon_pattern = prefixes[0] + re_strip(pattern.pattern.regex.pattern)
cnt = 0
for call in calls:
if 'pattern' in call:
continue
url = cleanup_url(call['url'])
for prefix in prefixes:
if url.startswith(prefix):
match_url = url[len(prefix):]
if pattern.resolve(match_url):
if call['status_code'] in [200, 204, 301, 302]:
cnt += 1
call['pattern'] = canon_pattern
pattern_cnt[canon_pattern] += cnt
find_patterns(urlpatterns, ['', 'en/', 'de/'])
find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
assert len(pattern_cnt) > 100
untested_patterns = {p.replace("\\", "") for p in pattern_cnt if pattern_cnt[p] == 0}
exempt_patterns = {
# We exempt some patterns that are called via Tornado.
'api/v1/events',
'api/v1/events/internal',
'api/v1/register',
# We also exempt some development environment debugging
# static content URLs, since the content they point to may
# or may not exist.
'coverage/(?P<path>.+)',
'node-coverage/(?P<path>.+)',
'docs/(?P<path>.+)',
'casper/(?P<path>.+)',
'static/(?P<path>.*)',
*(webhook.url for webhook in WEBHOOK_INTEGRATIONS if not include_webhooks),
}
untested_patterns -= exempt_patterns
var_dir = 'var' # TODO make sure path is robust here
fn = os.path.join(var_dir, 'url_coverage.txt')
with open(fn, 'wb') as f:
for call in calls:
f.write(orjson.dumps(call, option=orjson.OPT_APPEND_NEWLINE))
if full_suite:
print(f'INFO: URL coverage report is in {fn}')
print('INFO: Try running: ./tools/create-test-api-docs')
if full_suite and len(untested_patterns): # nocoverage -- test suite error handling
print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
for untested_pattern in sorted(untested_patterns):
print(f" {untested_pattern}")
sys.exit(1)
def load_subdomain_token(response: HttpResponse) -> ExternalAuthDataDict:
assert isinstance(response, HttpResponseRedirect)
token = response.url.rsplit('/', 1)[1]
data = ExternalAuthResult(login_token=token, delete_stored_data=False).data_dict
assert data is not None
return data
FuncT = TypeVar('FuncT', bound=Callable[..., None])
def use_s3_backend(method: FuncT) -> FuncT:
@mock_s3
@override_settings(LOCAL_UPLOADS_DIR=None)
def new_method(*args: Any, **kwargs: Any) -> Any:
zerver.lib.upload.upload_backend = S3UploadBackend()
try:
return method(*args, **kwargs)
finally:
zerver.lib.upload.upload_backend = LocalUploadBackend()
return new_method
def create_s3_buckets(*bucket_names: Tuple[str]) -> List[ServiceResource]:
session = boto3.Session(settings.S3_KEY, settings.S3_SECRET_KEY)
s3 = session.resource('s3')
buckets = [s3.create_bucket(Bucket=name) for name in bucket_names]
return buckets
def use_db_models(method: Callable[..., None]) -> Callable[..., None]: # nocoverage
def method_patched_with_mock(self: 'MigrationsTestCase', apps: StateApps) -> None:
ArchivedAttachment = apps.get_model('zerver', 'ArchivedAttachment')
ArchivedMessage = apps.get_model('zerver', 'ArchivedMessage')
ArchivedUserMessage = apps.get_model('zerver', 'ArchivedUserMessage')
Attachment = apps.get_model('zerver', 'Attachment')
BotConfigData = apps.get_model('zerver', 'BotConfigData')
BotStorageData = apps.get_model('zerver', 'BotStorageData')
Client = apps.get_model('zerver', 'Client')
CustomProfileField = apps.get_model('zerver', 'CustomProfileField')
CustomProfileFieldValue = apps.get_model('zerver', 'CustomProfileFieldValue')
DefaultStream = apps.get_model('zerver', 'DefaultStream')
DefaultStreamGroup = apps.get_model('zerver', 'DefaultStreamGroup')
EmailChangeStatus = apps.get_model('zerver', 'EmailChangeStatus')
Huddle = apps.get_model('zerver', 'Huddle')
Message = apps.get_model('zerver', 'Message')
MultiuseInvite = apps.get_model('zerver', 'MultiuseInvite')
MutedTopic = apps.get_model('zerver', 'MutedTopic')
PreregistrationUser = apps.get_model('zerver', 'PreregistrationUser')
PushDeviceToken = apps.get_model('zerver', 'PushDeviceToken')
Reaction = apps.get_model('zerver', 'Reaction')
Realm = apps.get_model('zerver', 'Realm')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
RealmDomain = apps.get_model('zerver', 'RealmDomain')
RealmEmoji = apps.get_model('zerver', 'RealmEmoji')
RealmFilter = apps.get_model('zerver', 'RealmFilter')
Recipient = apps.get_model('zerver', 'Recipient')
Recipient.PERSONAL = 1
Recipient.STREAM = 2
Recipient.HUDDLE = 3
ScheduledEmail = apps.get_model('zerver', 'ScheduledEmail')
ScheduledMessage = apps.get_model('zerver', 'ScheduledMessage')
Service = apps.get_model('zerver', 'Service')
Stream = apps.get_model('zerver', 'Stream')
Subscription = apps.get_model('zerver', 'Subscription')
UserActivity = apps.get_model('zerver', 'UserActivity')
UserActivityInterval = apps.get_model('zerver', 'UserActivityInterval')
UserGroup = apps.get_model('zerver', 'UserGroup')
UserGroupMembership = apps.get_model('zerver', 'UserGroupMembership')
UserHotspot = apps.get_model('zerver', 'UserHotspot')
UserMessage = apps.get_model('zerver', 'UserMessage')
UserPresence = apps.get_model('zerver', 'UserPresence')
UserProfile = apps.get_model('zerver', 'UserProfile')
zerver_models_patch = mock.patch.multiple(
'zerver.models',
ArchivedAttachment=ArchivedAttachment,
ArchivedMessage=ArchivedMessage,
ArchivedUserMessage=ArchivedUserMessage,
Attachment=Attachment,
BotConfigData=BotConfigData,
BotStorageData=BotStorageData,
Client=Client,
CustomProfileField=CustomProfileField,
CustomProfileFieldValue=CustomProfileFieldValue,
DefaultStream=DefaultStream,
DefaultStreamGroup=DefaultStreamGroup,
EmailChangeStatus=EmailChangeStatus,
Huddle=Huddle,
Message=Message,
MultiuseInvite=MultiuseInvite,
MutedTopic=MutedTopic,
PreregistrationUser=PreregistrationUser,
PushDeviceToken=PushDeviceToken,
Reaction=Reaction,
Realm=Realm,
RealmAuditLog=RealmAuditLog,
RealmDomain=RealmDomain,
RealmEmoji=RealmEmoji,
RealmFilter=RealmFilter,
Recipient=Recipient,
ScheduledEmail=ScheduledEmail,
ScheduledMessage=ScheduledMessage,
Service=Service,
Stream=Stream,
Subscription=Subscription,
UserActivity=UserActivity,
UserActivityInterval=UserActivityInterval,
UserGroup=UserGroup,
UserGroupMembership=UserGroupMembership,
UserHotspot=UserHotspot,
UserMessage=UserMessage,
UserPresence=UserPresence,
UserProfile=UserProfile,
)
zerver_test_helpers_patch = mock.patch.multiple(
'zerver.lib.test_helpers',
Client=Client,
Message=Message,
Subscription=Subscription,
UserMessage=UserMessage,
UserProfile=UserProfile,
)
zerver_test_classes_patch = mock.patch.multiple(
'zerver.lib.test_classes',
Client=Client,
Message=Message,
Realm=Realm,
Recipient=Recipient,
Stream=Stream,
Subscription=Subscription,
UserProfile=UserProfile,
)
with zerver_models_patch,\
zerver_test_helpers_patch,\
zerver_test_classes_patch:
method(self, apps)
return method_patched_with_mock
def create_dummy_file(filename: str) -> str:
filepath = os.path.join(settings.TEST_WORKER_DIR, filename)
with open(filepath, 'w') as f:
f.write('zulip!')
return filepath
def zulip_reaction_info() -> Dict[str, str]:
return dict(
emoji_name='zulip',
emoji_code='zulip',
reaction_type='zulip_extra_emoji',
)
| 38.320598
| 146
| 0.639213
|
85b16c3336dc2ac41c92a43966b446f103d69bb2
| 4,295
|
py
|
Python
|
model_zoo/dac_ctr/feature_transform.py
|
QiJune/elasticdl
|
6b01f5b32fd757badff96ed652662bd94afe9263
|
[
"MIT"
] | null | null | null |
model_zoo/dac_ctr/feature_transform.py
|
QiJune/elasticdl
|
6b01f5b32fd757badff96ed652662bd94afe9263
|
[
"MIT"
] | null | null | null |
model_zoo/dac_ctr/feature_transform.py
|
QiJune/elasticdl
|
6b01f5b32fd757badff96ed652662bd94afe9263
|
[
"MIT"
] | null | null | null |
# Copyright 2020 The ElasticDL Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from elasticdl.python.common.log_utils import default_logger as logger
from elasticdl_preprocessing.layers import (
ConcatenateWithOffset,
Discretization,
Hashing,
Normalizer,
)
from model_zoo.dac_ctr.feature_config import (
BUCKET_FEATURES,
FEATURE_BOUNDARIES,
FEATURE_DISTINCT_COUNT,
FEATURES_AVGS,
FEATURES_STDDEVS,
HASH_FEATURES,
STANDARDIZED_FEATURES,
)
MAX_HASHING_BUCKET_SIZE = 1000000
def transform_feature(inputs, feature_groups):
"""According to the FeatureConfig object and feature groups to
transform inputs to dense tensors.
Args:
inputs: A dict contains Keras inputs where the key is the
feature name and the value is the Keras input.
feature_groups: 2-D list. each sub-list contains feature names
of a group
Returns:
standardized_tensor: A float tensor
group_tensors: A dict where the key is group name like "group_0"
and the value is the integer tensor.
group_max_ids: A dict which has the same keys as group_tensors and
the value is the max value of the integer tensor in group_tensors.
"""
standardized_outputs = []
for feature in STANDARDIZED_FEATURES:
standardized_result = Normalizer(
subtractor=FEATURES_AVGS[feature],
divisor=FEATURES_STDDEVS[feature],
)(inputs[feature])
standardized_outputs.append(standardized_result)
numerical_tensor = (
tf.concat(standardized_outputs, -1) if standardized_outputs else None
)
if not feature_groups:
feature_names = BUCKET_FEATURES + HASH_FEATURES
feature_groups = [feature_names]
id_tensors = {}
max_ids = {}
for i, features in enumerate(feature_groups):
group_name = "group_{}".format(i)
id_tensor, max_id = transform_group(inputs, features)
id_tensors[group_name] = id_tensor
max_ids[group_name] = max_id
return numerical_tensor, id_tensors, max_ids
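# Illustrative sketch, not from the original file: one way the outputs of
# transform_feature might be consumed in a Keras model. The `inputs` dict and
# the embedding size of 8 are assumptions for illustration only.
#
#     numerical, id_tensors, max_ids = transform_feature(inputs, feature_groups=None)
#     embeddings = [
#         tf.keras.layers.Embedding(max_ids[group] + 1, 8)(tensor)
#         for group, tensor in id_tensors.items()
#     ]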
def transform_group(inputs, features):
"""Transform the inputs and concatenate inputs in a group
to a dense tensor
Args:
inputs: A dict contains Keras inputs where the key is the
feature name and the value is the Keras input.
features: A list which contains feature names.
Returns:
A integer tensor,
max_id: The max value of the returned tensor
"""
group_items = []
id_offsets = [0]
for feature in features:
if feature in BUCKET_FEATURES:
discretize_layer = Discretization(bins=FEATURE_BOUNDARIES[feature])
transform_output = discretize_layer(inputs[feature])
group_items.append(transform_output)
id_offsets.append(id_offsets[-1] + len(discretize_layer.bins) + 1)
logger.info("{}:{}".format(feature, discretize_layer.bins))
elif feature in HASH_FEATURES:
num_bins = FEATURE_DISTINCT_COUNT[feature]
if num_bins > MAX_HASHING_BUCKET_SIZE:
num_bins = MAX_HASHING_BUCKET_SIZE
hash_layer = Hashing(num_bins=num_bins)
transform_output = hash_layer(inputs[feature])
id_offsets.append(id_offsets[-1] + hash_layer.num_bins)
group_items.append(transform_output)
logger.info("{}:{}".format(feature, hash_layer.num_bins))
else:
logger.warning(
"The preprocessing is not configured for the feature "
"{}".format(feature)
)
concated = ConcatenateWithOffset(id_offsets[0:-1])(group_items)
max_id = id_offsets[-1]
return concated, max_id
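# Worked example, derived from the code above: for a group with one bucketized
# feature whose Discretization layer holds 3 boundaries (4 possible bucket ids)
# and one hashed feature with num_bins=100, id_offsets grows as [0, 4, 104].
# The hashed ids are therefore shifted by 4 before concatenation, and max_id is
# 104, which a downstream embedding layer can use as its input dimension.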
| 36.092437
| 79
| 0.686147
|
935cdfa4a2ac2af7ec1353c1ae48f791a06c5e99
| 2,313
|
py
|
Python
|
houdini/handlers/login/__init__.py
|
TeegxCP/houdini
|
edb55ef6094500c6a20a6827a6af9525ea2b7b36
|
[
"MIT"
] | 444
|
2020-05-30T22:28:53.000Z
|
2022-02-13T04:27:48.000Z
|
houdini/handlers/login/__init__.py
|
TeegxCP/houdini
|
edb55ef6094500c6a20a6827a6af9525ea2b7b36
|
[
"MIT"
] | 43
|
2020-05-18T17:40:12.000Z
|
2021-11-05T02:53:46.000Z
|
houdini/handlers/login/__init__.py
|
TeegxCP/houdini
|
edb55ef6094500c6a20a6827a6af9525ea2b7b36
|
[
"MIT"
] | 56
|
2020-05-20T16:31:18.000Z
|
2022-03-07T19:49:59.000Z
|
from houdini import handlers
from houdini.constants import ClientType
from houdini.converters import VersionChkConverter
from houdini.data.buddy import BuddyList
from houdini.handlers import XMLPacket
@handlers.handler(XMLPacket('verChk'))
@handlers.allow_once
async def handle_version_check(p, version: VersionChkConverter):
if not p.server.config.single_client_mode:
if p.server.config.legacy_version == version:
p.client_type = ClientType.Legacy
elif p.server.config.vanilla_version == version:
p.client_type = ClientType.Vanilla
elif p.server.config.default_version == version:
p.client_type = p.server.config.default_client
if p.client_type is None:
await p.send_xml({'body': {'action': 'apiKO', 'r': '0'}})
await p.close()
else:
await p.send_xml({'body': {'action': 'apiOK', 'r': '0'}})
@handlers.handler(XMLPacket('rndK'))
@handlers.allow_once
async def handle_random_key(p, _):
await p.send_xml({'body': {'action': 'rndK', 'r': '-1'}, 'k': p.server.config.auth_key})
async def get_server_presence(p, pdata):
buddy_worlds = []
world_populations = []
pops = await p.server.redis.hgetall('houdini.population')
for server_id, server_population in pops.items():
server_population = 7 if int(server_population) == p.server.config.capacity \
else int(server_population) // (p.server.config.capacity // 6)
server_population = server_population if not pdata.moderator else 0
world_populations.append(f'{int(server_id)},{int(server_population)}')
server_key = f'houdini.players.{int(server_id)}'
if await p.server.redis.scard(server_key):
async with p.server.db.transaction():
buddies = BuddyList.select('buddy_id').where(BuddyList.penguin_id == pdata.id).gino.iterate()
async with p.server.redis.pipeline(transaction=True) as tr:
async for buddy_id, in buddies:
tr.sismember(server_key, buddy_id)
online_buddies = await tr.execute()
if any(online_buddies):
buddy_worlds.append(str(int(server_id)))
return '|'.join(world_populations), '|'.join(buddy_worlds)
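# Worked example, derived from the code above: with a configured capacity of
# 300, a full server reports population bucket 7, while a server holding 120
# players reports 120 // (300 // 6) = 2. Moderators always receive bucket 0,
# so no world ever appears full to them.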
| 39.87931
| 109
| 0.653696
|
89199b273ca9e9f3c60ee387520a6385f6db6c3e
| 6,265
|
py
|
Python
|
galpy/potential_src/FlattenedPowerPotential.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
galpy/potential_src/FlattenedPowerPotential.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
galpy/potential_src/FlattenedPowerPotential.py
|
fardal/galpy
|
93a1b6fc8d138899922127086cc66184919c8cba
|
[
"BSD-3-Clause"
] | null | null | null |
###############################################################################
# FlattenedPowerPotential.py: Power-law potential that is flattened in the
# potential (NOT the density)
#
# amp
# phi(R,z)= --------- ; m^2 = R^2 + z^2/q^2
# m^\alpha
###############################################################################
import numpy as nu
from scipy import special, integrate
from galpy.potential_src.Potential import Potential
_CORE=10**-8
class FlattenedPowerPotential(Potential):
"""Class that implements a power-law potential that is flattened in the potential (NOT the density)
.. math::
\\Phi(R,z) = -\\frac{\\mathrm{amp}}{\\alpha\\,\\left(R^2+(z/q)^2+\\mathrm{core}^2\\right)^{\\alpha/2}}
and the same as LogarithmicHaloPotential for :math:`\\alpha=0`
See Figure 1 in `Evans (1994) <http://adsabs.harvard.edu/abs/1994MNRAS.267..333E>`_ for combinations of alpha and q that correspond to positive densities
"""
def __init__(self,amp=1.,alpha=0.5,q=0.9,core=_CORE,normalize=False):
"""
NAME:
__init__
PURPOSE:
initialize a flattened power-law potential
INPUT:
amp - amplitude to be applied to the potential (default: 1)
alpha - power
q - flattening
core - core radius
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
OUTPUT:
(none)
HISTORY:
2013-01-09 - Written - Bovy (IAS)
"""
Potential.__init__(self,amp=amp)
self.alpha= alpha
self.q2= q**2.
self.core2= core**2.
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
self.hasC= True
self.hasC_dxdv= True
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2013-01-09 - Started - Bovy (IAS)
"""
if self.alpha == 0.:
return 1./2.*nu.log(R**2.+z**2./self.q2+self.core2)
else:
m2= self.core2+R**2.+z**2./self.q2
return -m2**(-self.alpha/2.)/self.alpha
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
if self.alpha == 0.:
return -R/(R**2.+z**2./self.q2+self.core2)
else:
m2= self.core2+R**2.+z**2./self.q2
return -m2**(-self.alpha/2.-1.)*R
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
if self.alpha == 0.:
return -z/self.q2/(R**2.+z**2./self.q2+self.core2)
else:
m2= self.core2+R**2.+z**2./self.q2
return -m2**(-self.alpha/2.-1.)*z/self.q2
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
           _R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2011-10-09 - Written - Bovy (NYU)
"""
if self.alpha == 0.:
denom= 1./(R**2.+z**2./self.q2+self.core2)
return denom-2.*R**2.*denom**2.
else:
m2= self.core2+R**2.+z**2./self.q2
return -m2**(-self.alpha/2.-1.)*((self.alpha+2)*R**2./m2-1.)
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
           t - time
OUTPUT:
the second vertical derivative
HISTORY:
2012-07-26 - Written - Bovy (IAS@MPIA)
"""
if self.alpha == 0.:
denom= 1./(R**2.+z**2./self.q2+self.core2)
return denom/self.q2-2.*z**2.*denom**2./self.q2**2.
else:
m2= self.core2+R**2.+z**2./self.q2
return -1./self.q2*m2**(-self.alpha/2.-1.)*((self.alpha+2)*z**2./m2/self.q2-1.)
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
           evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2013-01-09 - Written - Bovy (IAS)
"""
if self.alpha == 0.:
return 1./4./nu.pi/self.q2*((2.*self.q2+1.)*self.core2+R**2.\
+(2.-1./self.q2)*z**2.)/\
(R**2.+z**2./self.q2+self.core2)**2.
else:
m2= self.core2+R**2.+z**2./self.q2
return 1./self.q2*(self.core2*(1.+2.*self.q2)+R**2.*(1.-self.alpha*self.q2)+z**2.*(2.-(1.+self.alpha)/self.q2))*m2**(-self.alpha/2.-2.)/4./nu.pi
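# Illustrative usage sketch, not from the original file; parameter values are
# arbitrary assumptions.
#
#     fp = FlattenedPowerPotential(alpha=0.5, q=0.9, normalize=1.)
#     fp(1., 0.)          # potential at R=1, z=0
#     fp.Rforce(1., 0.1)  # radial force at R=1, z=0.1
#
# normalize=1. rescales the amplitude so that vc(1.,0.)=1 in galpy's internal
# units, as described in __init__ above.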
| 31.014851
| 172
| 0.480447
|
e6866b851103bd65fe3988d9b6f2dd5bc74c5176
| 5,978
|
py
|
Python
|
fkie_iop_node_manager/src/fkie_iop_node_manager/statistics/collector.py
|
fkie/iop_node_manager
|
c2e12989a6baf7098c93c33ca6e95acf7584e462
|
[
"Apache-2.0"
] | 1
|
2021-05-05T14:57:06.000Z
|
2021-05-05T14:57:06.000Z
|
fkie_iop_node_manager/src/fkie_iop_node_manager/statistics/collector.py
|
fkie/iop_node_manager
|
c2e12989a6baf7098c93c33ca6e95acf7584e462
|
[
"Apache-2.0"
] | null | null | null |
fkie_iop_node_manager/src/fkie_iop_node_manager/statistics/collector.py
|
fkie/iop_node_manager
|
c2e12989a6baf7098c93c33ca6e95acf7584e462
|
[
"Apache-2.0"
] | null | null | null |
# ****************************************************************************
#
# fkie_iop_node_manager
# Copyright 2019 Fraunhofer FKIE
# Author: Alexander Tiderko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ****************************************************************************
from __future__ import division, absolute_import, print_function, unicode_literals
import logging
import os
import threading
import time
import traceback
from .msg_entry import MsgEntry
class Empty(Exception):
pass
class Full(Exception):
pass
class Collector(object):
def __init__(self, cfg, logger_name='collector'):
'''
:param fkie_iop_node_manager.config.Config cfg: configuration
'''
self.cfg = cfg
self.logger = logging.getLogger(logger_name)
self._stop = False
self._cv = threading.Condition()
self._msgs_recv = []
self._idx_current = 0
self._count = 0
self.stats_file = None
self.stats_path = ''
self.stats_enabled = self.cfg.param('statistics/enable', False)
self.logger.info("Statistics enabled: %s" % self.stats_enabled)
if self.stats_enabled:
self._stats_file_open()
self.cfg.add_param_listener('global/statistics/enable', self._callback_param_enable)
self._thread_analyse = threading.Thread(target=self._loop_write_to_file)
self._thread_analyse.start()
def stop(self):
self._stop = True
self.clear()
self._stats_file_close()
def clear(self):
self.logger.debug("Clear collector")
with self._cv:
del self._msgs_recv[:]
self._cv.notify()
def _stats_file_open(self):
if self.stats_file is not None:
return
try:
self.stats_dir = os.path.expanduser(self.cfg.param('statistics/path', False))
if not os.path.isdir(self.stats_dir):
os.makedirs(self.stats_dir)
self.stats_path = os.path.join(self.stats_dir, 'last.msgs')
if os.path.exists(self.stats_path):
print("rename", self.stats_path, os.path.join(self.stats_dir, 'prev_%.0f.msgs' % time.time()))
os.rename(self.stats_path, os.path.join(self.stats_dir, 'prev_%.0f.msgs' % time.time()))
self.logger.info(" write statistics to '%s'" % self.stats_path)
self.stats_file = open(self.stats_path, 'w+')
self.stats_file.write(MsgEntry.title())
self.stats_file.flush()
except Exception as err:
self.stats_file = None
self.logger.warning("Error while open statistics file: %s" % err)
def _stats_file_close(self):
try:
if self.stats_file is not None:
self.stats_file.close()
self.stats_file = None
except Exception as err:
self.logger.warning("Error while close statistics file: %s" % err)
def str2bool(self, v):
if isinstance(v, bool):
return v
return v.lower() in ["yes", "true", "t", "1"]
def _callback_param_enable(self, param, value):
boolval = self.str2bool(value)
if boolval:
self._stats_file_open()
else:
self._stats_file_close()
def add(self, msg):
'''
Adds a message to the queue. The items of this queue are written in a separate thread to a file.
        :param fkie_iop_node_manager.message.Message msg: received or sent message.
'''
if self.stats_file is None:
return
msg.ts_recv = time.time()
with self._cv:
self._msgs_recv.append(msg)
self._count += 1
self._cv.notify()
    def get(self, block=True):
        '''
        Return the next queued message. If the queue is empty, wait for a
        notification when `block` is True; raise Empty if no message is
        available afterwards.
        '''
        item = None
        try:
            with self._cv:
                if self.size() == 0 and block:
                    self._cv.wait()
                try:
                    item = self._msgs_recv.pop(0)
                    self._count -= 1
                except IndexError:
                    pass
                if item is None:
                    raise Empty()
                self.logger.debug("get %s" % item)
                return item
        except Empty:
            raise
        except Exception:
            print(traceback.format_exc())
            return None
def size(self):
return self._count
def _loop_write_to_file(self):
'''
Writes the messages of the queue to a file.
'''
while not self._stop:
try:
msg = self.get()
if msg is not None:
if msg.tinfo_src is None:
self.logger.warning("Collects a message without valid tinfo_src, ignore...")
continue
try:
self.stats_file.write(MsgEntry.toline(msg, self.cfg))
self.stats_file.flush()
except Exception as err:
print(traceback.format_exc())
self.logger.warning("Error write message to statistics file: %s" % err)
except Empty:
pass
except Exception as e:
print(traceback.format_exc())
self.logger.warning("Error while get send item from collector queue: %s" % e)
| 34.554913
| 110
| 0.556708
|
6b9e076b3fe61f349d4021b5919bece1fe3756b4
| 12,410
|
py
|
Python
|
sdks/python/client/argo_workflows/model/key_to_path.py
|
parallel-domain/argo-workflows
|
c055b48b6e216dcdeb1c9840f14199a72329bdaf
|
[
"Apache-2.0"
] | 1
|
2022-02-20T16:56:43.000Z
|
2022-02-20T16:56:43.000Z
|
sdks/python/client/argo_workflows/model/key_to_path.py
|
parallel-domain/argo-workflows
|
c055b48b6e216dcdeb1c9840f14199a72329bdaf
|
[
"Apache-2.0"
] | 10
|
2022-02-21T04:17:37.000Z
|
2022-03-11T11:34:30.000Z
|
sdks/python/client/argo_workflows/model/key_to_path.py
|
parallel-domain/argo-workflows
|
c055b48b6e216dcdeb1c9840f14199a72329bdaf
|
[
"Apache-2.0"
] | null | null | null |
"""
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
class KeyToPath(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'key': (str,), # noqa: E501
'path': (str,), # noqa: E501
'mode': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'key': 'key', # noqa: E501
'path': 'path', # noqa: E501
'mode': 'mode', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, key, path, *args, **kwargs): # noqa: E501
"""KeyToPath - a model defined in OpenAPI
Args:
key (str): The key to project.
path (str): The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mode (int): Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.key = key
self.path = path
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, key, path, *args, **kwargs): # noqa: E501
"""KeyToPath - a model defined in OpenAPI
Args:
key (str): The key to project.
path (str): The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
mode (int): Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.key = key
self.path = path
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 45.625
| 310
| 0.578566
|
37d7feeccb3e6c0919f8d30e723c3af1a9445465
| 662
|
py
|
Python
|
venv/lib/python3.9/site-packages/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 908
|
2015-01-01T21:20:45.000Z
|
2022-03-29T20:47:16.000Z
|
venv/lib/python3.9/site-packages/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 402
|
2015-01-04T01:30:19.000Z
|
2022-03-24T11:56:38.000Z
|
venv/lib/python3.9/site-packages/libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 305
|
2015-01-18T19:29:37.000Z
|
2022-03-24T09:40:09.000Z
|
"""
Fixer for adding:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
This is "stage 1": hopefully uncontroversial changes.
Stage 2 adds ``unicode_literals``.
"""
from lib2to3 import fixer_base
from libfuturize.fixer_util import future_import
class FixAddFutureImportsExceptUnicodeLiterals(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "file_input"
run_order = 9
def transform(self, node, results):
# Reverse order:
future_import(u"absolute_import", node)
future_import(u"division", node)
future_import(u"print_function", node)
| 24.518519
| 67
| 0.73565
|
3ec6bda85c25e2b025def7d171ba9457d0985703
| 475
|
py
|
Python
|
apps/user_app/migrations/0006_auto_20161227_2017.py
|
lightless233/npiss
|
8338f50d971600fe2b2366836ca2fb543f2276d5
|
[
"MIT"
] | 1
|
2016-11-22T13:25:02.000Z
|
2016-11-22T13:25:02.000Z
|
apps/user_app/migrations/0006_auto_20161227_2017.py
|
LiGhT1EsS/npiss
|
8338f50d971600fe2b2366836ca2fb543f2276d5
|
[
"MIT"
] | 4
|
2020-06-05T17:28:20.000Z
|
2022-03-11T23:15:49.000Z
|
apps/user_app/migrations/0006_auto_20161227_2017.py
|
lightless233/npiss
|
8338f50d971600fe2b2366836ca2fb543f2276d5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-27 12:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_app', '0005_auto_20161227_2016'),
]
operations = [
migrations.AlterField(
model_name='pissuser',
name='created_time',
field=models.DateTimeField(auto_now=True, null=True),
),
]
| 22.619048
| 65
| 0.629474
|
5c3b1724c19e25ee420ec68a069a3bb17d0a9ee4
| 6,526
|
py
|
Python
|
texar/tf/module_base.py
|
jiajunhua/asyml-texar
|
22d7b8eea5bd43eef68b615ba87b2e8220bafdf8
|
[
"Apache-2.0"
] | 1
|
2020-09-18T04:36:43.000Z
|
2020-09-18T04:36:43.000Z
|
texar/tf/module_base.py
|
jiajunhua/asyml-texar
|
22d7b8eea5bd43eef68b615ba87b2e8220bafdf8
|
[
"Apache-2.0"
] | 6
|
2020-09-26T01:31:48.000Z
|
2021-08-25T16:13:51.000Z
|
texar/tf/module_base.py
|
jiajunhua/asyml-texar
|
22d7b8eea5bd43eef68b615ba87b2e8220bafdf8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for modules.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
from texar.tf.utils.exceptions import TexarError
from texar.tf.hyperparams import HParams
__all__ = [
"ModuleBase"
]
class ModuleBase(object):
"""Base class inherited by modules that create Variables and are
configurable through hyperparameters.
A Texar module inheriting :class:`~texar.tf.ModuleBase` has following key
features:
- **Convenient variable re-use**: A module instance creates \
its own sets of variables, and automatically re-uses its variables on \
subsequent calls. Hence TF variable/name scope is \
transparent to users. For example:
.. code-block:: python
encoder = UnidirectionalRNNEncoder(hparams) # create instance
output_1 = encoder(inputs_1) # variables are created
output_2 = encoder(inputs_2) # variables are re-used
print(encoder.trainable_variables) # access trainable variables
# [ ... ]
- **Configurable through hyperparameters**: Each module defines \
allowed hyperparameters and default values. Hyperparameters not \
specified by users will take default values.
    - **Callable**: As in the example above, a module instance is "called" \
with input tensors and returns output tensors. Every call of a module \
will add ops to the Graph to perform the module's logic.
Args:
hparams (dict, optional): Hyperparameters of the module. See
:meth:`default_hparams` for the structure and default values.
.. document private functions
.. automethod:: _build
"""
def __init__(self, hparams=None):
if not hasattr(self, '_hparams'):
self._hparams = HParams(hparams, self.default_hparams())
else:
# Probably already parsed by subclasses. We rely on subclass
# implementations to get this right.
# As a sanity check, we require `hparams` to be `None` in this case.
if hparams is not None:
raise ValueError(
"`self._hparams` already exists. Argument `hparams` "
"must be set to `None` in this case.")
self._template = tf.make_template(self._hparams.name, self._build,
create_scope_now_=True)
self._unique_name = self.variable_scope.name.split("/")[-1]
self._trainable_variables = []
self._built = False
@staticmethod
def default_hparams():
"""Returns a `dict` of hyperparameters of the module with default
values. Used to replace the missing values of input `hparams`
during module construction.
.. code-block:: python
{
"name": "module"
}
"""
return {
"name": "module"
}
def _build(self, *args, **kwargs):
"""Subclass must implement this method to build the logic.
Args:
*args: Arguments.
**kwargs: Keyword arguments.
Returns:
Output Tensor(s).
"""
raise NotImplementedError
def __call__(self, *args, **kwargs):
"""Executes the module logic defined in _build method
Args:
*args: Arguments of _build method.
**kwargs: Keyword arguments of _build method.
Returns:
The output of _build method.
"""
return self._template(*args, **kwargs)
def _add_internal_trainable_variables(self): # pylint: disable=invalid-name
"""Collects trainable variables constructured internally in this module.
This is typically called at the end of `_build()` where all necessary
trainable variables have been constructed.
"""
scope_name = self.variable_scope.name
# Escape to handle possible "." characters in the name.
# Append a slash to the end to avoid searching scopes that have this
# scope name as a prefix.
scope_name = re.escape(scope_name) + "/"
internal_trainable_variables = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)
self._add_trainable_variable(internal_trainable_variables)
def _add_trainable_variable(self, variable):
"""Adds a trainable variable to the trainable variable list of the
module.
Args:
variable: a (list of) trainable variable(s) constructed either
                internally in the module or constructed outside but used
inside the module.
"""
if isinstance(variable, (list, tuple)):
for var in variable:
self._add_trainable_variable(var)
else:
if variable not in self._trainable_variables:
self._trainable_variables.append(variable)
@property
def variable_scope(self):
"""The variable scope of the module.
"""
return self._template.variable_scope
@property
def name(self):
"""The uniquified name of the module.
"""
return self._unique_name
@property
def trainable_variables(self):
"""The list of trainable variables of the module.
"""
if not self._built:
raise TexarError(
"Attempting to access trainable_variables before module %s "
"was fully built. The module is built once it is called, "
"e.g., with `%s(...)`" % (self.name, self.name))
return self._trainable_variables
@property
def hparams(self):
"""An :class:`~texar.tf.HParams` instance. The hyperparameters
of the module.
"""
return self._hparams
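# Illustrative subclass sketch, not from the original file; the layer choice
# and hyperparameter names are assumptions for illustration.
#
#     class MyModule(ModuleBase):
#         @staticmethod
#         def default_hparams():
#             return {"name": "my_module", "num_units": 64}
#         def _build(self, inputs):
#             output = tf.layers.dense(inputs, self._hparams.num_units)
#             self._add_internal_trainable_variables()
#             self._built = True
#             return output
#
# Calling MyModule(hparams)(inputs) runs _build inside the module's template
# scope, so a second call re-uses the variables created by the first call.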
| 34.529101
| 80
| 0.630555
|
6c664978ad6b2474b5abc6332caee877be61fb00
| 22,466
|
py
|
Python
|
lgp/models/alfred/hlsm/hlsm_subgoal_model.py
|
keio-smilab22/HLSM-MAT
|
6f34a5ec9226b28c01ae47ef16fe28662ab32935
|
[
"BSD-3-Clause"
] | null | null | null |
lgp/models/alfred/hlsm/hlsm_subgoal_model.py
|
keio-smilab22/HLSM-MAT
|
6f34a5ec9226b28c01ae47ef16fe28662ab32935
|
[
"BSD-3-Clause"
] | null | null | null |
lgp/models/alfred/hlsm/hlsm_subgoal_model.py
|
keio-smilab22/HLSM-MAT
|
6f34a5ec9226b28c01ae47ef16fe28662ab32935
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Dict, List, Union
import torch
import torch.nn as nn
from lgp.abcd.functions.action_proposal import ActionProposal
from lgp.abcd.model import LearnableModel
import lgp.env.alfred.segmentation_definitions as segdef
from lgp.env.alfred.alfred_subgoal import AlfredSubgoal
from lgp.ops.spatial_distr import multidim_logsoftmax
from lgp.models.alfred.hlsm.hlsm_state_repr import AlfredSpatialStateRepr
from lgp.models.alfred.hlsm.hlsm_task_repr import HlsmTaskRepr
from lgp.models.alfred.hlsm.transformer_modules.subgoal_history_encoder import SubgoalHistoryEncoder
from lgp.models.alfred.hlsm.transformer_modules.state_repr_encoder_pooled import StateReprEncoderPooled
from lgp.models.alfred.hlsm.transformer_modules.language_encoder import BERTLanguageEncoder
from lgp.models.alfred.hlsm.transformer_modules.action_predictor import ActionPredictor
from lgp.models.alfred.hlsm.unets.lingunet_3 import Lingunet3
from lgp.models.alfred.voxel_grid import VoxelGrid
from lgp.ops.misc import batched_index_select
from lgp.utils.viz import show_image
from lgp.flags import GLOBAL_VIZ
from lgp.parameters import Hyperparams
class HlsmSubgoalModel(ActionProposal, LearnableModel):
class ModelState(ActionProposal.ModelState):
def __init__(self):
self.action_history = []
self.logged_failure = False
self.step = 0
def action_execution_failed(self):
# Remove the last action from the action history and allow a re-try.
if not self.logged_failure:
print(" LOGGING SKILL FAILURE")
self.action_history = self.action_history[:-1]
self.logged_failure = True
def log_action(self, action):
# When we log a new predicted action, reset the logged_failure flag so that if this action fails,
# it can be removed from the action history
self.logged_failure = False
self.action_history.append(action)
print(" LOGGING NEW ACTION")
@classmethod
def blank(cls):
return None
def __init__(self, hparams: Hyperparams):
super().__init__()
self.action_type_dim = AlfredSubgoal.get_action_type_space_dim()
self.data_c = AlfredSpatialStateRepr.get_num_data_channels()
self.hidden_dim = 128
self.trm_dim = hparams.get("trm_dim")
self.no_posemb_baseline = hparams.get("no_posemb_baseline", False)
self.no_acthist_baseline = hparams.get("no_acthist_baseline", False)
self.no_vision_baseline = hparams.get("no_vision_baseline", False)
self.no_language_baseline = hparams.get("no_language_baseline", False)
print("SpatialTransformerModel2 baseline config:"
f"No vision: {self.no_vision_baseline}"
f"No language: {self.no_language_baseline}"
f"No posemb: {self.no_posemb_baseline}"
f"No acthist: {self.no_acthist_baseline}")
# Networks / Models
if not self.no_language_baseline:
self.language_encoder = BERTLanguageEncoder(self.hidden_dim)
if not self.no_vision_baseline:
self.state_repr_encoder = StateReprEncoderPooled(self.hidden_dim)
self.action_history_encoder = SubgoalHistoryEncoder(self.hidden_dim,
ablate_no_acthist=self.no_acthist_baseline,
ablate_no_posemb=self.no_posemb_baseline)
self.mask_model = Lingunet3(2 * self.action_type_dim + AlfredSpatialStateRepr.get_2d_feature_dim(),
self.hidden_dim, 1)
self.action_predictor = ActionPredictor(self.hidden_dim, joint_prob=True, trm_dim=self.trm_dim)
self.nllloss = nn.NLLLoss(reduce=True, size_average=True)
self.act = nn.LeakyReLU()
self.iter_step = 0
self.model_state = None
self.log_internal_activations = True
self.trace = {
"subgoal": None
}
self.metrics = {}
self.reset_state()
def set_log_internal_activations(self, enable):
self.log_internal_activations = enable
def _get_state_for_action(self, action: AlfredSubgoal) -> "ActionProposal.ModelState":
# TODO: If we want to do any action-conditioned reasoning, do it here.
# TODO: Make sure to not increment model_state.step in two different places (here and forward)
return self.model_state
def get_trace(self, device="cpu") -> Dict:
return {k: v.to(device) if v is not None else v for k, v in self.trace.items()}
def clear_trace(self):
...
#self.trace = {}
def action_execution_failed(self):
self.model_state.action_execution_failed()
def log_action(self, action: AlfredSubgoal):
self.model_state.log_action(action)
def get_state(self) -> "HlsmSubgoalModel.ModelState":
return self.model_state
def set_state(self, state: "HlsmSubgoalModel.ModelState"):
self.model_state = state
def reset_state(self):
self.model_state = HlsmSubgoalModel.ModelState()
self.trace = {
"subgoal": None
}
def _argmax_action(self, type_distr, arg_vectors):
act_type_id = torch.argmax(type_distr, dim=1)
act_type_id = act_type_id[0].item()
act_type_str = AlfredSubgoal.action_type_intid_to_str(act_type_id)
# TODO: Check for HL->Subgoal
arg_vector = arg_vectors[:, act_type_id, :]
# Computed for debugging purposes only
top5_objects = [(segdef.object_intid_to_string(x.item() - 1), arg_vector[0, x.item()].item()) for x in reversed(arg_vector[0].argsort()[-5:])]
# print(f"Top5 objects: {top5_objects}")
pass_objects = arg_vector > 0.04
arg_vector = arg_vector * pass_objects
arg_vector /= (arg_vector.sum() + 1e-10)
return AlfredSubgoal.from_type_str_and_arg_vector(act_type_str, arg_vector)
def _sample_subgoal(self, type_distr, arg_vectors):
act_type_id = torch.distributions.Categorical(type_distr).sample().item()
act_type_str = AlfredSubgoal.action_type_intid_to_str(act_type_id)
arg_vector = arg_vectors[:, act_type_id, :]
top5_types = [(AlfredSubgoal.action_type_intid_to_str(a.item()), type_distr[0, a.item()].item()) for a in reversed(type_distr[0].argsort()[-5:])]
print(f"Top5 types: {top5_types}")
# Computed for debugging purposes only
top5_objects = [(segdef.object_intid_to_string(x.item() - 1), arg_vector[0, x.item()].item()) for x in reversed(arg_vector[0].argsort()[-5:])]
print(f"Top5 objects: {top5_objects}")
print(f"Action history: {[str(a) for a in self.model_state.action_history]}")
# Zero out the long tail - otherwise that contains most of the prob mass which doesn't make sense.
pass_objects = arg_vector > 0.04
arg_vector = arg_vector * pass_objects
arg_vector /= (arg_vector.sum() + 1e-10)
act_arg_id = torch.distributions.Categorical(arg_vector).sample().item()
arg_vector_out = torch.zeros_like(arg_vector)
arg_vector_out[0, act_arg_id] = 1.0
return AlfredSubgoal.from_type_str_and_arg_vector(act_type_str, arg_vector_out)
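    # Worked example for the truncation step above (illustrative numbers only):
    # with threshold 0.04, an argument distribution [0.50, 0.30, 0.12, 0.05, 0.03]
    # drops the last entry and renormalises to roughly [0.515, 0.309, 0.124, 0.052]
    # before the categorical sample is drawn.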
def mle(self,
state: AlfredSpatialStateRepr,
task: HlsmTaskRepr,
model_state: "HlsmSubgoalModel.ModelState"):
return self.forward_inference(state, task, model_state)
# ---------------------------------------------------------------------------------
def _log_activations(self, states, act_type_distr, act_arg_distr, action):
if self.log_internal_activations:
with torch.no_grad():
act_arg_distr_argmax_type = act_arg_distr[:, act_type_distr[0].argmax(), :]
state_kernel = act_arg_distr_argmax_type[:, 1:].clone()
state_data = states.data.data # B x C x W x L x H
state_response = torch.einsum("bcwlh,bc->bwlh", state_data.float(), state_kernel.float()) # B x W x L x H
action_arg_distribution_log = multidim_logsoftmax(state_response, dims=(1, 2, 3))
# Replicate - use the same argument distribution for "all action types"
action_arg_distribution_log = action_arg_distribution_log[:, None, :, :, :].repeat(
(1, self.action_type_dim, 1, 1, 1))
#self.trace["state_repr"] = states
#self.trace["filters"] = state_kernel
#self.trace["action_type_distribution"] = torch.exp(act_type_distr)
#self.trace["action_arg_distribution"] = torch.exp(action_arg_distribution_log)
self.trace["subgoal"] = action
return
def forward_inference(self,
states: AlfredSpatialStateRepr,
tasks: HlsmTaskRepr,
model_state: "HlsmSubgoalModel.ModelState"):
device = states.data.data.device
action_history = model_state.action_history
# The most recent timestep doesn't have an action label - that's what we're predicting.
# Add a dummy tensor there. The downstream model doesn't look at it anyway (it's masked out in attention masks)
current_action_label = torch.zeros((1, 2), device=device, dtype=torch.int64)
current_action_label[0, 0] = 2
current_action_label[0, 1] = 33
action_labels = torch.cat([a.to_tensor(device=device, dtype=torch.int64) for a in action_history] + [current_action_label], dim=0)
batch_id = [0 for _ in range(len(action_labels))]
_, _, act_type_logprobs, act_arg_logprobs, task_emb = self._forward_model(states, tasks, action_labels, batch_id)
# We care about predicting the CURRENT action (the last one),
# even though we are running the model on the entire action history sequence.
act_type_logprob = act_type_logprobs[-1:, :]
act_arg_logprob = act_arg_logprobs[-1:, :]
type_distr = torch.exp(act_type_logprob)
arg_distr = torch.exp(act_arg_logprob)
arg_distr = arg_distr / arg_distr.sum(dim=2, keepdim=True) # Re-normalize
subgoal = self._sample_subgoal(type_distr, arg_distr)
arg_mask_3d = self.forward_mask(states, task_emb, subgoal, self.model_state.action_history, batch_training=False)
arg_mask_voxelgrid = VoxelGrid(arg_mask_3d, arg_mask_3d, states.data.voxel_size, states.data.origin)
subgoal.argument_mask = arg_mask_voxelgrid
# Create some graphics for the gifs
self._log_activations(states, torch.exp(act_type_logprob), torch.exp(act_arg_logprob), subgoal)
return subgoal
def _forward_model(self,
states: Union[AlfredSpatialStateRepr, torch.tensor],
tasks: HlsmTaskRepr,
sem_actions: torch.tensor,
batch_id: List[int],
adv_training: bool = False,
adv_modality: list = None,
adv_delta_state: torch.tensor = None,
adv_delta_task: torch.tensor = None,
adv_delta_action_hist: torch.tensor = None):
# sem_actions: B x 2
bs = states.data.data.shape[0]
device = states.data.data.device
# TxD_{u}
if self.no_language_baseline:
task_embeddings = torch.zeros((bs, self.hidden_dim), device=device, dtype=torch.float32)
else:
task_embeddings = self.language_encoder(tasks)
# TxD_{a}
action_hist_embeddings = self.action_history_encoder(sem_actions, batch_id)
# TxD_{s}
if self.no_vision_baseline:
state_embeddings = torch.zeros((bs, self.hidden_dim), device=device)
else:
state_embeddings = self.state_repr_encoder(states, task_embeddings, action_hist_embeddings)
# Drop the last action history embedding
action_hist_embeddings = action_hist_embeddings[:-1]
# If we're running inference, we want to predict the most recent action from the most recent state.
# Take the most recent action embedding
ns = state_embeddings.shape[0]
action_hist_embeddings = action_hist_embeddings[-ns:]
if adv_training and 'state' in adv_modality:
state_embeddings = state_embeddings + adv_delta_state
if adv_training and 'task' in adv_modality:
task_embeddings = task_embeddings + adv_delta_task
if adv_training and 'action_hist' in adv_modality:
action_hist_embeddings = action_hist_embeddings + adv_delta_action_hist
act_type_prob, act_arg_prob, act_type_logprob, act_arg_logprob = self.action_predictor(
state_embeddings, task_embeddings, action_hist_embeddings)
return act_type_prob, act_arg_prob, act_type_logprob, act_arg_logprob, task_embeddings
def forward_mask(self,
state_repr,
task_emb : torch.tensor,
action: AlfredSubgoal,
action_history: List[AlfredSubgoal],
batch_training=False):
# STATE REPRESENTATION
state_features = state_repr.get_nav_features_2d()
# ACTION HISTORY REPRESENTATION
if len(action_history) > 0:
action_history = AlfredSubgoal.collate(action_history)
past_action_types = action_history.type_oh()
past_action_masks = action_history.get_argument_mask()
past_action_typed_mask_3d = past_action_types[:, :, None, None, None] * past_action_masks
action_history_typed_masks_2d = past_action_typed_mask_3d.cumsum(dim=0).max(dim=4).values
else:
b, f, h, w = state_features.shape
ac = AlfredSubgoal.get_action_type_space_dim()
action_history_typed_masks_2d = torch.zeros((b, ac, h, w), device=state_features.device)
# PROPOSED ACTION REPRESENTATION
# Build proposal of current action type and arg
proposed_action_masks = action.build_spatial_arg_proposal(state_repr)
proposed_action_masks_2d = proposed_action_masks.max(dim=4).values
proposed_action_types = action.type_oh()
proposed_typed_masks_2d = proposed_action_types[:, :, None, None] * proposed_action_masks_2d
if batch_training:
            # Roll action histories forward so that the model can't peek at the current action's argument masks
action_history_typed_masks_2d = action_history_typed_masks_2d.roll(shifts=1, dims=0).clone()
# The last action rolls back to the start - zero it out.
action_history_typed_masks_2d[0] = 0
else:
# Take the last action history mask representation
action_history_typed_masks_2d = action_history_typed_masks_2d[-1:]
# Run the mask prediction LingUNet
x = torch.cat([action_history_typed_masks_2d, proposed_typed_masks_2d, state_features], dim=1)
ctx = task_emb
pred_masks_2d = self.mask_model(x, ctx)
pred_logprobs_2d = multidim_logsoftmax(pred_masks_2d, dims=(2, 3))
if batch_training:
# During training, learn to predict masks in top-down view. No need to go to 3D
return pred_logprobs_2d, proposed_action_masks_2d
else:
# At test-time, lift the mask to 3D by masking the proposals
pred_probs_3d = proposed_action_masks * torch.exp(pred_logprobs_2d)[:, :, :, :, None]
# Standardize against the peak activation
pred_probs_3d = pred_probs_3d / (pred_probs_3d.max() + 1e-10)
VIZ = GLOBAL_VIZ
if VIZ:
show_image(pred_probs_3d.max(dim=4).values[0].detach().cpu(), "Refined mask", waitkey=1, scale=4)
show_image(proposed_action_masks_2d[0].detach().cpu(), "Proposed mask", waitkey=1, scale=4)
return pred_probs_3d
def get_name(self) -> str:
return "alfred_spatial_transformer_model_2"
def success(self, pred_logits, class_indices):
amax_idx = pred_logits.argmax(1)
target_idx = class_indices
#print(amax_idx[0], target_idx[0])# == target_idx.sum())
succ = (amax_idx == target_idx)
return succ
def collect_metrics(self, act_type_logprob, act_type_gt, act_arg_logprob, act_arg_gt, sem_actions, batch_id):
# Metrics:
type_step_success = self.success(act_type_logprob, act_type_gt)
arg_step_success = self.success(act_arg_logprob, act_arg_gt)
tensor_batchid = torch.tensor(batch_id, device=sem_actions.device)
type_per_step_success_rate = type_step_success.sum().item() / type_step_success.shape[0]
arg_per_step_success_rate = arg_step_success.sum().item() / arg_step_success.shape[0]
act_per_step_success_rate = (type_step_success * arg_step_success).sum().item() / type_step_success.shape[0]
type_full_correct = 0
arg_full_correct = 0
act_full_correct = 0
num_b = max(batch_id) + 1
for b in range(num_b):
isb = (tensor_batchid == b)
b_type_succ_cnt = (type_step_success * isb).sum()
b_arg_succ_cnt = (arg_step_success * isb).sum()
b_cnt = isb.sum()
b_tc = (b_type_succ_cnt == b_cnt).item()
b_ac = (b_arg_succ_cnt == b_cnt).item()
type_full_correct += 1 if b_tc else 0
arg_full_correct += 1 if b_ac else 0
act_full_correct += 1 if b_ac * b_tc else 0
type_sequence_success_rate = type_full_correct / num_b
arg_sequence_success_rate = arg_full_correct / num_b
act_sequence_success_rate = act_full_correct / num_b
metrics = {
"act_type_step_sr": type_per_step_success_rate,
"act_arg_step_sr": arg_per_step_success_rate,
"act_step_sr": act_per_step_success_rate,
"act_type_seq_sr": type_sequence_success_rate,
"act_arg_seq_sr": arg_sequence_success_rate,
"act_seq_sr": act_sequence_success_rate
}
return metrics
def loss(self, batch: Dict):
        # Kept for backwards compatibility: the actual loss computation lives in forward()
return self.forward(batch)
def forward(self, batch: Dict, adv_training=False, adv_modality=None, adv_delta_state=None, adv_delta_task=None, adv_delta_action_hist=None):
if batch["states"] is None:
states = batch["states_preproc"]
else:
states = batch["states"]
tasks = batch["task_reprs"]
subgoals_gt : AlfredSubgoal = batch["subgoals"]
batch_id = batch["batch_id"]
actions_gt_sem_tensor = subgoals_gt.to_tensor()
act_type_prob, act_arg_prob, act_type_logprob, act_arg_logprob, task_emb = self._forward_model(
states, tasks, actions_gt_sem_tensor, batch_id, adv_training, adv_modality, adv_delta_state, adv_delta_task, adv_delta_action_hist)
output = {}
output['act_type_prob'] = act_type_prob
output['act_type_logprob'] = act_type_logprob
output['act_arg_prob'] = act_arg_prob
output['act_arg_logprob'] = act_arg_logprob
act_type_gt = actions_gt_sem_tensor[:, 0]
act_arg_gt = actions_gt_sem_tensor[:, 1] + 1
# For each batch element, grab the argument distribution corresponding to the ground truth action type
act_arg_logprob = batched_index_select(act_arg_logprob, dim=1, index=act_type_gt)[:, 0, :]
# Predict action argument masks
action_history_gt: List[AlfredSubgoal] = subgoals_gt.disperse()
act_mask_pred_logprob_2d, act_mask_proposed_2d = self.forward_mask(
states, task_emb, subgoals_gt, action_history_gt, batch_training=True)
# It only makes sense to learn action argument prediction over observed space
obs_mask = states.get_observability_map_2d()
act_mask_pred_prob_2d = torch.exp(act_mask_pred_logprob_2d)
act_mask_gt_2d = subgoals_gt.get_argument_mask().max(dim=4).values
act_mask_gt_2d = act_mask_gt_2d * obs_mask
domain_size = act_mask_gt_2d.sum(dim=(1, 2, 3), keepdims=True)
act_mask_gt_2d = act_mask_gt_2d / (domain_size + 1e-10)
has_arg_mask = act_mask_gt_2d.sum(dim=(1, 2, 3)) > 0
output['act_mask_pred_prob_2d'] = act_mask_pred_prob_2d
output['act_mask_pred_logprob_2d'] = act_mask_pred_logprob_2d
# TODO: Sem actions should be AlfredActionHLSem
type_loss = self.nllloss(input=act_type_logprob, target=act_type_gt)
arg_loss = self.nllloss(input=act_arg_logprob, target=act_arg_gt)
# Spatial cross-entropy loss:
argmask_loss = -((act_mask_gt_2d * act_mask_pred_logprob_2d).sum(dim=(2, 3)) * has_arg_mask).mean()
# BCE loss:
#argmask_loss = -((act_mask_gt_2d * torch.log(act_mask_pred_2d)).sum(dim=(1, 2, 3)) / (
# act_mask_gt_2d.sum(dim=(1, 2, 3)) + 1e-10)).mean()
loss = type_loss + arg_loss + argmask_loss
metrics = self.collect_metrics(act_type_logprob, act_type_gt, act_arg_logprob, act_arg_gt, actions_gt_sem_tensor, batch_id)
metrics["loss"] = loss.detach().cpu().item()
metrics["type_loss"] = type_loss.detach().cpu().item()
metrics["arg_loss"] = arg_loss.detach().cpu().item()
metrics["argmask_loss"] = argmask_loss.detach().cpu().item()
VIZ = GLOBAL_VIZ
if VIZ:
with torch.no_grad():
mask_viz = torch.cat([act_mask_gt_2d[0], act_mask_proposed_2d[0], act_mask_pred_prob_2d[0] * domain_size[0]], dim=0).clamp(0, 1)
mask_viz = mask_viz * has_arg_mask[0] # Just blank out examples where there are no argument labels
mask_viz_np = mask_viz.permute((1, 2, 0)).detach().cpu().numpy()
show_image(mask_viz_np, "R: gt, G: proposal, B: refined pred", scale=4, waitkey=1)
state_viz = states.get_nav_features_2d_viz()[0].permute((1, 2, 0)).detach().cpu().numpy()
show_image(state_viz, "State features", scale=4, waitkey=1)
return loss, metrics, output
import lgp.model_registry
lgp.model_registry.register_model("alfred_subgoal_model", HlsmSubgoalModel)
| 46.131417
| 153
| 0.661177
|
8115d691592d91374b6ccc17b919d54bee9b6196
| 1,489
|
py
|
Python
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage10.py
|
NestLakerJasonLIN/pipedream
|
f50827f2e28cbdbd82a4ea686c0498272b1460d6
|
[
"MIT"
] | 273
|
2019-08-31T14:12:11.000Z
|
2022-03-05T13:34:25.000Z
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage10.py
|
albertsh10/pipedream
|
cad624f79a71f44ba79099f0c38321347b13e5c2
|
[
"MIT"
] | 67
|
2019-09-19T15:36:59.000Z
|
2022-01-13T09:11:54.000Z
|
runtime/image_classification/models/vgg16/gpus=16_straight/stage10.py
|
albertsh10/pipedream
|
cad624f79a71f44ba79099f0c38321347b13e5c2
|
[
"MIT"
] | 100
|
2019-09-16T20:59:14.000Z
|
2022-03-23T12:56:56.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
class Stage10(torch.nn.Module):
def __init__(self):
super(Stage10, self).__init__()
self.layer1 = torch.nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer2 = torch.nn.ReLU(inplace=True)
self.layer3 = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
self.layer4 = torch.nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
self.layer5 = torch.nn.ReLU(inplace=True)
self._initialize_weights()
def forward(self, input0):
out0 = input0.clone()
out1 = self.layer1(out0)
out2 = self.layer2(out1)
out3 = self.layer3(out2)
out4 = self.layer4(out3)
out5 = self.layer5(out4)
return out5
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, 0, 0.01)
torch.nn.init.constant_(m.bias, 0)
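# Minimal shape-check sketch, not part of the original stage file; the input
# resolution below is an assumption for illustration only.
if __name__ == "__main__":
    stage = Stage10()
    x = torch.randn(1, 256, 28, 28)  # batch of one activation map
    y = stage(x)
    print(y.shape)  # conv keeps 28x28, pool halves it, second conv doubles channels: [1, 512, 14, 14]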
| 39.184211
| 105
| 0.596373
|
1782b420a2321848288590dc58b6078b526c9ac6
| 899
|
py
|
Python
|
coding202-parsing-json/activity-parse-4.py
|
bayotheman/coding-skills-sample-code
|
f75c45bf85474a179857df857cc8836e5f21ecc2
|
[
"Apache-2.0"
] | 215
|
2015-02-09T18:43:51.000Z
|
2022-03-27T09:07:23.000Z
|
coding202-parsing-json/activity-parse-4.py
|
bayotheman/coding-skills-sample-code
|
f75c45bf85474a179857df857cc8836e5f21ecc2
|
[
"Apache-2.0"
] | 9
|
2015-06-24T22:32:22.000Z
|
2020-09-17T22:22:39.000Z
|
coding202-parsing-json/activity-parse-4.py
|
bayotheman/coding-skills-sample-code
|
f75c45bf85474a179857df857cc8836e5f21ecc2
|
[
"Apache-2.0"
] | 190
|
2015-02-09T18:29:31.000Z
|
2022-02-15T23:00:47.000Z
|
from urllib.request import Request, urlopen
import json
uri_cmx = 'https://devnetapi.cisco.com/sandbox/mse/api/config/v1/maps/info/CiscoCampus'
# Let's wrap our common get functionality in a function definition
def get_content(uri):
req = Request(uri)
req.add_header('Authorization', 'Basic bGVhcm5pbmc6bGVhcm5pbmc=')
# req.add_header('Accept', 'application/json')
response = urlopen(req)
response_string = response.read().decode("utf-8")
response.close()
return response_string
json_object = json.loads(get_content(uri_cmx))
building_names = []
buildings = json_object["buildingList"]
for building in buildings:
building_names.append(building["name"])
print(building_names)
print(type(building_names))
# for building_name in building_names:
# building_uri = uri_cmx + "/" + building_name
# print(building_uri)
# print(get_content(building_uri))
| 27.242424
| 87
| 0.740823
|
ff0beef64b27289422a85df4940b2ba7f5257911
| 413
|
py
|
Python
|
lambda2.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | 1
|
2021-06-07T07:55:28.000Z
|
2021-06-07T07:55:28.000Z
|
lambda2.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
lambda2.py
|
PRASAD-DANGARE/PYTHON
|
36214f7dc3762d327e5a29e40752edeb098249c8
|
[
"MIT"
] | null | null | null |
# Python Program Using Lambda Function To Calculate Sum Of Two Numbers
'''
Function Name : Lambda Function To Calculate Sum Of Two Numbers.
Function Date : 8 Sep 2020
Function Author : Prasad Dangare
Input : Float
Output : Float
'''
f = lambda x, y: x + y # Write Lambda Function
result = f(1.55, 10) # Call Lambda Function
print('sum = ', result) # Display Result
| 31.769231
| 71
| 0.641646
|
8f8ebfc07115953f41ad7c8e17a55c37cd98389b
| 4,429
|
py
|
Python
|
src/estimagic/dashboard/plot_functions.py
|
janosg/estimagic
|
58e17ff94339076f4b7688b1dbef5685f48157e2
|
[
"BSD-3-Clause"
] | 7
|
2019-05-11T07:19:46.000Z
|
2019-05-31T07:03:13.000Z
|
src/estimagic/dashboard/plot_functions.py
|
janosg/estimagic
|
58e17ff94339076f4b7688b1dbef5685f48157e2
|
[
"BSD-3-Clause"
] | 14
|
2019-05-04T14:15:52.000Z
|
2019-06-10T11:45:27.000Z
|
src/estimagic/dashboard/plot_functions.py
|
janosg/estimagic
|
58e17ff94339076f4b7688b1dbef5685f48157e2
|
[
"BSD-3-Clause"
] | 1
|
2019-05-21T08:44:37.000Z
|
2019-05-21T08:44:37.000Z
|
"""Helper functions for the dashboard."""
from bokeh.models import HoverTool
from bokeh.models import Legend
from bokeh.plotting import figure
from estimagic.config import GRID_VISIBLE
from estimagic.config import LEGEND_LABEL_TEXT_FONT_SIZE
from estimagic.config import LEGEND_SPACING
from estimagic.config import MAJOR_TICK_IN
from estimagic.config import MAJOR_TICK_OUT
from estimagic.config import MIN_BORDER_BOTTOM
from estimagic.config import MIN_BORDER_LEFT
from estimagic.config import MIN_BORDER_RIGHT
from estimagic.config import MIN_BORDER_TOP
from estimagic.config import MINOR_TICK_LINE_COLOR
from estimagic.config import OUTLINE_LINE_WIDTH
from estimagic.config import PLOT_HEIGHT
from estimagic.config import PLOT_WIDTH
from estimagic.config import TOOLBAR_LOCATION
from estimagic.config import Y_RANGE_PADDING
from estimagic.config import Y_RANGE_PADDING_UNITS
from estimagic.dashboard.colors import get_colors
def plot_time_series(
data,
y_keys,
x_name,
title,
name=None,
y_names=None,
plot_width=PLOT_WIDTH,
):
"""Plot time series linking the *y_keys* to a common *x_name* variable.
Args:
data (ColumnDataSource): data that contain the y_keys and x_name
y_keys (list): list of the entries in the data that are to be plotted.
x_name (str): name of the entry in the data that will be on the x axis.
title (str): title of the plot.
name (str, optional): name of the plot for later retrieval with bokeh.
y_names (list, optional): if given these replace the y keys as line names.
Returns:
plot (bokeh Figure)
"""
if y_names is None:
y_names = [str(key) for key in y_keys]
plot = create_styled_figure(title=title, name=name, plot_width=plot_width)
# this ensures that the y range spans at least 0.1
plot.y_range.range_padding = Y_RANGE_PADDING
plot.y_range.range_padding_units = Y_RANGE_PADDING_UNITS
colors = get_colors("categorical", len(y_keys))
legend_items = []
for color, y_key, y_name in zip(colors, y_keys, y_names):
if len(y_name) <= 35:
label = y_name
else:
label = "..." + y_name[-32:]
line_glyph = plot.line(
source=data,
x=x_name,
y=y_key,
line_width=2,
color=color,
muted_color=color,
muted_alpha=0.2,
)
legend_items.append((label, [line_glyph]))
legend_items.append((" " * 60, []))
tooltips = [(x_name, "@" + x_name)]
tooltips += [("param_name", y_name), ("param_value", "@" + y_key)]
hover = HoverTool(renderers=[line_glyph], tooltips=tooltips)
plot.tools.append(hover)
legend = Legend(
items=legend_items,
border_line_color=None,
label_width=100,
label_text_font_size=LEGEND_LABEL_TEXT_FONT_SIZE,
spacing=LEGEND_SPACING,
)
legend.click_policy = "mute"
plot.add_layout(legend, "right")
return plot
def create_styled_figure(
title,
name=None,
tooltips=None,
plot_width=PLOT_WIDTH,
):
"""Return a styled, empty figure of predetermined height and width.
Args:
title (str): Title of the figure.
name (str): Name of the plot for later retrieval by bokeh. If not given the
title is set as name
tooltips (list, optional): List of bokeh tooltips to add to the figure.
Returns:
fig (bokeh Figure)
"""
assert plot_width is not None
name = name if name is not None else title
fig = figure(
plot_height=PLOT_HEIGHT,
plot_width=plot_width,
title=title.title(),
tooltips=tooltips,
name=name,
y_axis_type="linear",
sizing_mode="scale_width",
)
fig.title.text_font_size = "15pt"
# set minimum borders
fig.min_border_left = MIN_BORDER_LEFT
fig.min_border_right = MIN_BORDER_RIGHT
fig.min_border_top = MIN_BORDER_TOP
fig.min_border_bottom = MIN_BORDER_BOTTOM
# remove toolbar
fig.toolbar_location = TOOLBAR_LOCATION
# remove grid
fig.grid.visible = GRID_VISIBLE
# remove minor ticks
fig.axis.minor_tick_line_color = MINOR_TICK_LINE_COLOR
# remove tick lines
fig.axis.major_tick_out = MAJOR_TICK_OUT
fig.axis.major_tick_in = MAJOR_TICK_IN
# remove outline
fig.outline_line_width = OUTLINE_LINE_WIDTH
return fig
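# Minimal usage sketch, not part of the original module; the column names
# "iteration" and "value" are illustrative assumptions.
if __name__ == "__main__":
    from bokeh.models import ColumnDataSource

    source = ColumnDataSource({"iteration": [0, 1, 2], "value": [1.0, 0.5, 0.25]})
    fig = plot_time_series(source, y_keys=["value"], x_name="iteration", title="criterion value")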
| 30.544828
| 83
| 0.687514
|
601fe42837bb01ce09c706ae2419c23b8001e79f
| 230
|
py
|
Python
|
aqua/tests/common.py
|
divyamamgai/integrations-extras
|
8c40a9cf870578687cc224ee91d3c70cd3a435a4
|
[
"BSD-3-Clause"
] | 158
|
2016-06-02T16:25:31.000Z
|
2022-03-16T15:55:14.000Z
|
aqua/tests/common.py
|
divyamamgai/integrations-extras
|
8c40a9cf870578687cc224ee91d3c70cd3a435a4
|
[
"BSD-3-Clause"
] | 554
|
2016-03-15T17:39:12.000Z
|
2022-03-31T10:29:16.000Z
|
aqua/tests/common.py
|
divyamamgai/integrations-extras
|
8c40a9cf870578687cc224ee91d3c70cd3a435a4
|
[
"BSD-3-Clause"
] | 431
|
2016-05-13T15:33:13.000Z
|
2022-03-31T10:06:46.000Z
|
# (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.dev import get_docker_hostname, get_here
HERE = get_here()
HOST = get_docker_hostname()
PORT = '8080'
| 25.555556
| 60
| 0.76087
|
4e4dcc58390964f6462f9ecc6814f7648a10336b
| 5,762
|
py
|
Python
|
mcetl/download/GlobusDownload.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
mcetl/download/GlobusDownload.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
mcetl/download/GlobusDownload.py
|
materials-commons/pymcetl
|
b4311ba50bb35bc36527b9d313a91778f9550a92
|
[
"MIT"
] | null | null | null |
import logging
import os
from materials_commons.api import get_project_by_id
from .MaterialsCommonsACLInterface import MaterialsCommonsACLInterface
from ..common.access_exceptions import RequiredAttributeException
from ..common.McdirHelper import McdirHelper
from ..database.DatabaseInterface import DatabaseInterface
DOWNLOAD_NO_FILES_FOUND = "GlobusDownload - No Files Found"
class GlobusDownload:
def __init__(self, mc_user_id, apikey, project_id):
self.log = logging.getLogger(self.__class__.__name__)
self.mc_user_id = mc_user_id
self.apikey = apikey
self.project_id = project_id
self.file_list = None
self.path_list = None
self.transfer_client = None
self.user_dir = None
self.mc_globus_acl_interface = MaterialsCommonsACLInterface(mc_user_id)
download_ep_id = os.environ.get('MC_CONFIDENTIAL_CLIENT_ENDPOINT')
self.log.info("Download endpoint id = {}".format(download_ep_id))
if not download_ep_id:
self.log.error("Download endpoint is not set: MC_CONFIDENTIAL_CLIENT_ENDPOINT is undefined")
raise RequiredAttributeException("MC_CONFIDENTIAL_CLIENT_ENDPOINT is undefined")
self.download_ep_id = download_ep_id
mcdir_helper = McdirHelper()
self.mc_base = mcdir_helper.base_dir()
self.download_dir = mcdir_helper.get_download_dir()
self.mc_endpoint_path = None
def download(self):
if not self.setup():
return DOWNLOAD_NO_FILES_FOUND
self.stage()
self.expose()
return self.exposed_ep_url()
def setup(self):
probe = self.mc_globus_acl_interface.set_user_globus_id()
if not probe:
self.log.error("Users globus_user_id is undefined")
raise RequiredAttributeException("Globus user id is not set correctly")
self.log.info("Using globus id: {}".format(probe))
self.mc_globus_acl_interface.get_cc_transfer_client()
project = get_project_by_id(self.project_id, self.apikey)
self.log.info("Get file list for project = {} ({})".
format(project.name, project.id))
directory = project.get_top_directory()
self.file_list = []
self.path_list = []
path = ""
self.recursively_add_directory(path, directory)
if not self.file_list:
self.log.info("No files found.")
return False
self.log.info("Found {} files.".format(len(self.file_list)))
self.log.info("Found {} directory paths.".format(len(self.path_list)))
for file in self.file_list:
self.log.debug("File: {} - {}".format(file.name, file.path))
for path in self.path_list:
self.log.debug("Path {}".format(path))
return True
def recursively_add_directory(self, path, directory):
if path:
self.path_list.append(path)
file_or_dir_list = directory.get_children()
for file_or_dir in file_or_dir_list:
instance_path = path + file_or_dir.name
self.log.debug("File or dir otype = {}; name = {}; path = {}; {}".
format(file_or_dir.otype, file_or_dir.name,
file_or_dir.path, instance_path))
if file_or_dir.otype == 'file':
file_or_dir.path = instance_path
self.file_list.append(file_or_dir)
if file_or_dir.otype == "directory":
self.recursively_add_directory(instance_path + '/', file_or_dir)
def stage(self):
self.log.info("Staging - start")
staging_dir = self.download_dir
self.user_dir = self.make_random_name('etl-download-')
self.log.info("Staging - user_dir = {}".format(self.user_dir))
staging_dir = os.path.join(staging_dir, self.user_dir)
self.log.info("Staging - staging_dir = {}".format(staging_dir))
self.mc_endpoint_path = "/__download_staging/" + self.user_dir + "/"
self.log.info("Staging - mc_endpoint_path = {}".format(self.mc_endpoint_path))
os.makedirs(staging_dir)
for path in self.path_list:
staging_path = os.path.join(staging_dir, path)
self.log.debug("About to create dir {}".format(staging_path))
os.makedirs(staging_path)
for file in self.file_list:
file_id = file.id
usesid = file.usesid
if usesid:
file_id = usesid
p1 = file_id[9:11]
p2 = file_id[11:13]
file_path = os.path.join(self.mc_base, p1, p2, file_id)
link_path = os.path.join(staging_dir, file.path)
            try:
                os.link(file_path, link_path)
            except OSError:
                # The hard link may already exist from a previous staging run; skip it.
                self.log.debug("Could not link {} to {}".format(file_path, link_path))
self.log.info("Staging - end")
def expose(self):
self.log.info("Expose - start")
self.log.info("Setting ACL rule for path = {}".format(self.mc_endpoint_path))
self.mc_globus_acl_interface.set_user_access_rule(self.mc_endpoint_path)
self.log.info("Expose - end")
def exposed_ep_url(self):
origin_id = self.download_ep_id
path = self.mc_endpoint_path
if path:
if not path.startswith('/'):
path = "/" + path
if not path.endswith('/'):
path = path + "/"
url_base = "https://app.globus.org/file-manager"
path = path.replace('/', '%2F')
url = '{}?origin_id={}&origin_path={}'.format(url_base, origin_id, path)
return url
else:
return None
@staticmethod
def make_random_name(prefix):
uuid = DatabaseInterface().get_uuid()
return prefix + uuid
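# Minimal usage sketch, not part of the original module. The ids below are
# placeholders and MC_CONFIDENTIAL_CLIENT_ENDPOINT must be set in the environment.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    download = GlobusDownload(mc_user_id="mc-user-id", apikey="mc-api-key", project_id="project-id")
    print(download.download())  # a Globus file-manager URL, or DOWNLOAD_NO_FILES_FOUND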
| 41.157143
| 104
| 0.622353
|
fea58feede888178265a32cb7566879a519bd395
| 942
|
py
|
Python
|
server/axeshome/social.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | 2
|
2015-06-25T03:10:03.000Z
|
2016-02-22T11:34:08.000Z
|
server/axeshome/social.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | null | null | null |
server/axeshome/social.py
|
kevinmcguinness/axes-home
|
d41b7e605c8b0d3d6f274eccdd59b3fce794a28b
|
[
"Apache-2.0"
] | null | null | null |
#
# (c) Copyright 2015 Kevin McGuinness. All Rights Reserved.
#
"""
Social features
"""
import axeshome.backend as backend
from axeshome.api import mongo
def get_video_stats(uri):
uri = backend.fix_uri(uri)
mongo.db.videostats.ensure_index('uri')
stats = mongo.db.videostats.find_one({'uri': uri})
if stats is None:
stats = {'uri': uri, 'views': 0, 'bookmarks': 0, 'likes': 0}
ident = mongo.db.videostats.insert(stats)
stats = mongo.db.videostats.find_one({'_id': ident})
return stats
def increment_stats(uri, field):
stats = get_video_stats(uri)
stats[field] += 1
mongo.db.videostats.save(stats)
return stats
def decrement_stats(uri, field):
stats = get_video_stats(uri)
stats[field] -= 1
mongo.db.videostats.save(stats)
return stats
def find_popular_videos(n=100):
return mongo.db.videostats.find(
sort=[('likes', -1), ('views', -1)], limit=n)
| 27.705882
| 68
| 0.660297
|
6628a9e9541ae14c6f6eb8f793a0d2e491a9b00a
| 1,084
|
py
|
Python
|
test-support/test-repl.py
|
JakeWheat/burdock
|
5626803ea6bb844951cd0205f2eb7cbfa3f2bab5
|
[
"BSD-3-Clause"
] | null | null | null |
test-support/test-repl.py
|
JakeWheat/burdock
|
5626803ea6bb844951cd0205f2eb7cbfa3f2bab5
|
[
"BSD-3-Clause"
] | null | null | null |
test-support/test-repl.py
|
JakeWheat/burdock
|
5626803ea6bb844951cd0205f2eb7cbfa3f2bab5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""x
definitely want to try to do an expect in burdock
looked at the documentation for pexpect, and it looks very difficult
to do something general
it's probably fairly easy to write a numpty expect that works well
enough for burdock's automated testing needs, which might be better
this also serves as an example of how to do pexpect using burdock,
which is really useful even if it's not needed for the burdock testing
itself
"""
import pexpect
import sys
if len(sys.argv) != 2:
raise Exception("expected 1 arg, got: " + str(sys.argv))
def do_repl_test(lst):
child = pexpect.spawn('_build/burdock')
for l in lst:
child.expect("b > ", timeout=0.1)
child.sendline(l[0])
child.expect(l[1], timeout=0.1)
child.expect("b > ", timeout=0.1)
child.sendcontrol("d")
child.close() # add timeout
do_repl_test(eval(sys.argv[1]))
#do_repl_test([("1 + 2", "3")])
#do_repl_test([("a = 5", ""), ("a + 3", "8")])
#do_repl_test(eval('[("1 + 2", "3")]'))
#do_repl_test([("import lists", ""), ("a + 3", "8")])
| 23.565217
| 70
| 0.653137
|
bd68cbe6f5801e785a59e3b830351b61540d8bfa
| 5,565
|
py
|
Python
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/event_categories_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/event_categories_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_1_1/isi_sdk_8_1_1/models/event_categories_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 6
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_1_1.models.event_categories import EventCategories # noqa: F401,E501
from isi_sdk_8_1_1.models.event_category import EventCategory # noqa: F401,E501
class EventCategoriesExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'categories': 'list[EventCategory]',
'resume': 'str',
'total': 'int'
}
attribute_map = {
'categories': 'categories',
'resume': 'resume',
'total': 'total'
}
def __init__(self, categories=None, resume=None, total=None): # noqa: E501
"""EventCategoriesExtended - a model defined in Swagger""" # noqa: E501
self._categories = None
self._resume = None
self._total = None
self.discriminator = None
if categories is not None:
self.categories = categories
if resume is not None:
self.resume = resume
if total is not None:
self.total = total
@property
def categories(self):
"""Gets the categories of this EventCategoriesExtended. # noqa: E501
:return: The categories of this EventCategoriesExtended. # noqa: E501
:rtype: list[EventCategory]
"""
return self._categories
@categories.setter
def categories(self, categories):
"""Sets the categories of this EventCategoriesExtended.
:param categories: The categories of this EventCategoriesExtended. # noqa: E501
:type: list[EventCategory]
"""
self._categories = categories
@property
def resume(self):
"""Gets the resume of this EventCategoriesExtended. # noqa: E501
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:return: The resume of this EventCategoriesExtended. # noqa: E501
:rtype: str
"""
return self._resume
@resume.setter
def resume(self, resume):
"""Sets the resume of this EventCategoriesExtended.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:param resume: The resume of this EventCategoriesExtended. # noqa: E501
:type: str
"""
if resume is not None and len(resume) < 0:
raise ValueError("Invalid value for `resume`, length must be greater than or equal to `0`") # noqa: E501
self._resume = resume
@property
def total(self):
"""Gets the total of this EventCategoriesExtended. # noqa: E501
Total number of items available. # noqa: E501
:return: The total of this EventCategoriesExtended. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this EventCategoriesExtended.
Total number of items available. # noqa: E501
:param total: The total of this EventCategoriesExtended. # noqa: E501
:type: int
"""
if total is not None and total > 4294967295: # noqa: E501
raise ValueError("Invalid value for `total`, must be a value less than or equal to `4294967295`") # noqa: E501
if total is not None and total < 0: # noqa: E501
raise ValueError("Invalid value for `total`, must be a value greater than or equal to `0`") # noqa: E501
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EventCategoriesExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
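# Minimal usage sketch, not part of the generated module; values are illustrative.
if __name__ == "__main__":
    example = EventCategoriesExtended(categories=[], total=0)
    print(example.to_dict())  # {'categories': [], 'resume': None, 'total': 0}
    print(example == EventCategoriesExtended(categories=[], total=0))  # True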
| 31.264045
| 170
| 0.600898
|
bc8e7f229c1f54bc983a57247945517401d03081
| 379
|
py
|
Python
|
py_week_work/week8_032.py
|
x06lan/mt
|
3e4ddacc7717595acbfd689e22cd1e154843b99e
|
[
"MIT"
] | null | null | null |
py_week_work/week8_032.py
|
x06lan/mt
|
3e4ddacc7717595acbfd689e22cd1e154843b99e
|
[
"MIT"
] | null | null | null |
py_week_work/week8_032.py
|
x06lan/mt
|
3e4ddacc7717595acbfd689e22cd1e154843b99e
|
[
"MIT"
] | null | null | null |
save={"0":0,"1":1,"2":1}
def findmap(data,targe):
for i in data:
if i==str(targe):
return data[i]
return None
def F(data,n):
# print(n)
if findmap(data,n)==None:
data[str(n)]=F(data,n-1)+F(data,n-2)
return data[str(n)]
i=int(input())
out=[]
while i!=-1:
out.append(str(F(save,i)))
i=int(input())
for i in out:
print(i)
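# For comparison only (not part of the exercise solution): the same memoisation
# can be expressed with the standard library's functools.lru_cache.
# from functools import lru_cache
#
# @lru_cache(maxsize=None)
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)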
| 19.947368
| 44
| 0.530343
|
5a87ae0a84340c3ec13ca388247fe93ccb6daf46
| 1,337
|
py
|
Python
|
BASS/modules/WebcamVideoStream.py
|
nrvnrv/yv4cpu
|
7e6afd7a5f414f9debe773ac1340c591db6a8adf
|
[
"MIT"
] | null | null | null |
BASS/modules/WebcamVideoStream.py
|
nrvnrv/yv4cpu
|
7e6afd7a5f414f9debe773ac1340c591db6a8adf
|
[
"MIT"
] | null | null | null |
BASS/modules/WebcamVideoStream.py
|
nrvnrv/yv4cpu
|
7e6afd7a5f414f9debe773ac1340c591db6a8adf
|
[
"MIT"
] | null | null | null |
from threading import Thread, Lock
import cv2
class WebcamVideoStream:
def __init__(self, src=0, width=320, height=240):
self.thread = Thread(target=self.update, daemon=True, args=())
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.stream.set(cv2.CAP_PROP_BUFFERSIZE,0)
(self.grabbed, self.frame) = self.stream.read()
self.started = False
self.read_lock = Lock()
def start(self):
if self.started:
print("There is an instance of WebcamVideoStream running already")
return None
self.started = True
self.thread.start()
return self
def update(self):
while self.started:
(grabbed, frame) = self.stream.read()
self.read_lock.acquire()
self.grabbed, self.frame = grabbed, frame
self.read_lock.release()
def read(self):
self.read_lock.acquire()
frame = self.frame.copy()
self.read_lock.release()
return frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exc_type, exc_value, traceback):
self.stream.release()
if __name__=="__main__":
WebcamVideoStream(0)
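    # A fuller usage sketch (assumes a webcam is available at index 0):
    # stream = WebcamVideoStream(src=0).start()
    # frame = stream.read()   # thread-safe copy of the latest frame
    # cv2.imwrite("frame.jpg", frame)
    # stream.stop()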
| 29.711111
| 78
| 0.620045
|
3c4ec0120909bd1598a9b0b52e79bc9f682eff02
| 4,796
|
py
|
Python
|
language/gpt/train_gpt.py
|
SaraBakic/ColossalAI-Examples
|
072b7c972e166de0965a71a2cb02c95126cdec42
|
[
"Apache-2.0"
] | null | null | null |
language/gpt/train_gpt.py
|
SaraBakic/ColossalAI-Examples
|
072b7c972e166de0965a71a2cb02c95126cdec42
|
[
"Apache-2.0"
] | null | null | null |
language/gpt/train_gpt.py
|
SaraBakic/ColossalAI-Examples
|
072b7c972e166de0965a71a2cb02c95126cdec42
|
[
"Apache-2.0"
] | null | null | null |
from colossalai.context.parallel_mode import ParallelMode
from colossalai.logging import get_dist_logger, disable_existing_loggers
import colossalai
import os
from colossalai.core import global_context as gpc
from colossalai.utils.timer import MultiTimer
from colossalai.zero import zero3_model_context
import colossalai.utils as utils
from colossalai.trainer import hooks, Trainer
from colossalai.nn import LinearWarmupLR
import torch.nn as nn
from dataset.webtext import WebtextDataset
import contextlib
from colossalai.engine.schedule import PipelineSchedule, InterleavedPipelineSchedule
from model_zoo.gpt.gpt import GPTLMLoss
from colossalai.utils import is_using_pp
def main():
parser = colossalai.get_default_parser()
parser.add_argument('--from_torch', default=False, action='store_true')
args = parser.parse_args()
disable_existing_loggers()
if args.from_torch:
colossalai.launch_from_torch(config=args.config)
else:
colossalai.launch_from_slurm(config=args.config,
host=args.host,
port=29500,
seed=42)
logger = get_dist_logger()
logger.info('Build data loader', ranks=[0])
train_ds = WebtextDataset(os.environ['DATA'], seq_len=gpc.config.SEQ_LEN)
train_dataloader = utils.get_dataloader(train_ds,
seed=42,
batch_size=gpc.config.BATCH_SIZE,
pin_memory=True,
shuffle=True,
drop_last=True)
logger.info('Build model', ranks=[0])
use_pipeline = is_using_pp()
use_interleaved = hasattr(gpc.config.model, 'num_chunks')
use_zero3 = hasattr(gpc.config, 'zero') and gpc.config.zero.level == 3
ctx = zero3_model_context() if use_zero3 else contextlib.nullcontext()
with ctx:
model = gpc.config.model.pop('type')(**gpc.config.model)
if use_pipeline and use_interleaved and not isinstance(model, nn.ModuleList):
model = nn.ModuleList([model])
criterion = getattr(gpc.config, 'loss_fn', None)
if criterion is not None:
criterion = criterion.type()
else:
criterion = GPTLMLoss()
logger.info('Build optimizer', ranks=[0])
optimizer = gpc.config.optimizer.pop('type')(
model.parameters(), **gpc.config.optimizer)
lr_scheduler = LinearWarmupLR(
optimizer, total_steps=gpc.config.NUM_EPOCHS, warmup_steps=5)
engine, train_dataloader, _, lr_scheduler = colossalai.initialize(model,
optimizer,
criterion,
train_dataloader=train_dataloader,
lr_scheduler=lr_scheduler)
global_batch_size = gpc.config.BATCH_SIZE * \
gpc.get_world_size(ParallelMode.DATA) * getattr(gpc.config, "gradient_accumulation", 1)
logger.info(f'Init done, global batch size = {global_batch_size}', ranks=[0])
tensor_shape = getattr(gpc.config, 'TENSOR_SHAPE', None)
schedule = None
if use_pipeline:
if use_interleaved:
logger.info('Build InterleavedPipelineSchedule', ranks=[0])
schedule = InterleavedPipelineSchedule(gpc.config.NUM_MICRO_BATCHES,
gpc.config.model.num_chunks, tensor_shape=tensor_shape, scatter_gather_tensors=True)
else:
logger.info('Build PipelineSchedule', ranks=[0])
schedule = PipelineSchedule(gpc.config.NUM_MICRO_BATCHES,
tensor_shape=tensor_shape, scatter_gather_tensors=True)
    timer = MultiTimer()
    trainer = Trainer(
        engine=engine,
        logger=logger,
        schedule=schedule,
        timer=timer
    )
hook_list = [
hooks.LossHook(),
hooks.LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=True),
hooks.LogMetricByEpochHook(logger),
hooks.ThroughputHook(),
hooks.LogMetricByStepHook(),
# hooks.TensorboardHook(log_dir='./tb_logs', ranks=[0]),
# hooks.LogMemoryByEpochHook(logger),
# hooks.LogTimingByEpochHook(timer, logger),
# hooks.SaveCheckpointHook(checkpoint_dir='./ckpt')
]
trainer.fit(
train_dataloader=train_dataloader,
epochs=gpc.config.NUM_EPOCHS,
test_interval=1,
hooks=hook_list,
display_progress=True,
return_output_label=False
)
if __name__ == '__main__':
main()
| 40.302521
| 135
| 0.615721
|
9ae554d21299327aa83bbe496106de88c492d22e
| 2,365
|
py
|
Python
|
setup.py
|
WanlongCai/tespy
|
fd00cfaa1872c61a1806a270222de311b3ce0f86
|
[
"MIT"
] | null | null | null |
setup.py
|
WanlongCai/tespy
|
fd00cfaa1872c61a1806a270222de311b3ce0f86
|
[
"MIT"
] | null | null | null |
setup.py
|
WanlongCai/tespy
|
fd00cfaa1872c61a1806a270222de311b3ce0f86
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
with io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
) as fh:
return fh.read()
setup(
name='TESPy',
version='0.3.3.dev0',
license='MIT',
description='Thermal Engineering Systems in Python (TESPy)',
long_description='%s' % (
re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub(
'', read('README.rst')
)
),
author='Francesco Witte',
author_email='francesco.witte@hs-flensburg.de',
url='https://github.com/oemof/tespy',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
data_files=[('src/tespy/data', [
'src/tespy/data/char_lines.json', 'src/tespy/data/char_maps.json'])],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
],
project_urls={
'Documentation': 'https://tespy.readthedocs.io/',
'Changelog': 'https://tespy.readthedocs.io/en/latest/whats_new.html',
'Issue Tracker': 'https://github.com/oemof/tespy/issues',
},
python_requires='>=3.6, <3.9',
install_requires=[
'CoolProp>=6.4,<7',
'numpy>=1.13.3,<2',
'pandas>=0.19.2,!=1.0.0,<2',
'scipy>=0.19.1,<2',
'tabulate>=0.8.2,<0.9'
],
extras_require={'dev': ['pytest', 'sphinx', 'sphinx_rtd_theme', ],
'dummy': ['tespy']}
)
| 31.118421
| 77
| 0.602114
|
9458647a92d1ce4315a0e47ece9db136211589e1
| 2,746
|
py
|
Python
|
affirm.py
|
gooli/affirm
|
79cf60380be49dfa7b2dfa01ebb5412e06f57f45
|
[
"MIT"
] | 48
|
2016-06-03T05:19:20.000Z
|
2016-08-01T12:03:33.000Z
|
affirm.py
|
gooli/affirm
|
79cf60380be49dfa7b2dfa01ebb5412e06f57f45
|
[
"MIT"
] | 1
|
2016-10-28T23:44:27.000Z
|
2016-10-30T16:41:30.000Z
|
affirm.py
|
gooli/affirm
|
79cf60380be49dfa7b2dfa01ebb5412e06f57f45
|
[
"MIT"
] | 1
|
2016-11-24T07:26:56.000Z
|
2016-11-24T07:26:56.000Z
|
# affirm.py / Eli Finer / 2016
#
# This script causes assert statements to output much better default error messages
# that include the tested condition and the values for any variables referenced in it.
#
# Here are some examples:
#
# >>> assert 1 > 2
# AssertionError: assertion (1 > 2) failed
#
# >>> a = 1; b = 2; c = 'foo'; d = None
# >>> assert a > b
# AssertionError: assertion (a > b) failed with a=1, b=2
#
# >>> assert c is None
# AssertionError: assertion (c is None) failed with c='foo'
#
# >>> assert a == b == c
# AssertionError: assertion (a == b == c) failed with a=1, b=2, c='foo'
import re
import ast
import types
import inspect
from collections import OrderedDict
def make_assert_message(frame, regex):
def extract_condition():
code_context = inspect.getframeinfo(frame)[3]
if not code_context:
return ''
match = re.search(regex, code_context[0])
if not match:
return ''
return match.group(1).strip()
class ReferenceFinder(ast.NodeVisitor):
def __init__(self):
self.names = []
def find(self, tree, frame):
self.visit(tree)
nothing = object()
deref = OrderedDict()
for name in self.names:
value = frame.f_locals.get(name, nothing) or frame.f_globals.get(name, nothing)
if value is not nothing and not isinstance(value, (types.ModuleType, types.FunctionType)):
deref[name] = repr(value)
return deref
def visit_Name(self, node):
self.names.append(node.id)
condition = extract_condition()
if not condition:
return
deref = ReferenceFinder().find(ast.parse(condition), frame)
deref_str = ''
if deref:
deref_str = ' with ' + ', '.join('{}={}'.format(k, v) for k, v in deref.items())
return 'assertion {} failed{}'.format(condition, deref_str)
import sys
_old_excepthook = sys.excepthook
def assert_excepthook(type, value, traceback):
if type == AssertionError:
from traceback import print_exception
if not value.args:
top = traceback
while top.tb_next and top.tb_next.tb_frame:
top = top.tb_next
message = make_assert_message(top.tb_frame, r'assert\s+([^#]+)')
value = AssertionError(message)
print_exception(type, value, traceback)
else:
_old_excepthook(type, value, traceback)
sys.excepthook = assert_excepthook
def affirm(condition):
if not __debug__:
return
if condition:
return
else:
message = make_assert_message(inspect.currentframe().f_back, r'affirm\s*\(\s*(.+)\s*\)')
raise AssertionError(message)
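# Minimal usage sketch (illustrative values), e.g. in a script that imports this module:
#
#     from affirm import affirm
#     a, b = 1, 2
#     affirm(a > b)
#     # AssertionError: assertion a > b failed with a=1, b=2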
| 32.305882
| 106
| 0.619446
|
adec3950d0cdab369b9e36fb61716b4d67b53b3a
| 72,990
|
py
|
Python
|
pyzotero/zotero.py
|
michi-zuri/pyzotero
|
c984418524c447eabfcaef935c70becad18c4d54
|
[
"MIT"
] | null | null | null |
pyzotero/zotero.py
|
michi-zuri/pyzotero
|
c984418524c447eabfcaef935c70becad18c4d54
|
[
"MIT"
] | null | null | null |
pyzotero/zotero.py
|
michi-zuri/pyzotero
|
c984418524c447eabfcaef935c70becad18c4d54
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=R0904
"""
zotero.py
Created by Stephan Hügel on 2011-02-28
This file is part of Pyzotero.
The MIT License (MIT)
Copyright (c) 2015 Stephan Hügel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import unicode_literals
__author__ = "Stephan Hügel"
__version__ = "1.4.18"
__api_version__ = "3"
import sys
import requests
from requests import Request
import socket
import feedparser
import bibtexparser
import json
import copy
import uuid
import time
import threading
import os
import hashlib
import datetime
import re
import pytz
import mimetypes
from pathlib import Path
from . import zotero_errors as ze
from functools import wraps
# Python 3 compatibility faffing
if sys.version_info[0] == 2:
from urllib import urlencode
from urllib import quote
from urlparse import urlparse, urlunparse, parse_qsl, urlunsplit
else:
from urllib.parse import urlencode
from urllib.parse import urlparse, urlunparse, parse_qsl, urlunsplit
from urllib.parse import quote
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# Avoid hanging the application if there's no server response
timeout = 30
socket.setdefaulttimeout(timeout)
def ib64_patched(self, attrsD, contentparams):
""" Patch isBase64 to prevent Base64 encoding of JSON content
"""
if attrsD.get("mode", "") == "base64":
return 0
if self.contentparams["type"].startswith("text/"):
return 0
if self.contentparams["type"].endswith("+xml"):
return 0
if self.contentparams["type"].endswith("/xml"):
return 0
if self.contentparams["type"].endswith("/json"):
return 0
return 0
def token():
""" Return a unique 32-char write-token
"""
return str(uuid.uuid4().hex)
# Override feedparser's buggy isBase64 method until they fix it
feedparser._FeedParserMixin._isBase64 = ib64_patched
def cleanwrap(func):
""" Wrapper for Zotero._cleanup
"""
def enc(self, *args, **kwargs):
""" Send each item to _cleanup() """
return (func(self, item, **kwargs) for item in args)
return enc
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
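# e.g. list(chunks(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]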
def tcache(func):
""" Take care of the URL building and caching for template functions """
@wraps(func)
def wrapped_f(self, *args, **kwargs):
""" Calls the decorated function to get query string and params,
builds URL, retrieves template, caches result, and returns template
"""
query_string, params = func(self, *args, **kwargs)
r = Request("GET", self.endpoint + query_string, params=params).prepare()
# now split up the URL
result = urlparse(r.url)
# construct cache key
cachekey = result.path + "_" + result.query
if self.templates.get(cachekey) and not self._updated(
query_string, self.templates[cachekey], cachekey
):
return self.templates[cachekey]["tmplt"]
# otherwise perform a normal request and cache the response
retrieved = self._retrieve_data(query_string, params=params)
return self._cache(retrieved, cachekey)
return wrapped_f
def backoff_check(func):
"""
Perform backoff processing
func must return a Requests GET / POST / PUT / PATCH / DELETE etc
This is is intercepted: we first check for an active backoff
and wait if need be.
After the response is received, we do normal error checking
and set a new backoff if necessary, before returning
Use with functions that are intended to return True
"""
@wraps(func)
def wrapped_f(self, *args, **kwargs):
self._check_backoff()
# resp is a Requests response object
resp = func(self, *args, **kwargs)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, resp)
self.request = resp
backoff = resp.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return True
return wrapped_f
def retrieve(func):
"""
Decorator for Zotero read API methods; calls _retrieve_data() and passes
the result to the correct processor, based on a lookup
"""
@wraps(func)
def wrapped_f(self, *args, **kwargs):
"""
Returns result of _retrieve_data()
func's return value is part of a URI, and it's this
which is intercepted and passed to _retrieve_data:
'/users/123/items?key=abc123'
"""
if kwargs:
self.add_parameters(**kwargs)
retrieved = self._retrieve_data(func(self, *args))
# we now always have links in the header response
self.links = self._extract_links()
# determine content and format, based on url params
content = (
self.content.search(self.request.url)
and self.content.search(self.request.url).group(0)
or "bib"
)
# JSON by default
formats = {
"application/atom+xml": "atom",
"application/x-bibtex": "bibtex",
"application/json": "json",
"text/html": "snapshot",
"text/plain": "plain",
"application/pdf; charset=utf-8": "pdf",
"application/pdf": "pdf",
"application/msword": "doc",
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": "xlsx",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
"application/zip": "zip",
"application/epub+zip": "zip",
"audio/mpeg": "mp3",
"video/mp4": "mp4",
"audio/x-wav": "wav",
"video/x-msvideo": "avi",
"application/octet-stream": "octet",
"application/x-tex": "tex",
"application/x-texinfo": "texinfo",
"image/jpeg": "jpeg",
"image/png": "png",
"image/gif": "gif",
"image/tiff": "tiff",
"application/postscript": "postscript",
"application/rtf": "rtf",
}
# select format, or assume JSON
content_type_header = self.request.headers["Content-Type"].lower() + ";"
re.compile("\s+")
fmt = formats.get(
# strip "; charset=..." segment
content_type_header[0 : content_type_header.index(";")],
"json",
)
# clear all query parameters
self.url_params = None
# check to see whether it's tag data
if "tags" in self.request.url:
self.tag_data = False
return self._tags_data(retrieved.json())
if fmt == "atom":
parsed = feedparser.parse(retrieved.text)
# select the correct processor
processor = self.processors.get(content)
# process the content correctly with a custom rule
return processor(parsed)
if fmt == "snapshot":
# we need to dump as a zip!
self.snapshot = True
if fmt == "bibtex":
parser = bibtexparser.bparser.BibTexParser(common_strings=True)
return parser.parse(retrieved.text)
# it's binary, so return raw content
elif fmt != "json":
return retrieved.content
# no need to do anything special, return JSON
else:
return retrieved.json()
return wrapped_f
def ss_wrap(func):
""" ensure that a SavedSearch object exists """
def wrapper(self, *args, **kwargs):
if not self.savedsearch:
self.savedsearch = SavedSearch(self)
return func(self, *args, **kwargs)
return wrapper
class Zotero(object):
"""
Zotero API methods
A full list of methods can be found here:
http://www.zotero.org/support/dev/server_api
"""
def __init__(
self,
library_id=None,
library_type=None,
api_key=None,
preserve_json_order=False,
locale="en-US",
):
""" Store Zotero credentials
"""
self.endpoint = "https://api.zotero.org"
if library_id and library_type:
self.library_id = library_id
# library_type determines whether query begins w. /users or /groups
self.library_type = library_type + "s"
else:
raise ze.MissingCredentials(
"Please provide both the library ID and the library type"
)
# api_key is not required for public individual or group libraries
self.api_key = api_key
self.preserve_json_order = preserve_json_order
self.locale = locale
self.url_params = None
self.tag_data = False
self.request = None
self.snapshot = False
# these aren't valid item fields, so never send them to the server
self.temp_keys = set(["key", "etag", "group_id", "updated"])
# determine which processor to use for the parsed content
self.fmt = re.compile(r"(?<=format=)\w+")
self.content = re.compile(r"(?<=content=)\w+")
self.processors = {
"bib": self._bib_processor,
"citation": self._citation_processor,
"bibtex": self._bib_processor,
"bookmarks": self._bib_processor,
"coins": self._bib_processor,
"csljson": self._csljson_processor,
"mods": self._bib_processor,
"refer": self._bib_processor,
"rdf_bibliontology": self._bib_processor,
"rdf_dc": self._bib_processor,
"rdf_zotero": self._bib_processor,
"ris": self._bib_processor,
"tei": self._bib_processor,
"wikipedia": self._bib_processor,
"json": self._json_processor,
"html": self._bib_processor,
}
self.links = None
self.self_link = {}
self.templates = {}
self.savedsearch = None
# these are required for backoff handling
self.backoff = False
self.backoff_duration = 0.0
def _set_backoff(self, duration):
"""
Set a backoff
Spins up a timer in a background thread which resets the backoff logic
when it expires, then sets the time at which the backoff will expire.
The latter step is required so that other calls can check whether there's
an active backoff, because the threading.Timer method has no way
of returning a duration
"""
duration = float(duration)
self.backoff = True
threading.Timer(duration, self._reset_backoff).start()
self.backoff_duration = time.time() + duration
def _reset_backoff(self):
self.backoff = False
self.backoff_duration = 0.0
def _check_backoff(self):
"""
Before an API call is made, we check whether there's an active backoff.
If there is, we check whether there's any time left on the backoff.
If there is, we sleep for the remainder before returning
"""
if self.backoff:
remainder = self.backoff_duration - time.time()
if remainder > 0.0:
time.sleep(remainder)
def default_headers(self):
"""
It's always OK to include these headers
"""
_headers = {
"User-Agent": "Pyzotero/%s" % __version__,
"Zotero-API-Version": "%s" % __api_version__,
}
if self.api_key:
_headers["Authorization"] = "Bearer %s" % self.api_key
return _headers
def _cache(self, response, key):
"""
Add a retrieved template to the cache for 304 checking
accepts a dict and key name, adds the retrieval time, and adds both
to self.templates as a new dict using the specified key
"""
# cache template and retrieval time for subsequent calls
thetime = datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("GMT"))
self.templates[key] = {"tmplt": response.json(), "updated": thetime}
return copy.deepcopy(response.json())
@cleanwrap
def _cleanup(self, to_clean, allow=()):
""" Remove keys we added for internal use
"""
# this item's been retrieved from the API, we only need the 'data'
# entry
        if set(to_clean.keys()) == {"links", "library", "version", "meta", "key", "data"}:
to_clean = to_clean["data"]
return dict(
[
[k, v]
for k, v in list(to_clean.items())
if (k in allow or k not in self.temp_keys)
]
)
def _retrieve_data(self, request=None, params=None):
"""
Retrieve Zotero items via the API
Combine endpoint and request to access the specific resource
Returns a JSON document
"""
full_url = "%s%s" % (self.endpoint, request)
# The API doesn't return this any more, so we have to cheat
self.self_link = request
# ensure that we wait if there's an active backoff
self._check_backoff()
self.request = requests.get(
url=full_url, headers=self.default_headers(), params=params
)
self.request.encoding = "utf-8"
try:
self.request.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, self.request)
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return self.request
def _extract_links(self):
"""
Extract self, first, next, last links from a request response
"""
extracted = dict()
try:
for key, value in self.request.links.items():
parsed = urlparse(value["url"])
fragment = "{path}?{query}".format(path=parsed[2], query=parsed[4])
extracted[key] = fragment
# add a 'self' link
parsed = list(urlparse(self.self_link))
# strip 'format' query parameter
stripped = "&".join(
[
"%s=%s" % (p[0], p[1])
for p in parse_qsl(parsed[4])
if p[0] != "format"
]
)
# rebuild url fragment
# this is a death march
extracted["self"] = urlunparse(
[parsed[0], parsed[1], parsed[2], parsed[3], stripped, parsed[5]]
)
return extracted
except KeyError:
# No links present, because it's a single item
return None
def _updated(self, url, payload, template=None):
"""
Generic call to see if a template request returns 304
accepts:
- a string to combine with the API endpoint
- a dict of format values, in case they're required by 'url'
- a template name to check for
As per the API docs, a template less than 1 hour old is
assumed to be fresh, and will immediately return False if found
"""
# If the template is more than an hour old, try a 304
if (
abs(
datetime.datetime.utcnow().replace(tzinfo=pytz.timezone("GMT"))
- self.templates[template]["updated"]
).seconds
> 3600
):
query = self.endpoint + url.format(
u=self.library_id, t=self.library_type, **payload
)
headers = {
"If-Modified-Since": payload["updated"].strftime(
"%a, %d %b %Y %H:%M:%S %Z"
)
}
headers.update(self.default_headers())
# perform the request, and check whether the response returns 304
self._check_backoff()
req = requests.get(query, headers=headers)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return req.status_code == 304
# Still plenty of life left in't
return False
def add_parameters(self, **params):
"""
Add URL parameters
Also ensure that only valid format/content combinations are requested
"""
self.url_params = None
# we want JSON by default
if not params.get("format"):
params["format"] = "json"
# non-standard content must be retrieved as Atom
if params.get("content"):
params["format"] = "atom"
# TODO: rewrite format=atom, content=json request
if "limit" not in params or params.get("limit") == 0:
params["limit"] = 100
# Need ability to request arbitrary number of results for version
# response
# -1 value is hack that works with current version
elif params["limit"] == -1 or params["limit"] is None:
del params["limit"]
# bib format can't have a limit
if params.get("format") == "bib":
del params["limit"]
self.url_params = urlencode(params, doseq=True)
def _build_query(self, query_string, no_params=False):
"""
Set request parameters. Will always add the user ID if it hasn't
been specifically set by an API method
"""
try:
query = quote(query_string.format(u=self.library_id, t=self.library_type))
except KeyError as err:
raise ze.ParamNotPassed("There's a request parameter missing: %s" % err)
# Add the URL parameters and the user key, if necessary
if no_params is False:
if not self.url_params:
self.add_parameters()
query = "%s?%s" % (query, self.url_params)
return query
@retrieve
def publications(self):
""" Return the contents of My Publications
"""
if self.library_type != "users":
raise ze.CallDoesNotExist(
"This API call does not exist for group libraries"
)
query_string = "/{t}/{u}/publications/items"
return self._build_query(query_string)
# The following methods are Zotero Read API calls
def num_items(self):
""" Return the total number of top-level items in the library
"""
query = "/{t}/{u}/items/top"
return self._totals(query)
def count_items(self):
""" Return the count of all items in a group / library
"""
query = "/{t}/{u}/items"
return self._totals(query)
def num_collectionitems(self, collection):
""" Return the total number of items in the specified collection
"""
query = "/{t}/{u}/collections/{c}/items".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._totals(query)
def _totals(self, query):
""" General method for returning total counts
"""
self.add_parameters(limit=1)
query = self._build_query(query)
self._retrieve_data(query)
self.url_params = None
# extract the 'total items' figure
return int(self.request.headers["Total-Results"])
@retrieve
def key_info(self, **kwargs):
"""
Retrieve info about the permissions associated with the
key associated to the given Zotero instance
"""
query_string = "/keys/{k}".format(k=self.api_key)
return self._build_query(query_string)
@retrieve
def items(self, **kwargs):
""" Get user items
"""
query_string = "/{t}/{u}/items"
return self._build_query(query_string)
@retrieve
def fulltext_item(self, itemkey, **kwargs):
""" Get full-text content for an item"""
query_string = "/{t}/{u}/items/{itemkey}/fulltext".format(
t=self.library_type, u=self.library_id, itemkey=itemkey
)
return self._build_query(query_string)
@backoff_check
def set_fulltext(self, itemkey, payload):
""""
Set full-text data for an item
<itemkey> should correspond to an existing attachment item.
payload should be a dict containing three keys:
'content': the full-text content and either
For text documents, 'indexedChars' and 'totalChars' OR
For PDFs, 'indexedPages' and 'totalPages'.
"""
headers = self.default_headers()
headers.update({"Content-Type": "application/json"})
return requests.put(
url=self.endpoint
+ "/{t}/{u}/items/{k}/fulltext".format(
t=self.library_type, u=self.library_id, k=itemkey
),
headers=headers,
data=json.dumps(payload),
)
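    # A minimal usage sketch for set_fulltext(); the item key and character counts
    # below are made-up illustrative values, not taken from the API docs:
    #
    #   zot = Zotero("123456", "user", "apikey")
    #   zot.set_fulltext("ABCD2345", {
    #       "content": "The full text of the attachment...",
    #       "indexedChars": 36,
    #       "totalChars": 36,
    #   })
    #
    # For a PDF attachment you would supply 'indexedPages' and 'totalPages' instead.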
def new_fulltext(self, version):
"""
Retrieve list of full-text content items and versions which are newer
than <version>
"""
query_string = "/{t}/{u}/fulltext".format(
t=self.library_type, u=self.library_id
)
headers = {"since": str(version)}
headers.update(self.default_headers())
self._check_backoff()
resp = requests.get(self.endpoint + query_string, headers=headers)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, resp)
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return resp.json()
def item_versions(self, **kwargs):
"""
        Returns a dict mapping item keys (all items, no limit by default) to versions.
        Accepts a since= parameter in kwargs to restrict the data to items updated since that version
"""
if "limit" not in kwargs:
kwargs["limit"] = None
kwargs["format"] = "versions"
return self.items(**kwargs)
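    # Illustrative sketch (the key/version values are assumed examples): calling
    #
    #   zot.item_versions(since=1234)
    #
    # returns a dict such as {"ABCD2345": 1240, "BCDE3456": 1238}, i.e. item keys
    # mapped to the version at which each item was last modified.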
def collection_versions(self, **kwargs):
"""
        Returns a dict mapping collection keys (all collections, no limit by default) to versions.
        Accepts a since= parameter in kwargs to restrict the data to collections updated since that version
"""
if "limit" not in kwargs:
kwargs["limit"] = None
kwargs["format"] = "versions"
return self.collections(**kwargs)
def last_modified_version(self, **kwargs):
""" Get the last modified version
"""
self.items(**kwargs)
return int(self.request.headers.get("last-modified-version", 0))
@retrieve
def top(self, **kwargs):
""" Get user top-level items
"""
query_string = "/{t}/{u}/items/top"
return self._build_query(query_string)
@retrieve
def trash(self, **kwargs):
""" Get all items in the trash
"""
query_string = "/{t}/{u}/items/trash"
return self._build_query(query_string)
@retrieve
def searches(self, **kwargs):
""" Get saved searches
"""
query_string = "/{t}/{u}/searches"
return self._build_query(query_string)
@retrieve
def deleted(self, **kwargs):
""" Get all deleted items (requires since= parameter)
"""
if "limit" not in kwargs:
            # The deleted API call currently ignores the limit parameter;
            # leaving it out by default preserves compatibility
kwargs["limit"] = None
query_string = "/{t}/{u}/deleted"
return self._build_query(query_string)
@retrieve
def item(self, item, **kwargs):
""" Get a specific item
"""
query_string = "/{t}/{u}/items/{i}".format(
u=self.library_id, t=self.library_type, i=item.upper()
)
return self._build_query(query_string)
@retrieve
def file(self, item, **kwargs):
""" Get the file from an specific item
"""
query_string = "/{t}/{u}/items/{i}/file".format(
u=self.library_id, t=self.library_type, i=item.upper()
)
return self._build_query(query_string, no_params=True)
def dump(self, itemkey, filename=None, path=None):
"""
Dump a file attachment to disk, with optional filename and path
"""
if not filename:
filename = self.item(itemkey)["data"]["filename"]
if path:
pth = os.path.join(path, filename)
else:
pth = filename
file = self.file(itemkey)
if self.snapshot:
self.snapshot = False
pth = pth + ".zip"
with open(pth, "wb") as f:
f.write(file)
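    # A hedged usage sketch for dump(); the item key and paths are invented examples:
    #
    #   zot.dump("ABCD2345")                          # saves using the stored filename
    #   zot.dump("ABCD2345", "article.pdf", "/tmp")   # saves to /tmp/article.pdf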
@retrieve
def children(self, item, **kwargs):
""" Get a specific item's child items
"""
query_string = "/{t}/{u}/items/{i}/children".format(
u=self.library_id, t=self.library_type, i=item.upper()
)
return self._build_query(query_string)
@retrieve
def collection_items(self, collection, **kwargs):
""" Get a specific collection's items
"""
query_string = "/{t}/{u}/collections/{c}/items".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._build_query(query_string)
@retrieve
def collection_items_top(self, collection, **kwargs):
""" Get a specific collection's top-level items
"""
query_string = "/{t}/{u}/collections/{c}/items/top".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._build_query(query_string)
@retrieve
def collection_tags(self, collection, **kwargs):
""" Get a specific collection's tags
"""
query_string = "/{t}/{u}/collections/{c}/tags".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._build_query(query_string)
@retrieve
def collection(self, collection, **kwargs):
""" Get user collection
"""
query_string = "/{t}/{u}/collections/{c}".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._build_query(query_string)
@retrieve
def collections(self, **kwargs):
""" Get user collections
"""
query_string = "/{t}/{u}/collections"
return self._build_query(query_string)
def all_collections(self, collid=None):
"""
Retrieve all collections and subcollections. Works for top-level collections
or for a specific collection. Works at all collection depths.
"""
all_collections = []
def subcoll(clct):
""" recursively add collections to a flat master list """
all_collections.append(clct)
if clct["meta"].get("numCollections", 0) > 0:
# add collection to master list & recur with all child
# collections
[
subcoll(c)
for c in self.everything(self.collections_sub(clct["data"]["key"]))
]
# select all top-level collections or a specific collection and
# children
if collid:
toplevel = [self.collection(collid)]
else:
toplevel = self.everything(self.collections_top())
[subcoll(collection) for collection in toplevel]
return all_collections
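    # Sketch of how all_collections() flattens the tree (collection keys below are
    # assumed for illustration): it walks every top-level collection and recurses
    # into subcollections, so
    #
    #   [c["data"]["key"] for c in zot.all_collections()]
    #
    # yields a flat list such as ["AAAA1111", "BBBB2222", ...] regardless of depth.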
@retrieve
def collections_top(self, **kwargs):
""" Get top-level user collections
"""
query_string = "/{t}/{u}/collections/top"
return self._build_query(query_string)
@retrieve
def collections_sub(self, collection, **kwargs):
""" Get subcollections for a specific collection
"""
query_string = "/{t}/{u}/collections/{c}/collections".format(
u=self.library_id, t=self.library_type, c=collection.upper()
)
return self._build_query(query_string)
@retrieve
def groups(self, **kwargs):
""" Get user groups
"""
query_string = "/users/{u}/groups"
return self._build_query(query_string)
@retrieve
def tags(self, **kwargs):
""" Get tags
"""
query_string = "/{t}/{u}/tags"
self.tag_data = True
return self._build_query(query_string)
@retrieve
def item_tags(self, item, **kwargs):
""" Get tags for a specific item
"""
query_string = "/{t}/{u}/items/{i}/tags".format(
u=self.library_id, t=self.library_type, i=item.upper()
)
self.tag_data = True
return self._build_query(query_string)
def all_top(self, **kwargs):
""" Retrieve all top-level items
"""
return self.everything(self.top(**kwargs))
@retrieve
def follow(self):
""" Return the result of the call to the URL in the 'Next' link
"""
if self.links.get("next"):
return self.links.get("next")
else:
return
def iterfollow(self):
""" Generator for self.follow()
"""
# use same criterion as self.follow()
while True:
if self.links.get("next"):
yield self.follow()
else:
return
def makeiter(self, func):
""" Return a generator of func's results
"""
# reset the link. This results in an extra API call, yes
self.links["next"] = self.links["self"]
return self.iterfollow()
def everything(self, query):
"""
Retrieve all items in the library for a particular query
This method will override the 'limit' parameter if it's been set
"""
try:
items = []
items.extend(query)
while self.links.get("next"):
items.extend(self.follow())
except TypeError:
# we have a bibliography object ughh
items = copy.deepcopy(query)
while self.links.get("next"):
items.entries.extend(self.follow().entries)
return items
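    # Pagination sketch: everything() keeps following the 'next' link until it is
    # exhausted, so the two calls below differ only in how many requests are made
    # (the 100-item default page size comes from add_parameters()):
    #
    #   first_page = zot.top(limit=100)        # at most one page
    #   all_items = zot.everything(zot.top())  # follows 'next' links until done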
def get_subset(self, subset):
"""
Retrieve a subset of items
Accepts a single argument: a list of item IDs
"""
if len(subset) > 50:
raise ze.TooManyItems("You may only retrieve 50 items per call")
# remember any url parameters that have been set
params = self.url_params
retr = []
for itm in subset:
retr.extend(self.item(itm))
self.url_params = params
# clean up URL params when we're finished
self.url_params = None
return retr
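    # get_subset() usage sketch (item keys are invented for illustration):
    #
    #   items = zot.get_subset(["ABCD2345", "BCDE3456", "CDEF4567"])
    #
    # Passing more than 50 keys raises ze.TooManyItems.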
# The following methods process data returned by Read API calls
def _json_processor(self, retrieved):
""" Format and return data from API calls which return Items
"""
json_kwargs = {}
if self.preserve_json_order:
json_kwargs["object_pairs_hook"] = OrderedDict
# send entries to _tags_data if there's no JSON
try:
items = [
json.loads(e["content"][0]["value"], **json_kwargs)
for e in retrieved.entries
]
except KeyError:
return self._tags_data(retrieved)
return items
def _csljson_processor(self, retrieved):
""" Return a list of dicts which are dumped CSL JSON
"""
items = []
json_kwargs = {}
if self.preserve_json_order:
json_kwargs["object_pairs_hook"] = OrderedDict
for csl in retrieved.entries:
items.append(json.loads(csl["content"][0]["value"], **json_kwargs))
self.url_params = None
return items
def _bib_processor(self, retrieved):
""" Return a list of strings formatted as HTML bibliography entries
"""
items = []
for bib in retrieved.entries:
items.append(bib["content"][0]["value"])
self.url_params = None
return items
def _citation_processor(self, retrieved):
""" Return a list of strings formatted as HTML citation entries
"""
items = []
for cit in retrieved.entries:
items.append(cit["content"][0]["value"])
self.url_params = None
return items
def _tags_data(self, retrieved):
""" Format and return data from API calls which return Tags
"""
self.url_params = None
return [t["tag"] for t in retrieved]
# The following methods are Write API calls
def item_template(self, itemtype, linkmode=None):
""" Get a template for a new item
"""
# if we have a template and it hasn't been updated since we stored it
template_name = "{}_{}_{}".format(*["item_template", itemtype, linkmode or ""])
query_string = "/items/new?itemType={i}".format(i=itemtype)
if self.templates.get(template_name) and not self._updated(
query_string, self.templates[template_name], template_name
):
return copy.deepcopy(self.templates[template_name]["tmplt"])
# Set linkMode parameter for API request if itemtype is attachment
if itemtype == "attachment":
query_string = "{}&linkMode={}".format(query_string, linkmode)
# otherwise perform a normal request and cache the response
retrieved = self._retrieve_data(query_string)
return self._cache(retrieved, template_name)
def _attachment_template(self, attachment_type):
"""
Return a new attachment template of the required type:
imported_file
imported_url
linked_file
linked_url
"""
return self.item_template("attachment&linkMode=" + attachment_type)
def _attachment(self, payload, parentid=None):
"""
Create attachments
accepts a list of one or more attachment template dicts
and an optional parent Item ID. If this is specified,
attachments are created under this ID
"""
attachment = Zupload(self, payload, parentid)
res = attachment.upload()
return res
@ss_wrap
def show_operators(self):
""" Show available saved search operators """
return self.savedsearch.operators
@ss_wrap
def show_conditions(self):
""" Show available saved search conditions """
return self.savedsearch.conditions_operators.keys()
@ss_wrap
def show_condition_operators(self, condition):
""" Show available operators for a given saved search condition """
# dict keys of allowed operators for the current condition
permitted_operators = self.savedsearch.conditions_operators.get(condition)
# transform these into values
permitted_operators_list = set(
[self.savedsearch.operators.get(op) for op in permitted_operators]
)
return permitted_operators_list
@ss_wrap
def saved_search(self, name, conditions):
""" Create a saved search. conditions is a list of dicts
containing search conditions, and must contain the following str keys:
condition, operator, value
"""
self.savedsearch._validate(conditions)
payload = [{"name": name, "conditions": conditions}]
headers = {"Zotero-Write-Token": token()}
headers.update(self.default_headers())
self._check_backoff()
req = requests.post(
url=self.endpoint
+ "/{t}/{u}/searches".format(t=self.library_type, u=self.library_id),
headers=headers,
data=json.dumps(payload),
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return req.json()
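    # A hedged example of the conditions format expected by saved_search(); the
    # condition/operator/value combinations shown are assumed illustrations only:
    #
    #   zot.saved_search("Recent journal articles", [
    #       {"condition": "itemType", "operator": "is", "value": "journalArticle"},
    #       {"condition": "title", "operator": "contains", "value": "climate"},
    #   ])
    #
    # Each dict must contain exactly the keys 'condition', 'operator' and 'value';
    # _validate() rejects anything else.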
@ss_wrap
def delete_saved_search(self, keys):
""" Delete one or more saved searches by passing a list of one or more
unique search keys
"""
headers = {"Zotero-Write-Token": token()}
headers.update(self.default_headers())
self._check_backoff()
req = requests.delete(
url=self.endpoint
+ "/{t}/{u}/searches".format(t=self.library_type, u=self.library_id),
headers=headers,
params={"searchKey": ",".join(keys)},
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return req.status_code
def upload_attachments(self, attachments, parentid=None, basedir=None):
"""Upload files to the already created (but never uploaded) attachments"""
return Zupload(self, attachments, parentid, basedir=basedir).upload()
def add_tags(self, item, *tags):
"""
Add one or more tags to a retrieved item,
then update it on the server
Accepts a dict, and one or more tags to add to it
Returns the updated item from the server
"""
# Make sure there's a tags field, or add one
try:
assert item["data"]["tags"]
except AssertionError:
item["data"]["tags"] = list()
for tag in tags:
item["data"]["tags"].append({"tag": "%s" % tag})
# make sure everything's OK
assert self.check_items([item])
return self.update_item(item)
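    # add_tags() usage sketch (the tag names are arbitrary examples):
    #
    #   first_item = zot.top(limit=1)[0]
    #   zot.add_tags(first_item, "to-read", "monograph")
    #
    # The tags are appended to the item's 'tags' list and the item is written back
    # to the server via update_item().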
def check_items(self, items):
"""
Check that items to be created contain no invalid dict keys
Accepts a single argument: a list of one or more dicts
The retrieved fields are cached and re-used until a 304 call fails
"""
params = {"locale": self.locale}
query_string = "/itemFields"
r = Request("GET", self.endpoint + query_string, params=params).prepare()
# now split up the URL
result = urlparse(r.url)
# construct cache key
cachekey = result.path + "_" + result.query
if self.templates.get(cachekey) and not self._updated(
query_string, self.templates[cachekey], cachekey
):
template = set(t["field"] for t in self.templates[cachekey]["tmplt"])
else:
template = set(t["field"] for t in self.item_fields())
# add fields we know to be OK
template = template | set(
[
"path",
"tags",
"notes",
"itemType",
"creators",
"mimeType",
"linkMode",
"note",
"charset",
"dateAdded",
"version",
"collections",
"dateModified",
"relations",
# attachment items
"parentItem",
"mtime",
"contentType",
"md5",
"filename",
]
)
template = template | set(self.temp_keys)
for pos, item in enumerate(items):
if set(item) == set(["links", "library", "version", "meta", "key", "data"]):
# we have an item that was retrieved from the API
item = item["data"]
to_check = set(i for i in list(item.keys()))
difference = to_check.difference(template)
if difference:
raise ze.InvalidItemFields(
"Invalid keys present in item %s: %s"
% (pos + 1, " ".join(i for i in difference))
)
return items
@tcache
def item_types(self):
""" Get all available item types
"""
# Check for a valid cached version
params = {"locale": self.locale}
query_string = "/itemTypes"
return query_string, params
@tcache
def creator_fields(self):
""" Get localised creator fields
"""
# Check for a valid cached version
params = {"locale": self.locale}
query_string = "/creatorFields"
return query_string, params
@tcache
def item_type_fields(self, itemtype):
""" Get all valid fields for an item
"""
params = {"itemType": itemtype, "locale": self.locale}
query_string = "/itemTypeFields"
return query_string, params
@tcache
def item_creator_types(self, itemtype):
""" Get all available creator types for an item
"""
params = {"itemType": itemtype, "locale": self.locale}
query_string = "/itemTypeCreatorTypes"
return query_string, params
@tcache
def item_fields(self):
""" Get all available item fields
"""
# Check for a valid cached version
params = {"locale": self.locale}
query_string = "/itemFields"
return query_string, params
def item_attachment_link_modes(self):
""" Get all available link mode types.
Note: No viable REST API route was found for this, so I tested and built a list from documentation found
here - https://www.zotero.org/support/dev/web_api/json
"""
return ["imported_file", "imported_url", "linked_file", "linked_url"]
def create_items(self, payload, parentid=None, last_modified=None):
"""
Create new Zotero items
Accepts two arguments:
a list containing one or more item dicts
an optional parent item ID.
Note that this can also be used to update existing items
"""
if len(payload) > 50:
raise ze.TooManyItems("You may only create up to 50 items per call")
# TODO: strip extra data if it's an existing item
headers = {"Zotero-Write-Token": token(), "Content-Type": "application/json"}
if last_modified is not None:
headers["If-Unmodified-Since-Version"] = str(last_modified)
        to_send = json.dumps([i for i in self._cleanup(*payload, allow=("key",))])
headers.update(self.default_headers())
self._check_backoff()
req = requests.post(
url=self.endpoint
+ "/{t}/{u}/items".format(t=self.library_type, u=self.library_id),
data=to_send,
headers=dict(headers),
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
resp = req.json()
backoff = self.request.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
if parentid:
# we need to create child items using PATCH
# TODO: handle possibility of item creation + failed parent
# attachment
uheaders = {
"If-Unmodified-Since-Version": req.headers["last-modified-version"]
}
uheaders.update(self.default_headers())
for value in resp["success"].values():
payload = json.dumps({"parentItem": parentid})
self._check_backoff()
presp = requests.patch(
url=self.endpoint
+ "/{t}/{u}/items/{v}".format(
t=self.library_type, u=self.library_id, v=value
),
data=payload,
headers=dict(uheaders),
)
self.request = presp
try:
presp.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, presp)
backoff = presp.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return resp
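    # Typical create_items() flow, sketched with assumed field values: fetch a
    # template, fill it in, then POST it (up to 50 templates per call):
    #
    #   template = zot.item_template("book")
    #   template["title"] = "The Book of Assumed Examples"
    #   template["creators"][0]["lastName"] = "Doe"
    #   resp = zot.create_items([template])
    #   # resp["success"] maps the payload index to the newly created item key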
def create_collection(self, payload, last_modified=None):
"""Alias for create_collections to preserve backward compatibility"""
return self.create_collections(payload, last_modified)
def create_collections(self, payload, last_modified=None):
"""
Create new Zotero collections
Accepts one argument, a list of dicts containing the following keys:
'name': the name of the collection
'parentCollection': OPTIONAL, the parent collection to which you wish to add this
"""
# no point in proceeding if there's no 'name' key
for item in payload:
if "name" not in item:
raise ze.ParamNotPassed("The dict you pass must include a 'name' key")
# add a blank 'parentCollection' key if it hasn't been passed
if "parentCollection" not in item:
item["parentCollection"] = ""
headers = {"Zotero-Write-Token": token()}
if last_modified is not None:
headers["If-Unmodified-Since-Version"] = str(last_modified)
headers.update(self.default_headers())
self._check_backoff()
req = requests.post(
url=self.endpoint
+ "/{t}/{u}/collections".format(t=self.library_type, u=self.library_id),
headers=headers,
data=json.dumps(payload),
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = req.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return req.json()
@backoff_check
def update_collection(self, payload, last_modified=None):
"""
Update a Zotero collection property such as 'name'
Accepts one argument, a dict containing collection data retrieved
using e.g. 'collections()'
"""
modified = payload["version"]
if last_modified is not None:
modified = last_modified
key = payload["key"]
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
headers.update({"Content-Type": "application/json"})
return requests.put(
url=self.endpoint
+ "/{t}/{u}/collections/{c}".format(
t=self.library_type, u=self.library_id, c=key
),
headers=headers,
data=json.dumps(payload),
)
def attachment_simple(self, files, parentid=None):
"""
Add attachments using filenames as title
Arguments:
One or more file paths to add as attachments:
An optional Item ID, which will create child attachments
"""
orig = self._attachment_template("imported_file")
to_add = [orig.copy() for fls in files]
for idx, tmplt in enumerate(to_add):
tmplt["title"] = os.path.basename(files[idx])
tmplt["filename"] = files[idx]
if parentid:
return self._attachment(to_add, parentid)
else:
return self._attachment(to_add)
def attachment_both(self, files, parentid=None):
"""
Add child attachments using title, filename
Arguments:
One or more lists or tuples containing title, file path
An optional Item ID, which will create child attachments
"""
orig = self._attachment_template("imported_file")
to_add = [orig.copy() for f in files]
for idx, tmplt in enumerate(to_add):
tmplt["title"] = files[idx][0]
tmplt["filename"] = files[idx][1]
if parentid:
return self._attachment(to_add, parentid)
else:
return self._attachment(to_add)
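    # Sketch of the two attachment helpers (file paths and parent key are invented):
    #
    #   zot.attachment_simple(["/tmp/scan.pdf"], parentid="ABCD2345")
    #   zot.attachment_both([("Signed contract", "/tmp/contract.pdf")], "ABCD2345")
    #
    # Both build 'imported_file' templates and hand them to Zupload via _attachment().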
@backoff_check
def update_item(self, payload, last_modified=None):
"""
Update an existing item
Accepts one argument, a dict containing Item data
"""
to_send = self.check_items([payload])[0]
if last_modified is None:
modified = payload["version"]
else:
modified = last_modified
ident = payload["key"]
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
return requests.patch(
url=self.endpoint
+ "/{t}/{u}/items/{id}".format(
t=self.library_type, u=self.library_id, id=ident
),
headers=headers,
data=json.dumps(to_send),
)
def update_items(self, payload):
"""
Update existing items
Accepts one argument, a list of dicts containing Item data
"""
to_send = [self.check_items([p])[0] for p in payload]
headers = {}
headers.update(self.default_headers())
# the API only accepts 50 items at a time, so we have to split
# anything longer
for chunk in chunks(to_send, 50):
self._check_backoff()
req = requests.post(
url=self.endpoint
+ "/{t}/{u}/items/".format(t=self.library_type, u=self.library_id),
headers=headers,
data=json.dumps(chunk),
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = req.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return True
def update_collections(self, payload):
"""
Update existing collections
Accepts one argument, a list of dicts containing Collection data
"""
to_send = [self.check_items([p])[0] for p in payload]
headers = {}
headers.update(self.default_headers())
# the API only accepts 50 items at a time, so we have to split
# anything longer
for chunk in chunks(to_send, 50):
self._check_backoff()
req = requests.post(
url=self.endpoint
+ "/{t}/{u}/collections/".format(
t=self.library_type, u=self.library_id
),
headers=headers,
data=json.dumps(chunk),
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self, req)
backoff = req.headers.get("backoff")
if backoff:
self._set_backoff(backoff)
return True
@backoff_check
def addto_collection(self, collection, payload):
"""
Add one or more items to a collection
Accepts two arguments:
The collection ID, and an item dict
"""
ident = payload["key"]
modified = payload["version"]
# add the collection data from the item
modified_collections = payload["data"]["collections"] + [collection]
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
return requests.patch(
url=self.endpoint
+ "/{t}/{u}/items/{i}".format(
t=self.library_type, u=self.library_id, i=ident
),
data=json.dumps({"collections": modified_collections}),
headers=headers,
)
@backoff_check
def deletefrom_collection(self, collection, payload):
"""
Delete an item from a collection
Accepts two arguments:
        The collection ID, and an item dict
"""
ident = payload["key"]
modified = payload["version"]
# strip the collection data from the item
modified_collections = [
c for c in payload["data"]["collections"] if c != collection
]
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
return requests.patch(
url=self.endpoint
+ "/{t}/{u}/items/{i}".format(
t=self.library_type, u=self.library_id, i=ident
),
data=json.dumps({"collections": modified_collections}),
headers=headers,
)
@backoff_check
def delete_tags(self, *payload):
"""
Delete a group of tags
pass in up to 50 tags, or use *[tags]
"""
if len(payload) > 50:
raise ze.TooManyItems("Only 50 tags or fewer may be deleted")
modified_tags = " || ".join([tag for tag in payload])
# first, get version data by getting one tag
self.tags(limit=1)
headers = {
"If-Unmodified-Since-Version": self.request.headers["last-modified-version"]
}
headers.update(self.default_headers())
return requests.delete(
url=self.endpoint
+ "/{t}/{u}/tags".format(t=self.library_type, u=self.library_id),
params={"tag": modified_tags},
headers=headers,
)
@backoff_check
def delete_item(self, payload, last_modified=None):
"""
Delete Items from a Zotero library
Accepts a single argument:
a dict containing item data
OR a list of dicts containing item data
"""
params = None
if isinstance(payload, list):
params = {"itemKey": ",".join([p["key"] for p in payload])}
if last_modified is not None:
modified = last_modified
else:
modified = payload[0]["version"]
url = self.endpoint + "/{t}/{u}/items".format(
t=self.library_type, u=self.library_id
)
else:
ident = payload["key"]
if last_modified is not None:
modified = last_modified
else:
modified = payload["version"]
url = self.endpoint + "/{t}/{u}/items/{c}".format(
t=self.library_type, u=self.library_id, c=ident
)
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
return requests.delete(url=url, params=params, headers=headers)
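    # delete_item() version-handling sketch: the If-Unmodified-Since-Version header
    # comes from the payload unless last_modified is supplied explicitly
    # (the item key is an assumed example):
    #
    #   item = zot.item("ABCD2345")
    #   zot.delete_item(item)                # uses item["version"]
    #   zot.delete_item(item, last_modified=zot.last_modified_version())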
@backoff_check
def delete_collection(self, payload, last_modified=None):
"""
Delete a Collection from a Zotero library
Accepts a single argument:
a dict containing item data
OR a list of dicts containing item data
"""
params = None
if isinstance(payload, list):
params = {"collectionKey": ",".join([p["key"] for p in payload])}
if last_modified is not None:
modified = last_modified
else:
modified = payload[0]["version"]
url = self.endpoint + "/{t}/{u}/collections".format(
t=self.library_type, u=self.library_id
)
else:
ident = payload["key"]
if last_modified is not None:
modified = last_modified
else:
modified = payload["version"]
url = self.endpoint + "/{t}/{u}/collections/{c}".format(
t=self.library_type, u=self.library_id, c=ident
)
headers = {"If-Unmodified-Since-Version": str(modified)}
headers.update(self.default_headers())
return requests.delete(url=url, params=params, headers=headers)
def error_handler(zot, req):
""" Error handler for HTTP requests
"""
error_codes = {
400: ze.UnsupportedParams,
401: ze.UserNotAuthorised,
403: ze.UserNotAuthorised,
404: ze.ResourceNotFound,
409: ze.Conflict,
412: ze.PreConditionFailed,
413: ze.RequestEntityTooLarge,
428: ze.PreConditionRequired,
429: ze.TooManyRequests,
}
def err_msg(req):
""" Return a nicely-formatted error message
"""
return "\nCode: %s\nURL: %s\nMethod: %s\nResponse: %s" % (
req.status_code,
# error.msg,
req.url,
req.request.method,
req.text,
)
if error_codes.get(req.status_code):
        # check to see whether it's a 429
if req.status_code == 429:
# try to get backoff duration
delay = req.headers.get("backoff")
if not delay:
raise ze.TooManyRetries(
"You are being rate-limited and no backoff duration has been received from the server. Try again later"
)
else:
zot._set_backoff(delay)
else:
raise error_codes.get(req.status_code)(err_msg(req))
else:
raise ze.HTTPError(err_msg(req))
class SavedSearch(object):
""" Saved search functionality """
def __init__(self, zinstance):
super(SavedSearch, self).__init__()
self.zinstance = zinstance
self.searchkeys = ("condition", "operator", "value")
# always exclude these fields from zotero.item_keys()
self.excluded_items = (
"accessDate",
"date",
"pages",
"section",
"seriesNumber",
"issue",
)
self.operators = {
# this is a bit hacky, but I can't be bothered with Python's enums
"is": "is",
"isNot": "isNot",
"beginsWith": "beginsWith",
"contains": "contains",
"doesNotContain": "doesNotContain",
"isLessThan": "isLessThan",
"isGreaterThan": "isGreaterThan",
"isBefore": "isBefore",
"isAfter": "isAfter",
"isInTheLast": "isInTheLast",
"any": "any",
"all": "all",
"true": "true",
"false": "false",
}
# common groupings of operators
self.groups = {
"A": (self.operators["true"], self.operators["false"]),
"B": (self.operators["any"], self.operators["all"]),
"C": (
self.operators["is"],
self.operators["isNot"],
self.operators["contains"],
self.operators["doesNotContain"],
),
"D": (self.operators["is"], self.operators["isNot"]),
"E": (
self.operators["is"],
self.operators["isNot"],
self.operators["isBefore"],
self.operators["isInTheLast"],
),
"F": (self.operators["contains"], self.operators["doesNotContain"]),
"G": (
self.operators["is"],
self.operators["isNot"],
self.operators["contains"],
self.operators["doesNotContain"],
self.operators["isLessThan"],
self.operators["isGreaterThan"],
),
"H": (
self.operators["is"],
self.operators["isNot"],
self.operators["beginsWith"],
),
"I": (self.operators["is"]),
}
self.conditions_operators = {
"deleted": self.groups["A"],
"noChildren": self.groups["A"],
"unfiled": self.groups["A"],
"publications": self.groups["A"],
"includeParentsAndChildren": self.groups["A"],
"includeParents": self.groups["A"],
"includeChildren": self.groups["A"],
"recursive": self.groups["A"],
"joinMode": self.groups["B"],
"quicksearch-titleCreatorYear": self.groups["C"],
"quicksearch-fields": self.groups["C"],
"quicksearch-everything": self.groups["C"],
"collectionID": self.groups["D"],
"savedSearchID": self.groups["D"],
"collection": self.groups["D"],
"savedSearch": self.groups["D"],
"dateAdded": self.groups["E"],
"dateModified": self.groups["E"],
"itemTypeID": self.groups["D"],
"itemType": self.groups["D"],
"fileTypeID": self.groups["D"],
"tagID": self.groups["D"],
"tag": self.groups["C"],
"note": self.groups["F"],
"childNote": self.groups["F"],
"creator": self.groups["C"],
"lastName": self.groups["C"],
"field": self.groups["C"],
"datefield": self.groups["E"],
"year": self.groups["C"],
"numberfield": self.groups["G"],
"libraryID": self.groups["D"],
"key": self.groups["H"],
"itemID": self.groups["D"],
"annotation": self.groups["F"],
"fulltextWord": self.groups["F"],
"fulltextContent": self.groups["F"],
"tempTable": self.groups["I"],
}
###########
# ALIASES #
###########
# aliases for numberfield
pagefields = (
"pages",
"numPages",
"numberOfVolumes",
"section",
"seriesNumber",
"issue",
)
for pf in pagefields:
self.conditions_operators[pf] = self.conditions_operators.get("numberfield")
# aliases for datefield
datefields = ("accessDate", "date", "dateDue", "accepted")
for df in datefields:
self.conditions_operators[df] = self.conditions_operators.get("datefield")
# aliases for field - this makes a blocking API call unless item types have been cached
item_fields = [
itm["field"]
for itm in self.zinstance.item_fields()
if itm["field"] not in set(self.excluded_items)
]
for itf in item_fields:
self.conditions_operators[itf] = self.conditions_operators.get("field")
def _validate(self, conditions):
""" Validate saved search conditions, raising an error if any contain invalid operators """
allowed_keys = set(self.searchkeys)
operators_set = set(self.operators.keys())
for condition in conditions:
if set(condition.keys()) != allowed_keys:
raise ze.ParamNotPassed(
"Keys must be all of: %s" % ", ".join(self.searchkeys)
)
if condition.get("operator") not in operators_set:
raise ze.ParamNotPassed(
"You have specified an unknown operator: %s"
% condition.get("operator")
)
# dict keys of allowed operators for the current condition
permitted_operators = self.conditions_operators.get(
condition.get("condition")
)
# transform these into values
permitted_operators_list = set(
[self.operators.get(op) for op in permitted_operators]
)
if condition.get("operator") not in permitted_operators_list:
raise ze.ParamNotPassed(
"You may not use the '%s' operator when selecting the '%s' condition. \nAllowed operators: %s"
% (
condition.get("operator"),
condition.get("condition"),
", ".join(list(permitted_operators_list)),
)
)
class Zupload(object):
"""
Zotero file attachment helper
Receives a Zotero instance, file(s) to upload, and optional parent ID
"""
def __init__(self, zinstance, payload, parentid=None, basedir=None):
super(Zupload, self).__init__()
self.zinstance = zinstance
self.payload = payload
self.parentid = parentid
if basedir is None:
self.basedir = Path("")
elif isinstance(basedir, Path):
self.basedir = basedir
else:
self.basedir = Path(basedir)
def _verify(self, payload):
"""
ensure that all files to be attached exist
        open() is better than exists(), because it avoids a race condition
"""
if not payload: # Check payload has nonzero length
raise ze.ParamNotPassed
for templt in payload:
if os.path.isfile(str(self.basedir.joinpath(templt["filename"]))):
try:
# if it is a file, try to open it, and catch the error
with open(str(self.basedir.joinpath(templt["filename"]))):
pass
except IOError:
raise ze.FileDoesNotExist(
"The file at %s couldn't be opened or found."
% str(self.basedir.joinpath(templt["filename"]))
)
# no point in continuing if the file isn't a file
else:
raise ze.FileDoesNotExist(
"The file at %s couldn't be opened or found."
% str(self.basedir.joinpath(templt["filename"]))
)
def _create_prelim(self):
"""
Step 0: Register intent to upload files
"""
self._verify(self.payload)
if "key" in self.payload[0] and self.payload[0]["key"]:
if next((i for i in self.payload if "key" not in i), False):
raise ze.UnsupportedParams(
"Can't pass payload entries with and without keys to Zupload"
)
return None # Don't do anything if payload comes with keys
liblevel = "/{t}/{u}/items"
# Create one or more new attachments
headers = {"Zotero-Write-Token": token(), "Content-Type": "application/json"}
headers.update(self.zinstance.default_headers())
# If we have a Parent ID, add it as a parentItem
if self.parentid:
for child in self.payload:
child["parentItem"] = self.parentid
to_send = json.dumps(self.payload)
self.zinstance._check_backoff()
req = requests.post(
url=self.zinstance.endpoint
+ liblevel.format(
t=self.zinstance.library_type, u=self.zinstance.library_id
),
data=to_send,
headers=headers,
)
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self.zinstance, req)
backoff = req.headers.get("backoff")
if backoff:
self.zinstance._set_backoff(backoff)
data = req.json()
for k in data["success"]:
self.payload[int(k)]["key"] = data["success"][k]
return data
def _get_auth(self, attachment, reg_key, md5=None):
"""
Step 1: get upload authorisation for a file
"""
mtypes = mimetypes.guess_type(attachment)
digest = hashlib.md5()
with open(attachment, "rb") as att:
for chunk in iter(lambda: att.read(8192), b""):
digest.update(chunk)
auth_headers = {"Content-Type": "application/x-www-form-urlencoded"}
if not md5:
auth_headers["If-None-Match"] = "*"
else:
# docs specify that for existing file we use this
auth_headers["If-Match"] = md5
auth_headers.update(self.zinstance.default_headers())
data = {
"md5": digest.hexdigest(),
"filename": os.path.basename(attachment),
"filesize": os.path.getsize(attachment),
"mtime": str(int(os.path.getmtime(attachment) * 1000)),
"contentType": mtypes[0] or "application/octet-stream",
"charset": mtypes[1],
"params": 1,
}
self.zinstance._check_backoff()
auth_req = requests.post(
url=self.zinstance.endpoint
+ "/{t}/{u}/items/{i}/file".format(
t=self.zinstance.library_type, u=self.zinstance.library_id, i=reg_key
),
data=data,
headers=auth_headers,
)
try:
auth_req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(self.zinstance, auth_req)
backoff = auth_req.headers.get("backoff")
if backoff:
            self.zinstance._set_backoff(backoff)
return auth_req.json()
def _upload_file(self, authdata, attachment, reg_key):
"""
Step 2: auth successful, and file not on server
zotero.org/support/dev/server_api/file_upload#a_full_upload
reg_key isn't used, but we need to pass it through to Step 3
"""
upload_dict = authdata["params"]
# pass tuple of tuples (not dict!), to ensure key comes first
upload_list = [("key", upload_dict.pop("key"))]
for key, value in upload_dict.items():
upload_list.append((key, value))
upload_list.append(("file", open(attachment, "rb").read()))
upload_pairs = tuple(upload_list)
try:
self.zinstance._check_backoff()
upload = requests.post(
url=authdata["url"],
files=upload_pairs,
headers={"User-Agent": "Pyzotero/%s" % __version__},
)
except (requests.exceptions.ConnectionError):
raise ze.UploadError("ConnectionError")
try:
upload.raise_for_status()
except requests.exceptions.HTTPError:
            error_handler(self.zinstance, upload)
        backoff = upload.headers.get("backoff")
        if backoff:
            self.zinstance._set_backoff(backoff)
# now check the responses
return self._register_upload(authdata, reg_key)
def _register_upload(self, authdata, reg_key):
"""
Step 3: upload successful, so register it
"""
reg_headers = {
"Content-Type": "application/x-www-form-urlencoded",
"If-None-Match": "*",
}
reg_headers.update(self.zinstance.default_headers())
reg_data = {"upload": authdata.get("uploadKey")}
self.zinstance._check_backoff()
upload_reg = requests.post(
url=self.zinstance.endpoint
+ "/{t}/{u}/items/{i}/file".format(
t=self.zinstance.library_type, u=self.zinstance.library_id, i=reg_key
),
data=reg_data,
headers=dict(reg_headers),
)
try:
upload_reg.raise_for_status()
except requests.exceptions.HTTPError:
            error_handler(self.zinstance, upload_reg)
        backoff = upload_reg.headers.get("backoff")
        if backoff:
            self.zinstance._set_backoff(backoff)
def upload(self):
"""
File upload functionality
Goes through upload steps 0 - 3 (private class methods), and returns
a dict noting success, failure, or unchanged
(returning the payload entries with that property as a list for each status)
"""
result = {"success": [], "failure": [], "unchanged": []}
self._create_prelim()
for item in self.payload:
if "key" not in item:
result["failure"].append(item)
continue
attach = str(self.basedir.joinpath(item["filename"]))
authdata = self._get_auth(attach, item["key"], md5=item.get("md5", None))
# no need to keep going if the file exists
if authdata.get("exists"):
result["unchanged"].append(item)
continue
self._upload_file(authdata, attach, item["key"])
result["success"].append(item)
return result
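# Rough sketch of the full upload sequence driven by upload(); the filename, keys
# and credentials are assumed examples. Each payload entry passes through steps 0-3:
#
#   zot = Zotero("123456", "user", "apikey")
#   templates = [zot.item_template("attachment", "imported_file")]
#   templates[0]["title"] = templates[0]["filename"] = "report.pdf"
#   Zupload(zot, templates, parentid="ABCD2345", basedir="/tmp").upload()
#
# upload() returns {"success": [...], "failure": [...], "unchanged": [...]}.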
| 36.044444
| 123
| 0.570147
|
7ab5c2106e3d1907bf21b279d952fea9575b3647
| 265
|
py
|
Python
|
aptitudetech_private/aptitudetech_private/doctype/issue_time_details/issue_time_details.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/aptitudetech_private/doctype/issue_time_details/issue_time_details.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/aptitudetech_private/doctype/issue_time_details/issue_time_details.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | 1
|
2019-05-17T00:04:05.000Z
|
2019-05-17T00:04:05.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitudetech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Issuetimedetails(Document):
pass
| 24.090909
| 51
| 0.788679
|
d3509fa3556c4fab8805b2ea4d69a615e33b405e
| 1,340
|
py
|
Python
|
solutions/022/solve22.py
|
zsarge/ProjectEuler
|
751b19df53483d517e6bf71ccc5fb9918ff50322
|
[
"MIT"
] | null | null | null |
solutions/022/solve22.py
|
zsarge/ProjectEuler
|
751b19df53483d517e6bf71ccc5fb9918ff50322
|
[
"MIT"
] | null | null | null |
solutions/022/solve22.py
|
zsarge/ProjectEuler
|
751b19df53483d517e6bf71ccc5fb9918ff50322
|
[
"MIT"
] | 1
|
2021-06-07T18:45:07.000Z
|
2021-06-07T18:45:07.000Z
|
# https://projecteuler.net/problem=22
# Run with: 'python solve22.py'
# using Python 3.6.9
# by Zack Sargent
# Prompt:
# Using p022_names.txt,
# a 46K text file containing over five-thousand first names,
# begin by sorting it into alphabetical order.
# Then working out the alphabetical value for each name,
# multiply this value by its alphabetical position in the list
# to obtain a name score.
#
# For example, when the list is sorted into alphabetical order,
# COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53,
# is the 938th name in the list.
# So, COLIN would obtain a score of 938 × 53 = 49714.
# What is the total of all the name scores in the file?
names = []
with open("p022_names.txt", "r") as f:
text = f.read()
names = text.replace('"', "").split(",")
names.sort()
def letter_value(letter: str) -> int:
if len(letter) != 1:
raise Exception("letter_value expects one letter")
return (ord(letter) - ord("A") + 1)
# alphabetical value
def alpha_value(name: str) -> int:
value = 0
for letter in name:
value += letter_value(letter)
return value
def position(name: str) -> int:
return names.index(name) + 1
def name_score(name: str) -> int:
return position(name) * alpha_value(name)
total = 0
for name in names:
total += name_score(name)
print(total)
# -> 871198282
| 25.769231
| 63
| 0.671642
|
9c93d80e5a52da798098fdd8e3419c56617f20b5
| 1,760
|
py
|
Python
|
arekit/common/data/views/samples.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | 18
|
2019-12-14T18:43:11.000Z
|
2022-03-21T05:55:36.000Z
|
arekit/common/data/views/samples.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | 284
|
2020-08-08T20:52:44.000Z
|
2022-03-31T05:26:20.000Z
|
arekit/common/data/views/samples.py
|
nicolay-r/AREk
|
19c39ec0dc9a17464cade03b9c4da0c6d1d21191
|
[
"MIT"
] | 1
|
2021-08-07T13:17:43.000Z
|
2021-08-07T13:17:43.000Z
|
from arekit.common.data import const
from arekit.common.data.row_ids.base import BaseIDProvider
from arekit.common.data.storages.base import BaseRowsStorage
class BaseSampleStorageView(object):
"""
    Pandas-based input samples provider
"""
def __init__(self, storage, row_ids_provider):
assert(isinstance(row_ids_provider, BaseIDProvider))
assert(isinstance(storage, BaseRowsStorage))
self.__row_ids_provider = row_ids_provider
self._storage = storage
# TODO. #240 This is just a wrapper over storage.
def iter_rows(self, handle_rows):
assert(callable(handle_rows) or handle_rows is None)
for row_index, row in self._storage:
if handle_rows is None:
yield row_index, row
else:
yield handle_rows(row)
def iter_rows_linked_by_text_opinions(self):
undefined = -1
linked = []
current_doc_id = undefined
current_opinion_id = undefined
for row_index, sample_id in enumerate(self._storage.iter_column_values(const.ID)):
sample_id = str(sample_id)
doc_id = self._storage.get_cell(row_index=row_index, column_name=const.DOC_ID)
opinion_id = self.__row_ids_provider.parse_opinion_in_sample_id(sample_id)
if current_doc_id != undefined and current_opinion_id != undefined:
if doc_id != current_doc_id or opinion_id != current_opinion_id:
yield linked
linked = []
else:
current_doc_id = doc_id
current_opinion_id = opinion_id
linked.append(self._storage.get_row(row_index))
if len(linked) > 0:
yield linked
| 32.592593
| 90
| 0.648295
|
8304ce3e6606b76f2ec3c687035dd3050c02c5ec
| 4,840
|
py
|
Python
|
demo/mmaction2/webcam_demo.py
|
ZJCV/TSN
|
ec6ad668d20f477df44eab7035e2553d95a835f3
|
[
"Apache-2.0"
] | 2
|
2021-11-29T10:29:40.000Z
|
2022-03-22T02:39:44.000Z
|
demo/mmaction2/webcam_demo.py
|
ZJCV/TSN
|
ec6ad668d20f477df44eab7035e2553d95a835f3
|
[
"Apache-2.0"
] | 1
|
2022-03-13T09:28:52.000Z
|
2022-03-13T09:28:52.000Z
|
demo/mmaction2/webcam_demo.py
|
ZJCV/TSN
|
ec6ad668d20f477df44eab7035e2553d95a835f3
|
[
"Apache-2.0"
] | 1
|
2021-03-09T08:14:22.000Z
|
2021-03-09T08:14:22.000Z
|
# -*- coding: utf-8 -*-
"""
@date: 2021/1/14 2:51 PM
@file: visualization.py
@author: zj
@description:
"""
from operator import itemgetter
import numpy as np
import torch
from collections import deque
from threading import Thread
from tsn.model.recognizers.build import build_recognizer
from tsn.data.transforms.build import build_transform
from tsn.util.distributed import get_device, get_local_rank
from demo.mmaction2.visualization.configs.constant import *
from demo.mmaction2.visualization.utils.parser import parse_args, load_config
from demo.mmaction2.visualization.utils.misc import get_cap, get_output_file, get_label
def show_results():
if output_file is None:
print('Press "Esc", "q" or "Q" to exit')
if cap is None:
return
text_info = {}
while True:
msg = 'Waiting for action ...'
ret, frame = cap.read()
if not ret:
break
frame_queue.append(np.array(frame[:, :, ::-1]))
if len(result_queue) != 0:
text_info = {}
results = result_queue.popleft()
for i, result in enumerate(results):
selected_label, score = result
if score < threshold:
                    # if this detection score is below the threshold, leave the displayed text unchanged
break
location = (0, 40 + i * 20)
text = selected_label + ': ' + str(round(score, 2))
text_info[location] = text
cv2.putText(frame, text, location, FONTFACE, FONTSCALE, FONTCOLOR, THICKNESS, LINETYPE)
elif len(text_info):
for location, text in text_info.items():
cv2.putText(frame, text, location, FONTFACE, FONTSCALE, FONTCOLOR, THICKNESS, LINETYPE)
else:
cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, MSGCOLOR, THICKNESS, LINETYPE)
if output_file is not None:
output_file.write(frame)
else:
cv2.imshow('camera', frame)
ch = cv2.waitKey(int(1 / output_fps * 1000))
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if cap is not None:
cap.release()
if output_file is not None:
output_file.release()
cv2.destroyAllWindows()
def inference():
score_cache = deque()
scores_sum = 0
while True:
cur_windows = []
while len(cur_windows) == 0:
if len(frame_queue) == sample_length:
cur_windows = list(np.array(frame_queue))
cur_windows = cur_windows[frame_interval // 2::frame_interval]
frames = [test_pipeline(frame) for frame in cur_windows]
images = torch.stack(frames).transpose(0, 1).unsqueeze(0).to(device=device, non_blocking=True)
with torch.no_grad():
outputs = model(images)['probs'].cpu().detach()
scores = torch.softmax(outputs, dim=1).numpy()[0]
score_cache.append(scores)
scores_sum += scores
if len(score_cache) == average_size:
scores_avg = scores_sum / average_size
num_selected_labels = min(len(label), 5)
scores_tuples = tuple(zip(label, scores_avg))
scores_sorted = sorted(
scores_tuples, key=itemgetter(1), reverse=True)
results = scores_sorted[:num_selected_labels]
result_queue.append(results)
scores_sum -= score_cache.popleft()
def main():
    # frame queue, detection results, result queue
    global frame_queue, results, result_queue
    # capture device, output file, output frame rate
    global cap, output_file, output_fps
    # minimum score threshold for display, sample length, frame interval, test image transform,
    # detection model, device, window size for averaging the last N scores, label list
    global threshold, sample_length, frame_interval, test_pipeline, model, device, average_size, label
args = parse_args()
cfg = load_config(args)
cap = get_cap(cfg)
output_file, output_fps = get_output_file(cfg, cap)
label = get_label(cfg)
average_size = cfg.DEMO.AVG_SIZE
threshold = cfg.DEMO.THRESHOLD
np.random.seed(cfg.RNG_SEED)
torch.manual_seed(cfg.RNG_SEED)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
device = get_device(get_local_rank())
model = build_recognizer(cfg, device)
# prepare test pipeline from non-camera pipeline
test_pipeline = build_transform(cfg, is_train=False)
sample_length = cfg.DATASETS.CLIP_LEN * cfg.DATASETS.NUM_CLIPS * cfg.DATASETS.FRAME_INTERVAL
frame_interval = cfg.DATASETS.FRAME_INTERVAL
assert sample_length > 0
try:
frame_queue = deque(maxlen=sample_length)
result_queue = deque(maxlen=1)
pw = Thread(target=show_results, args=(), daemon=True)
pr = Thread(target=inference, args=(), daemon=True)
pw.start()
pr.start()
pw.join()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| 32.05298
| 103
| 0.630579
|
ebefab53ec3b80637482261b92bf8029b7543a63
| 10,102
|
py
|
Python
|
sdk/python/lib/pulumi/runtime/mocks.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | 1
|
2021-08-22T01:34:11.000Z
|
2021-08-22T01:34:11.000Z
|
sdk/python/lib/pulumi/runtime/mocks.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/lib/pulumi/runtime/mocks.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mocks for testing.
"""
import functools
import logging
from abc import ABC, abstractmethod
from typing import Dict, List, NamedTuple, Optional, Tuple, TYPE_CHECKING
from google.protobuf import empty_pb2
from . import rpc, rpc_manager
from .settings import Settings, configure, get_stack, get_project, get_root_resource
from .sync_await import _ensure_event_loop, _sync_await
from ..runtime.proto import engine_pb2, provider_pb2, resource_pb2
from ..runtime.stack import Stack, run_pulumi_func, wait_for_rpcs
if TYPE_CHECKING:
from ..resource import Resource
def test(fn):
"""
Decorates a test function to make sure that a returned Future
or Output is awaited as part of the test.
"""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
from .. import Output # pylint: disable=import-outside-toplevel
try:
_sync_await(run_pulumi_func(lambda: _sync_await(Output.from_input(fn(*args, **kwargs)).future())))
finally:
rpc_manager.RPC_MANAGER.clear()
return wrapper
class MockResourceArgs:
"""
    MockResourceArgs is used to construct the arguments for a new_resource mock call
"""
typ: str
name: str
inputs: dict
provider: Optional[str] = None
resource_id: Optional[str] = None
custom: Optional[bool] = None
def __init__(self,
typ: str,
name: str,
inputs: dict,
provider: Optional[str] = None,
resource_id: Optional[str] = None,
custom: Optional[bool] = None) -> None:
"""
:param str typ: The token that indicates which resource type is being constructed. This token is of the form "package:module:type".
:param str name: The logical name of the resource instance.
:param dict inputs: The inputs for the resource.
:param str provider: The identifier of the provider instance being used to manage this resource.
:param str resource_id: The physical identifier of an existing resource to read or import.
:param bool custom: Specifies whether or not the resource is Custom (i.e. managed by a resource provider).
"""
self.typ = typ
self.name = name
self.inputs = inputs
self.provider = provider
self.resource_id = resource_id
self.custom = custom
class MockCallArgs:
"""
    MockCallArgs is used to construct the arguments for a call mock
"""
token: str
args: dict
provider: str
def __init__(self, token: str, args: dict, provider: str) -> None:
"""
:param str token: The token that indicates which function is being called. This token is of the form "package:module:function".
:param dict args: The arguments provided to the function call.
:param str provider: The identifier of the provider instance being used to make the call
"""
self.token = token
self.args = args
self.provider = provider
class Mocks(ABC):
"""
Mocks is an abstract class that allows subclasses to replace operations normally implemented by the Pulumi engine with
their own implementations. This can be used during testing to ensure that calls to provider functions and resource constructors
return predictable values.
"""
@abstractmethod
def call(self, args: MockCallArgs) -> Tuple[dict, Optional[List[Tuple[str,str]]]]:
"""
call mocks provider-implemented function calls (e.g. aws.get_availability_zones).
        :param MockCallArgs args: The arguments describing the provider function call.
"""
return {}, None
@abstractmethod
def new_resource(self, args: MockResourceArgs) -> Tuple[Optional[str], dict]:
"""
new_resource mocks resource construction calls. This function should return the physical identifier and the output properties
for the resource being constructed.
        :param MockResourceArgs args: The arguments describing the resource being constructed.
"""
return "", {}
class MockMonitor:
class ResourceRegistration(NamedTuple):
urn: str
id: str
state: dict
mocks: Mocks
resources: Dict[str, ResourceRegistration]
def __init__(self, mocks: Mocks):
self.mocks = mocks
self.resources = {}
def make_urn(self, parent: str, type_: str, name: str) -> str:
if parent != "":
qualifiedType = parent.split("::")[2]
parentType = qualifiedType.split("$").pop()
type_ = parentType + "$" + type_
return "urn:pulumi:" + "::".join([get_stack(), get_project(), type_, name])
def Invoke(self, request):
# Ensure we have an event loop on this thread because it's needed when deserializing resource references.
_ensure_event_loop()
args = rpc.deserialize_properties(request.args)
if request.tok == "pulumi:pulumi:getResource":
registered_resource = self.resources.get(args["urn"])
if registered_resource is None:
raise Exception(f"unknown resource {args['urn']}")
ret_proto = _sync_await(rpc.serialize_properties(registered_resource._asdict(), {}))
fields = {"failures": None, "return": ret_proto}
return provider_pb2.InvokeResponse(**fields)
call_args = MockCallArgs(token=request.tok, args=args, provider=request.provider)
tup = self.mocks.call(call_args)
if isinstance(tup, dict):
(ret, failures) = (tup, None)
else:
(ret, failures) = tup[0], [provider_pb2.CheckFailure(property=failure[0], reason=failure[1]) for failure in tup[1]]
ret_proto = _sync_await(rpc.serialize_properties(ret, {}))
fields = {"failures": failures, "return": ret_proto}
return provider_pb2.InvokeResponse(**fields)
def ReadResource(self, request):
# Ensure we have an event loop on this thread because it's needed when deserializing resource references.
_ensure_event_loop()
state = rpc.deserialize_properties(request.properties)
resource_args = MockResourceArgs(typ=request.type,
name=request.name,
inputs=state,
provider=request.provider,
resource_id=request.id)
id_, state = self.mocks.new_resource(resource_args)
props_proto = _sync_await(rpc.serialize_properties(state, {}))
urn = self.make_urn(request.parent, request.type, request.name)
self.resources[urn] = MockMonitor.ResourceRegistration(urn, id_, state)
return resource_pb2.ReadResourceResponse(urn=urn, properties=props_proto)
def RegisterResource(self, request):
urn = self.make_urn(request.parent, request.type, request.name)
if request.type == "pulumi:pulumi:Stack":
return resource_pb2.RegisterResourceResponse(urn=urn)
# Ensure we have an event loop on this thread because it's needed when deserializing resource references.
_ensure_event_loop()
inputs = rpc.deserialize_properties(request.object)
resource_args = MockResourceArgs(typ=request.type,
name=request.name,
inputs=inputs,
provider=request.provider,
resource_id=request.importId,
custom=request.custom or False)
id_, state = self.mocks.new_resource(resource_args)
obj_proto = _sync_await(rpc.serialize_properties(state, {}))
self.resources[urn] = MockMonitor.ResourceRegistration(urn, id_, state)
return resource_pb2.RegisterResourceResponse(urn=urn, id=id_, object=obj_proto)
def RegisterResourceOutputs(self, request):
# pylint: disable=unused-argument
return empty_pb2.Empty()
def SupportsFeature(self, request):
# pylint: disable=unused-argument
return type('SupportsFeatureResponse', (object,), {'hasSupport' : True})
class MockEngine:
logger: logging.Logger
def __init__(self, logger: Optional[logging.Logger]):
self.logger = logger if logger is not None else logging.getLogger()
def Log(self, request):
if request.severity == engine_pb2.DEBUG:
self.logger.debug(request.message)
elif request.severity == engine_pb2.INFO:
self.logger.info(request.message)
elif request.severity == engine_pb2.WARNING:
self.logger.warning(request.message)
elif request.severity == engine_pb2.ERROR:
self.logger.error(request.message)
def set_mocks(mocks: Mocks,
project: Optional[str] = None,
stack: Optional[str] = None,
preview: Optional[bool] = None,
logger: Optional[logging.Logger] = None):
"""
set_mocks configures the Pulumi runtime to use the given mocks for testing.
"""
settings = Settings(monitor=MockMonitor(mocks),
engine=MockEngine(logger),
project=project if project is not None else 'project',
stack=stack if stack is not None else 'stack',
dry_run=preview,
test_mode_enabled=True)
configure(settings)
# Ensure a new root stack resource has been initialized.
if get_root_resource() is None:
Stack(lambda: None)
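# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module). A typical
# test module defines a Mocks subclass, installs it with set_mocks(), and then
# decorates its test functions with @test so any returned Output is awaited.
# The infrastructure module and output names below are hypothetical.
#
#   import pulumi
#   from pulumi.runtime import Mocks, MockResourceArgs, MockCallArgs, set_mocks, test
#
#   class MyMocks(Mocks):
#       def new_resource(self, args: MockResourceArgs):
#           # Fabricate a physical id and echo the inputs back as outputs.
#           return args.name + "_id", dict(args.inputs)
#
#       def call(self, args: MockCallArgs):
#           return {}, None
#
#   set_mocks(MyMocks(), project="test-project", stack="test-stack", preview=False)
#
#   import my_infra  # hypothetical module under test; import after set_mocks()
#
#   @test
#   def test_bucket_has_tags():
#       return my_infra.bucket.tags.apply(lambda tags: pulumi.log.info(str(tags)))
# ---------------------------------------------------------------------------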
| 37.553903
| 139
| 0.640764
|
60be7f0bb65cdd4fb2502c7dfcd4f8ff265da1b2
| 23,851
|
py
|
Python
|
lib/rucio/tests/test_subscription.py
|
llwang00/rucio
|
f49c5c9599e147823110dc6da22a0bc33a881f8e
|
[
"Apache-2.0"
] | 1
|
2019-03-15T19:29:35.000Z
|
2019-03-15T19:29:35.000Z
|
lib/rucio/tests/test_subscription.py
|
llwang00/rucio
|
f49c5c9599e147823110dc6da22a0bc33a881f8e
|
[
"Apache-2.0"
] | 58
|
2020-04-14T09:04:04.000Z
|
2021-07-13T15:12:59.000Z
|
lib/rucio/tests/test_subscription.py
|
llwang00/rucio
|
f49c5c9599e147823110dc6da22a0bc33a881f8e
|
[
"Apache-2.0"
] | 1
|
2020-03-02T17:18:14.000Z
|
2020-03-02T17:18:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2013-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2017
# - Vincent Garonne <vincent.garonne@cern.ch>, 2013-2017
# - Thomas Beermann <thomas.beermann@cern.ch>, 2014
# - Martin Barisits <martin.barisits@cern.ch>, 2015-2016
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2018
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2019
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
from __future__ import print_function
import unittest
from json import loads
import pytest
from rucio.api.subscription import list_subscriptions, add_subscription, update_subscription, \
list_subscription_rule_states, get_subscription_by_id
from rucio.client.didclient import DIDClient
from rucio.client.subscriptionclient import SubscriptionClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import InvalidObject, SubscriptionNotFound, SubscriptionDuplicate
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid as uuid
from rucio.core.account import add_account
from rucio.core.account_limit import set_local_account_limit
from rucio.core.did import add_did, set_new_dids
from rucio.core.rse import add_rse
from rucio.core.rule import add_rule
from rucio.core.scope import add_scope
from rucio.daemons.transmogrifier.transmogrifier import run
from rucio.db.sqla.constants import AccountType, DIDType
from rucio.tests.common import headers, auth
class TestSubscriptionCoreApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
cls.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
cls.vo = {}
cls.projects = ['data12_900GeV', 'data12_8TeV', 'data13_900GeV', 'data13_8TeV']
cls.pattern1 = r'(_tid|physics_(Muons|JetTauEtmiss|Egamma)\..*\.ESD|express_express(?!.*NTUP|.*\.ESD|.*RAW)|(physics|express)(?!.*NTUP).* \
\.x|physics_WarmStart|calibration(?!_PixelBeam.merge.(NTUP_IDVTXLUMI|AOD))|merge.HIST|NTUP_MUONCALIB|NTUP_TRIG)'
def test_create_and_update_and_list_subscription(self):
""" SUBSCRIPTION (API): Test the creation of a new subscription, update it, list it """
subscription_name = uuid()
with pytest.raises(InvalidObject):
result = add_subscription(name=subscription_name,
account='root',
filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'noactivity'}],
lifetime=100000,
retroactive=0,
dry_run=0,
comments='This is a comment',
issuer='root',
**self.vo)
result = add_subscription(name=subscription_name,
account='root',
filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
lifetime=100000,
retroactive=0,
dry_run=0,
comments='This is a comment',
issuer='root',
**self.vo)
with pytest.raises(TypeError):
result = update_subscription(name=subscription_name, account='root', metadata={'filter': 'toto'}, issuer='root', **self.vo)
with pytest.raises(InvalidObject):
result = update_subscription(name=subscription_name, account='root', metadata={'filter': {'project': 'toto'}}, issuer='root', **self.vo)
result = update_subscription(name=subscription_name, account='root', metadata={'filter': {'project': ['toto', ]}}, issuer='root', **self.vo)
assert result is None
result = list_subscriptions(name=subscription_name, account='root', **self.vo)
sub = []
for res in result:
sub.append(res)
assert len(sub) == 1
assert loads(sub[0]['filter'])['project'][0] == 'toto'
def test_create_list_subscription_by_id(self):
""" SUBSCRIPTION (API): Test the creation of a new subscription and list it by id """
subscription_name = uuid()
subscription_id = add_subscription(name=subscription_name,
account='root',
filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
lifetime=100000,
retroactive=0,
dry_run=0,
comments='This is a comment',
issuer='root',
**self.vo)
subscription_info = get_subscription_by_id(subscription_id, **self.vo)
assert loads(subscription_info['filter'])['project'] == self.projects
def test_create_existing_subscription(self):
""" SUBSCRIPTION (API): Test the creation of a existing subscription """
subscription_name = uuid()
def genkwargs():
kwargs = {
'name': subscription_name,
'account': 'root',
'filter': {'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
'replication_rules': [{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
'lifetime': 100000,
'retroactive': 0,
'dry_run': 0,
'comments': 'This is a comment',
'issuer': 'root'
}
kwargs.update(self.vo)
return kwargs
add_subscription(**genkwargs())
with pytest.raises(SubscriptionDuplicate):
add_subscription(**genkwargs())
def test_update_nonexisting_subscription(self):
""" SUBSCRIPTION (API): Test the update of a non-existing subscription """
subscription_name = uuid()
with pytest.raises(SubscriptionNotFound):
update_subscription(name=subscription_name, account='root', metadata={'filter': {'project': ['toto', ]}}, issuer='root', **self.vo)
def test_list_rules_states(self):
""" SUBSCRIPTION (API): Test listing of rule states for subscription """
tmp_scope = InternalScope('mock_' + uuid()[:8], **self.vo)
root = InternalAccount('root', **self.vo)
add_scope(tmp_scope, root)
site_a = 'RSE%s' % uuid().upper()
site_b = 'RSE%s' % uuid().upper()
site_a_id = add_rse(site_a, **self.vo)
site_b_id = add_rse(site_b, **self.vo)
# Add quota
set_local_account_limit(root, site_a_id, -1)
set_local_account_limit(root, site_b_id, -1)
# add a new dataset
dsn = 'dataset-%s' % uuid()
add_did(scope=tmp_scope, name=dsn,
type=DIDType.DATASET, account=root)
subscription_name = uuid()
subid = add_subscription(name=subscription_name,
account='root',
filter={'account': ['root', ], 'scope': [tmp_scope.external, ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
lifetime=100000,
retroactive=0,
dry_run=0,
comments='This is a comment',
issuer='root',
**self.vo)
# Add two rules
add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
for rule in list_subscription_rule_states(account='root', name=subscription_name, **self.vo):
assert rule[3] == 2
def test_create_and_update_and_list_subscription(rest_client, auth_token):
""" SUBSCRIPTION (REST): Test the creation of a new subscription, update it, list it """
subscription_name = uuid()
projects = ['data12_900GeV', 'data12_8TeV', 'data13_900GeV', 'data13_8TeV']
pattern1 = r'(_tid|physics_(Muons|JetTauEtmiss|Egamma)\..*\.ESD|express_express(?!.*NTUP|.*\.ESD|.*RAW)|(physics|express)(?!.*NTUP).* \
\.x|physics_WarmStart|calibration(?!_PixelBeam.merge.(NTUP_IDVTXLUMI|AOD))|merge.HIST|NTUP_MUONCALIB|NTUP_TRIG)'
data = {'options': {'filter': {'project': projects, 'datatype': ['AOD', ], 'excluded_pattern': pattern1, 'account': ['tier0', ]},
'replication_rules': [{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
'lifetime': 100000, 'retroactive': 0, 'dry_run': 0, 'comments': 'blahblah'}}
response = rest_client.post('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 201
data = {'options': {'filter': {'project': ['toto', ]}}}
response = rest_client.put('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 201
response = rest_client.get('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)))
assert response.status_code == 200
assert loads(loads(response.get_data(as_text=True))['filter'])['project'][0] == 'toto'
def test_create_and_list_subscription_by_id(rest_client, auth_token):
""" SUBSCRIPTION (REST): Test the creation of a new subscription and get by subscription id """
subscription_name = uuid()
projects = ['data12_900GeV', 'data12_8TeV', 'data13_900GeV', 'data13_8TeV']
pattern1 = r'(_tid|physics_(Muons|JetTauEtmiss|Egamma)\..*\.ESD|express_express(?!.*NTUP|.*\.ESD|.*RAW)|(physics|express)(?!.*NTUP).* \
\.x|physics_WarmStart|calibration(?!_PixelBeam.merge.(NTUP_IDVTXLUMI|AOD))|merge.HIST|NTUP_MUONCALIB|NTUP_TRIG)'
data = {'options': {'filter': {'project': projects, 'datatype': ['AOD', ], 'excluded_pattern': pattern1, 'account': ['tier0', ]},
'replication_rules': [{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
'lifetime': 100000, 'retroactive': 0, 'dry_run': 0, 'comments': 'blahblah'}}
response = rest_client.post('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 201
subscription_id = response.get_data(as_text=True)
response = rest_client.get('/subscriptions/Id/' + subscription_id, headers=headers(auth(auth_token)))
assert response.status_code == 200
assert loads(loads(response.get_data(as_text=True))['filter'])['project'][0] == 'data12_900GeV'
def test_create_existing_subscription(rest_client, auth_token):
""" SUBSCRIPTION (REST): Test the creation of a existing subscription """
subscription_name = uuid()
projects = ['data12_900GeV', 'data12_8TeV', 'data13_900GeV', 'data13_8TeV']
pattern1 = r'(_tid|physics_(Muons|JetTauEtmiss|Egamma)\..*\.ESD|express_express(?!.*NTUP|.*\.ESD|.*RAW)|(physics|express)(?!.*NTUP).* \
\.x|physics_WarmStart|calibration(?!_PixelBeam.merge.(NTUP_IDVTXLUMI|AOD))|merge.HIST|NTUP_MUONCALIB|NTUP_TRIG)'
data = {'options': {'name': subscription_name, 'filter': {'project': projects, 'datatype': ['AOD', ], 'excluded_pattern': pattern1, 'account': ['tier0', ]},
'replication_rules': [{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
'lifetime': 100000, 'retroactive': 0, 'dry_run': 0, 'comments': 'We are the knights who say Ni !'}}
response = rest_client.post('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 201
response = rest_client.post('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 409
assert response.headers.get('ExceptionClass') == 'SubscriptionDuplicate'
def test_update_nonexisting_subscription(rest_client, auth_token):
""" SUBSCRIPTION (REST): Test the update of a non-existing subscription """
subscription_name = uuid()
data = {'options': {'filter': {'project': ['toto', ]}}}
response = rest_client.put('/subscriptions/root/' + subscription_name, headers=headers(auth(auth_token)), json=data)
assert response.status_code == 404
assert response.headers.get('ExceptionClass') == 'SubscriptionNotFound'
def test_list_rules_states(vo, rest_client, auth_token):
""" SUBSCRIPTION (REST): Test listing of rule states for subscription """
tmp_scope = InternalScope('mock_' + uuid()[:8], vo=vo)
root = InternalAccount('root', vo=vo)
add_scope(tmp_scope, root)
site_a = 'RSE%s' % uuid().upper()
site_b = 'RSE%s' % uuid().upper()
site_a_id = add_rse(site_a, vo=vo)
site_b_id = add_rse(site_b, vo=vo)
# Add quota
set_local_account_limit(root, site_a_id, -1)
set_local_account_limit(root, site_b_id, -1)
# add a new dataset
dsn = 'dataset-%s' % uuid()
add_did(scope=tmp_scope, name=dsn,
type=DIDType.DATASET, account=root)
subscription_name = uuid()
subid = add_subscription(name=subscription_name,
account='root',
filter={'account': ['root', ], 'scope': [tmp_scope.external, ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}],
lifetime=100000,
retroactive=0,
dry_run=0,
comments='We want a shrubbery',
issuer='root',
vo=vo)
# Add two rules
add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_a, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
add_rule(dids=[{'scope': tmp_scope, 'name': dsn}], account=root, copies=1, rse_expression=site_b, grouping='NONE', weight=None, lifetime=None, locked=False, subscription_id=subid)
response = rest_client.get('/subscriptions/%s/%s/Rules/States' % ('root', subscription_name), headers=headers(auth(auth_token)))
assert response.status_code == 200
rulestates = None
for line in response.get_data(as_text=True).split('\n'):
if line:
rulestates = loads(line)
if rulestates[1] == subscription_name:
break
assert rulestates is not None
assert rulestates[3] == 2
class TestSubscriptionClient(unittest.TestCase):
@classmethod
def setUpClass(cls):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
cls.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
cls.vo = {}
cls.sub_client = SubscriptionClient()
cls.did_client = DIDClient()
cls.projects = ['data12_900GeV', 'data12_8TeV', 'data13_900GeV', 'data13_8TeV']
cls.pattern1 = r'(_tid|physics_(Muons|JetTauEtmiss|Egamma)\..*\.ESD|express_express(?!.*NTUP|.*\.ESD|.*RAW)|(physics|express)(?!.*NTUP).* \
\.x|physics_WarmStart|calibration(?!_PixelBeam.merge.(NTUP_IDVTXLUMI|AOD))|merge.HIST|NTUP_MUONCALIB|NTUP_TRIG)'
def test_create_and_update_and_list_subscription(self):
""" SUBSCRIPTION (CLIENT): Test the creation of a new subscription, update it, list it """
subscription_name = uuid()
with pytest.raises(InvalidObject):
subid = self.sub_client.add_subscription(name=subscription_name, account='root', filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'noactivity'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
subid = self.sub_client.add_subscription(name=subscription_name, account='root', filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
result = [sub['id'] for sub in list_subscriptions(name=subscription_name, account='root', **self.vo)]
assert subid == result[0]
with pytest.raises(TypeError):
result = self.sub_client.update_subscription(name=subscription_name, account='root', filter='toto')
result = self.sub_client.update_subscription(name=subscription_name, account='root', filter={'project': ['toto', ]})
assert result
result = list_subscriptions(name=subscription_name, account='root', **self.vo)
sub = []
for res in result:
sub.append(res)
assert len(sub) == 1
assert loads(sub[0]['filter'])['project'][0] == 'toto'
def test_create_existing_subscription(self):
""" SUBSCRIPTION (CLIENT): Test the creation of a existing subscription """
subscription_name = uuid()
result = self.sub_client.add_subscription(name=subscription_name, account='root', filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
assert result
with pytest.raises(SubscriptionDuplicate):
self.sub_client.add_subscription(name=subscription_name, account='root', filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
def test_update_nonexisting_subscription(self):
""" SUBSCRIPTION (CLIENT): Test the update of a non-existing subscription """
subscription_name = uuid()
with pytest.raises(SubscriptionNotFound):
self.sub_client.update_subscription(name=subscription_name, filter={'project': ['toto', ]})
def test_create_and_list_subscription_by_account(self):
""" SUBSCRIPTION (CLIENT): Test retrieval of subscriptions for an account """
subscription_name = uuid()
account_name = uuid()[:10]
add_account(InternalAccount(account_name, **self.vo), AccountType.USER, 'rucio@email.com')
subid = self.sub_client.add_subscription(name=subscription_name, account=account_name, filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
result = [sub['id'] for sub in self.sub_client.list_subscriptions(account=account_name)]
assert subid == result[0]
def test_create_and_list_subscription_by_name(self):
""" SUBSCRIPTION (CLIENT): Test retrieval of subscriptions for an account """
subscription_name = uuid()
subid = self.sub_client.add_subscription(name=subscription_name, account='root', filter={'project': self.projects, 'datatype': ['AOD', ], 'excluded_pattern': self.pattern1, 'account': ['tier0', ]},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK|MOCK2', 'copies': 2, 'activity': 'Data Brokering'}], lifetime=100000, retroactive=0, dry_run=0, comments='Ni ! Ni!')
result = [sub['id'] for sub in self.sub_client.list_subscriptions(name=subscription_name)]
assert subid == result[0]
def test_run_transmogrifier(self):
""" SUBSCRIPTION (DAEMON): Test the transmogrifier and the split_rule mode """
tmp_scope = InternalScope('mock_' + uuid()[:8], **self.vo)
root = InternalAccount('root', **self.vo)
add_scope(tmp_scope, root)
subscription_name = uuid()
dsn = 'dataset-%s' % uuid()
add_did(scope=tmp_scope, name=dsn, type=DIDType.DATASET, account=root)
subid = self.sub_client.add_subscription(name=subscription_name, account='root', filter={'scope': [tmp_scope.external, ], 'pattern': 'dataset-.*', 'split_rule': True},
replication_rules=[{'lifetime': 86400, 'rse_expression': 'MOCK-POSIX|MOCK2|MOCK3', 'copies': 2, 'activity': 'Data Brokering'}],
lifetime=None, retroactive=0, dry_run=0, comments='Ni ! Ni!', priority=1)
run(threads=1, bulk=1000000, once=True)
rules = [rule for rule in self.did_client.list_did_rules(scope=tmp_scope.external, name=dsn) if str(rule['subscription_id']) == str(subid)]
assert len(rules) == 2
set_new_dids([{'scope': tmp_scope, 'name': dsn}, ], 1)
run(threads=1, bulk=1000000, once=True)
rules = [rule for rule in self.did_client.list_did_rules(scope=tmp_scope.external, name=dsn) if str(rule['subscription_id']) == str(subid)]
assert len(rules) == 2
| 60.078086
| 229
| 0.621945
|
cd49468429721093dab5a59de12204cbf09fefea
| 3,436
|
py
|
Python
|
Project/src/Shared/Recomender.py
|
gasperthegracner/psis2017
|
8f9c74ec788eccea1bc969a2baf080ff75e33e7a
|
[
"Apache-2.0"
] | null | null | null |
Project/src/Shared/Recomender.py
|
gasperthegracner/psis2017
|
8f9c74ec788eccea1bc969a2baf080ff75e33e7a
|
[
"Apache-2.0"
] | null | null | null |
Project/src/Shared/Recomender.py
|
gasperthegracner/psis2017
|
8f9c74ec788eccea1bc969a2baf080ff75e33e7a
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import random
import os
class Recommender:
'''
    Class for generating recommendation data based on input parameters such as min intensity,
    max intensity, min duration, max duration, min heart rate, max heart rate and number of training days
'''
def __init__(self, filename="../../Data/clusteredIntensity_cleaned.csv", savetofile="../../Data/generated_trainings.csv"):
raw_data = numpy.genfromtxt(open(filename, 'rb'), delimiter=";", dtype=None).astype(str)
self.data = []
self.save_to_file = savetofile
for ind, row in enumerate(raw_data):
if ind == 0:
continue
tmpobj = {
"avgHR": float(row[0]),
"intensity": float(row[1]),
"duration": float(row[3]),
"all": [float(row[0]), float(row[1]), float(row[3])]
}
self.data.append(tmpobj)
def __save_generated_trainings(self, generated):
header_string = 'mon_intensity;mon_duration;mon_hr;tue_intensity;tue_duration;tue_hr;' \
'wed_intensity;wed_duration;wed_hr;thu_intensity;thu_duration;thu_hr;fri_intensity;' \
'fri_duration;fri_hr;sat_intensity;sat_duration;sat_hr;sun_intensity;sun_duration;sun_hr'
path = self.save_to_file
with open(path, 'w') as the_file:
the_file.write(header_string+"\n")
for g in generated:
s = ";".join(str(g_item) for g_item in g)
the_file.write(s + "\n")
return os.path.abspath(path)
def generate_possible_combinations(self, min_intensity=0, max_intensity=3, min_duration=0, max_duration=1000000, min_HR=0, max_HR=1000, days=7):
fitted = []
min_intensity = float(min_intensity)
max_intensity = float(max_intensity)
        min_duration = float(min_duration)
        max_duration = float(max_duration)
min_HR = float(min_HR)
max_HR = float(max_HR)
for row in self.data:
if row["intensity"] > min_intensity and row["intensity"] <= max_intensity:
if row["avgHR"] > min_HR and row["avgHR"] <= max_HR:
if row["duration"] > min_duration and row["duration"] <= max_duration:
fitted.append(row)
# mon_intensity;mon_duration;mon_hr;tue_intensity;tue_duration;tue_hr;wed_intensity;
# wed_duration;wed_hr;thu_intensity;thu_duration;thu_hr;fri_intensity;fri_duration;fri_hr;
# sat_intensity;sat_duration;sat_hr;sun_intensity;sun_duration;sun_hr
len_of_fitted = len(fitted)
        num_of_possibilities = 100
        weeks_possibilities = []
        for p in range(0, num_of_possibilities):
            num_of_true = 0
            week = []
            for i in range(0, 7):
                take_day = random.randrange(2)
                # Randomly mark a day as a training day, but never schedule more
                # than the requested number of training days per week.
                if take_day == 1 and num_of_true < days:
                    num_of_true += 1
                    fitted_ind = random.randrange(len_of_fitted)
                    week.append(fitted[fitted_ind]["intensity"])
                    week.append(fitted[fitted_ind]["duration"])
                    week.append(fitted[fitted_ind]["avgHR"])
                else:
                    week.append(0)
                    week.append(0)
                    week.append(0)
            weeks_possibilities.append(week)
        file_path = self.__save_generated_trainings(weeks_possibilities)
return file_path
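# Usage sketch (illustrative; the parameter values are made up and the CSV paths
# are the class defaults, resolved relative to the working directory):
#
#   rec = Recommender(filename="../../Data/clusteredIntensity_cleaned.csv",
#                     savetofile="../../Data/generated_trainings.csv")
#   csv_path = rec.generate_possible_combinations(min_intensity=1, max_intensity=3,
#                                                 min_duration=20, max_duration=90,
#                                                 min_HR=90, max_HR=170, days=4)
#   print(csv_path)  # absolute path of the generated weekly-training CSV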
| 40.904762
| 148
| 0.595169
|
edbc1d9241418f15d7e6d56e2f8e7cd73bd4c0fc
| 27,022
|
py
|
Python
|
tests/postgres_tests/test_array.py
|
dmedvinsky/django
|
f6681393d3f53a67b4e0645e8d02f95579d8ae2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2017-02-11T07:07:16.000Z
|
2017-02-11T07:07:16.000Z
|
tests/postgres_tests/test_array.py
|
dmedvinsky/django
|
f6681393d3f53a67b4e0645e8d02f95579d8ae2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/postgres_tests/test_array.py
|
dmedvinsky/django
|
f6681393d3f53a67b4e0645e8d02f95579d8ae2d
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 2
|
2018-03-30T04:24:48.000Z
|
2021-05-09T12:39:09.000Z
|
import decimal
import json
import unittest
import uuid
from django import forms
from django.core import exceptions, serializers, validators
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.test import TransactionTestCase, override_settings
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import (
ArrayFieldSubclass, CharArrayModel, DateTimeArrayModel, IntegerArrayModel,
NestedIntegerArrayModel, NullableIntegerArrayModel, OtherTypesArrayModel,
PostgreSQLModel, Tag,
)
try:
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.forms import SimpleArrayField, SplitArrayField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=['hello', 'goodbye'])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
# This checks that get_prep_value is deferred properly
instance = IntegerArrayModel(field=['1'])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertEqual(loaded.field, None)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=['192.168.0.1', '::1'],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field('field')
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
NullableIntegerArrayModel.objects.create(field=[1]),
NullableIntegerArrayModel.objects.create(field=[2]),
NullableIntegerArrayModel.objects.create(field=[2, 3]),
NullableIntegerArrayModel.objects.create(field=[20, 30, 40]),
NullableIntegerArrayModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]),
self.objs[:1]
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=['text'])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=['text']),
[instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]),
[instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True),
self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]),
self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]),
self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2]
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2]
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=['text']),
[]
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=['text']),
[]
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=['text']),
[]
)
def test_index(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2),
self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3),
self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1),
[instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]),
[instance]
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3]
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2),
self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0),
[obj]
)
def test_slice(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]),
self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]),
self.objs[2:3]
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]),
[instance]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]]
)
class TestDateTimeExactQuerying(PostgreSQLTestCase):
def setUp(self):
now = timezone.now()
self.datetimes = [now]
self.dates = [now.date()]
self.times = [now.time()]
self.objs = [
DateTimeArrayModel.objects.create(
datetimes=self.datetimes,
dates=self.dates,
times=self.times,
)
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes),
self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates),
self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times),
self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
def setUp(self):
self.ips = ['192.168.0.1', '::1']
self.uuids = [uuid.uuid4()]
self.decimals = [decimal.Decimal(1.25), 1.75]
self.tags = [Tag(1), Tag(2), Tag(3)]
self.objs = [
OtherTypesArrayModel.objects.create(
ips=self.ips,
uuids=self.uuids,
decimals=self.decimals,
tags=self.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips),
self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids),
self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals),
self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags),
self.objs
)
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.ManyToManyField('postgres_tests.IntegerArrayModel'))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, 'postgres.E002')
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, 'postgres.E001')
self.assertIn('max_length', errors[0].msg)
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ['postgres_tests']
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.fields.ArrayField')
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, 'postgres_tests.models.ArrayFieldSubclass')
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
})
def test_adding_field_with_default(self):
# See #22962
table_name = 'postgres_tests_integerarraydefaultmodel'
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
})
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops indexes.
"""
table_name = 'postgres_tests_chartextarrayindexmodel'
call_command('migrate', 'postgres_tests', verbosity=0)
with connection.cursor() as cursor:
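            # LIKE-index constraint names follow the pattern <table>_<column>_<hash>_like;
            # strip the table prefix and the trailing hash/_like parts to recover the
            # column names that received a pattern-ops index.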
like_constraint_field_names = [
c.rsplit('_', 2)[0][len(table_name) + 1:]
for c in connection.introspection.get_constraints(cursor, table_name)
if c.endswith('_like')
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_field_names, ['char2'])
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, table_name)
# All fields should have regular indexes.
self.assertIn('char', indexes)
self.assertIn('char2', indexes)
self.assertIn('text', indexes)
call_command('migrate', 'postgres_tests', 'zero', verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, "model": "postgres_tests.integerarraymodel", "pk": null}]'
)
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, 'item_invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
'Item 1 in the array did not validate: This field cannot be null.'
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
# This should not raise a validation error
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(cm.exception.messages[0], 'List contains 4 items, it should contain no more than 3.')
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, 'nested_array_mismatch')
self.assertEqual(cm.exception.messages[0], 'Nested arrays must have the same length.')
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc'], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
def test_with_validators(self):
field = ArrayField(models.IntegerField(validators=[validators.MinValueValidator(1)]))
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
'Item 0 in the array did not validate: Ensure this value is greater than or equal to 1.'
)
self.assertEqual(exception.code, 'item_invalid')
self.assertEqual(exception.params, {'nth': 0, 'value': 0, 'limit_value': 1, 'show_value': 0})
class TestSimpleFormField(PostgreSQLTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean('a,b,c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,9')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a whole number.')
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,')
self.assertEqual(cm.exception.messages[0], 'Item 2 in the array did not validate: This field is required.')
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('abc,c,defg')
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).'
)
self.assertEqual(first_error.code, 'item_invalid')
self.assertEqual(first_error.params, {'nth': 0, 'value': 'abc', 'limit_value': 2, 'show_value': 3})
second_error = errors[1]
self.assertEqual(
second_error.message,
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).'
)
self.assertEqual(second_error.code, 'item_invalid')
self.assertEqual(second_error.params, {'nth': 2, 'value': 'defg', 'limit_value': 2, 'show_value': 4})
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField('[a-e]{2}'))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,bc,de')
self.assertEqual(cm.exception.messages[0], 'Item 0 in the array did not validate: Enter a valid value.')
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter='|')
value = field.clean('a|b|c')
self.assertEqual(value, ['a', 'b', 'c'])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter='|')
value = field.clean('a,b|c,d')
self.assertEqual(value, [['a', 'b'], ['c', 'd']])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(['a', 'b', 'c'])
self.assertEqual(value, 'a,b,c')
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no more than 2.')
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('a,b,c')
self.assertEqual(cm.exception.messages[0], 'List contains 3 items, it should contain no fewer than 4.')
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('')
self.assertEqual(cm.exception.messages[0], 'This field is required.')
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
class TestSplitFormField(PostgreSQLTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': 'c'}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': ['a', 'b', 'c']})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {'array_0': '', 'array_1': '', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['This field is required.']})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(required=False), size=5, remove_trailing_nulls=True)
data = {'array_0': 'a', 'array_1': '', 'array_2': 'b', 'array_3': '', 'array_4': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {'array': ['a', '', 'b']})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {'array_0': '', 'array_1': ''}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {'array': []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {'array_0': 'a', 'array_1': 'b', 'array_2': ''}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {'array': ['Item 2 in the array did not validate: This field is required.']})
def test_invalid_integer(self):
msg = 'Item 1 in the array did not validate: Ensure this value is less than or equal to 100.'
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(str(SplitForm()), '''
<tr>
<th><label for="id_array_0">Array:</label></th>
<td>
<input id="id_array_0" name="array_0" type="text" required />
<input id="id_array_1" name="array_1" type="text" required />
<input id="id_array_2" name="array_2" type="text" required />
</td>
</tr>
''')
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['abc', 'c', 'defg'])
self.assertEqual(cm.exception.messages, [
'Item 0 in the array did not validate: Ensure this value has at most 2 characters (it has 3).',
'Item 2 in the array did not validate: Ensure this value has at most 2 characters (it has 4).',
])
| 37.687587
| 118
| 0.637185
|
9b757b23d0640a3f48fe4929260897d16668b053
| 13,117
|
py
|
Python
|
privex/jsonrpc/helpers.py
|
r3st3rcr0w/python-jsonrpc
|
cdf2fae411a7dac038aedea0d790d43d690f4930
|
[
"X11"
] | 2
|
2019-03-18T03:48:25.000Z
|
2020-06-25T22:13:54.000Z
|
privex/jsonrpc/helpers.py
|
r3st3rcr0w/python-jsonrpc
|
cdf2fae411a7dac038aedea0d790d43d690f4930
|
[
"X11"
] | 1
|
2019-07-17T09:26:59.000Z
|
2019-07-17T09:26:59.000Z
|
privex/jsonrpc/helpers.py
|
r3st3rcr0w/python-jsonrpc
|
cdf2fae411a7dac038aedea0d790d43d690f4930
|
[
"X11"
] | 2
|
2019-03-23T20:45:41.000Z
|
2021-01-09T05:27:52.000Z
|
import logging
from decimal import Decimal
from privex.jsonrpc.JsonRPC import JsonRPC
from typing import List, Union, Dict
log = logging.getLogger(__name__)
class BitcoinRPC(JsonRPC):
"""
Wrapper class for JsonRPC, with default host 127.0.0.1 and port 8332
Contains pre-defined methods with pydoc for interacting with `bitcoind` compatible JsonRPC services
including most coin daemons forked from Bitcoin, e.g. litecoind, dogecoind etc.
If a method is not defined, you can still use it! You just won't get any IDE hints with the parameters.
Basic usage (by default, connects to http://127.0.0.1:8332):
>>> j = BitcoinRPC(username='bitcoinrpc', password='somesecurepassword')
>>> j.getbalance()
Decimal(0.2456337)
"""
def __init__(self, hostname='127.0.0.1', port=8332, username=None, password=None, ssl=False, timeout=120,
url: str = '', auth: str = 'plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
def getnewaddress(self, account="", address_type=None) -> str:
"""
Generate a new crypto address and return it as a string.
:param account: Name of the account to store address in. Default is blank ``""``
:param address_type: The address type to use. Options are ``legacy``, ``p2sh-segwit``, and ``bech32``.
:return: string - the address that was generated
"""
if address_type is None:
return self.call('getnewaddress', account)
return self.call('getnewaddress', account, address_type)
def getbalance(self, account="*", confirmations: int = 0, watch_only=False) -> Decimal:
"""
Get the current wallet balance as a Decimal
:param str account: DEPRECATED - Get the balance of this wallet account, ``*`` means all accounts.
:param int confirmations: Get wallet balance that has at least this many confirms
:param bool watch_only: Include "Watch Only" addresses in the balance figure
:return Decimal balance: The total balance of the given account
"""
bal = self.call('getbalance', account, confirmations, watch_only)
if type(bal) == float:
bal = '{0:.8f}'.format(bal)
return Decimal(bal)
def getreceivedbyaddress(self, address, confirmations: int = 0) -> Decimal:
"""
Get the total amount of coins received by an address (must exist in the wallet)
:param str address: The address to lookup
:param int confirmations: Get received amount that has at least this many confirms
:return Decimal balance: The total amount of coins received by an address.
"""
bal = self.call('getreceivedbyaddress', address, confirmations)
if type(bal) == float:
bal = '{0:.8f}'.format(bal)
return Decimal(bal)
def sendtoaddress(self, address, amount: Union[float, str, Decimal], comment="", comment_to="",
subtractfee: bool = False, force_float=True) -> str:
"""
Send coins to an address
:param str address: The destination address to send coins to
:param float amount: The amount of coins to send. If coin supports string amounts, see ``force_float`` param.
:param str comment: A comment used to store what the transaction is for.
:param str comment_to: A comment, representing the name of the person or organization you're sending to.
:param bool subtractfee: (Default False) If set to True, reduce the sending amount to cover the TX fee.
:param bool force_float: (Default True) If set to True, the ``amount`` parameter will be cast to a float
before sending via JSONRPC. If you're dealing with a coin daemon that can handle
string amounts, set this to False and pass amount as a str
:return str txid: The transaction ID for this "send coins" transaction.
"""
if force_float:
amount = float(amount)
return self.call('sendtoaddress', address, amount, comment, comment_to, subtractfee)
def listtransactions(self, account="*", count: int = 10, skip: int = 0, watch_only=False) -> List[dict]:
"""
List transactions sent/received/generated by an account, or all accounts
:param account: Account to list TXs for
:param count: Load this many recent TXs
:param skip: Skip this many recent TXs (for pagination)
:param watch_only: Include watchonly addresses
:return: [ {account, address, category, amount, label, vout, fee, confirmations, trusted, generated,
blockhash, blockindex, blocktime, txid, walletconflicts, time, timereceived, comment,
to, otheraccount, bip125-replaceable, abandoned}, ... ]
"""
return self.call('listtransactions', account, count, skip, watch_only)
def getblockchaininfo(self) -> dict:
"""
Get information about the blockchain, such as the current block/header height, network difficulty etc.
:return dict networkinfo: Returns blockchain information as a dict, in this format
Return format::
{
chain:str, blocks:int, headers: int, bestblockhash: str, difficulty: float,
mediantime: int, verificationprogress: float, initialblockdownload: bool,
chainwork: str, size_on_disk: int, pruned: bool, softforks: List[dict],
bip9_softforks: Dict[dict], warnings: str
}
"""
return self.call('getblockchaininfo')
def getnetworkinfo(self) -> dict:
"""
Get information about the network, such as daemon version, relay fees, total connections etc.
:return dict networkinfo: Returns network information as a dict, in this format
Return format::
{
version:int, subversion:str, localservices:str, localrelay:bool,
timeoffset:int, networkactive:bool, connections:int, networks:List[dict],
relayfee:float, incrementalfee:float, localaddresses:List[dict], warnings:str
}
"""
return self.call('getnetworkinfo')
def getinfo(self) -> dict:
"""
WARNING: This is deprecated in favour of getnetworkinfo/getblockchaininfo, and is only here for compatibility
with older cryptocurrency daemons.
:return dict daemoninfo: Various status info, such as current block, balance etc. See below.
Return format::
{
version:int, protocolversion: int, walletversion: int, balance: float, blocks:int,
timeoffset: int, connections: int, proxy: str, difficulty: float, testnet: bool,
keypoololdest: int, keypoolsize: int, paytxfee: float, relayfee: float, warnings: str
}
"""
return self.call('getinfo')
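# Usage sketch (added for illustration, not part of the original module). The
# credentials and the destination address below are placeholders, and the calls
# assume a bitcoind-compatible daemon listening on the default 127.0.0.1:8332.
def _bitcoinrpc_usage_sketch():
    rpc = BitcoinRPC(username='bitcoinrpc', password='somesecurepassword')
    balance = rpc.getbalance(confirmations=1)            # returned as a Decimal
    # force_float=False keeps the amount as a string, for daemons that accept it
    txid = rpc.sendtoaddress('1ExamplePlaceholderAddress', '0.10000000',
                             comment='test payment', force_float=False)
    return balance, txid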
class LitecoinRPC(BitcoinRPC):
"""
Wrapper class for JsonRPC, with default host 127.0.0.1 and port 9332
"""
def __init__(self, hostname='127.0.0.1', port=9332, username=None, password=None, ssl=False, timeout=120,
url: str = '', auth: str = 'plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
class SteemEngineRPC(JsonRPC):
"""
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python Simple JSON RPC library |
| License: X11/MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
"""
DEF_HOST = 'api.steem-engine.com'
DEF_URL = '/rpc/contracts'
def __init__(self, hostname=DEF_HOST, port=443, username=None, password=None, ssl=True, timeout=120, url=DEF_URL, auth='plain'):
super().__init__(
hostname=hostname, port=port, username=username, password=password,
ssl=ssl, timeout=timeout, url=url, auth=auth
)
def getcontract(self, name: str) -> dict:
"""
Returns information about a given contract, such as 'tokens'
:param name: Name of the contract, e.g. tokens
:return: None if not found
:return: {name, owner, code, codeHash, tables, $loki}
"""
return self.call('getContract', name=name)
def findone(self, contract: str, table: str, query: dict) -> dict:
"""
Returns the first result of a contract table query as a dictionary
>>> rpc = SteemEngineRPC()
>>> t = rpc.findone(contract='tokens',table='tokens',query=dict(symbol='ENG'))
>>> t['name']
'Steem Engine Token'
:param contract: Name of the contract, e.g. tokens
:param table: The table of the contract to query, e.g. balances
:param query: A dictionary query for filtering results, e.g. {'account': 'someguy123'}
:return: None if not found
:return: Dictionary containing the row data
"""
return self.call('findOne', contract=contract, table=table, query=query)
def find(self, contract, table, query: dict = None, limit: int = 1000,
offset: int = 0, indexes: list = None) -> list:
"""
Returns a list of matching rows for a given contract table query
Example - Get a list of all tokens (max 1000 results by default):
>>> rpc = SteemEngineRPC()
>>> t = rpc.find(contract='tokens',table='tokens')
:param contract: Name of the contract, e.g. tokens
:param table: The table of the contract to query, e.g. balances
:param query: A dictionary query for filtering results, e.g. {'account': 'someguy123'} (Default: {})
:param limit: Maximum results to retrieve
:param offset: Skip this many results
:param indexes:
:return: A list of matching rows, as dict's
"""
return self.call(
'find',
contract=contract,
table=table,
query=query if query is not None else {},
limit=limit,
offset=offset,
indexes=indexes if indexes is not None else []
)
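# Usage sketch (added for illustration). The account name is the same example used
# in the docstrings above; the contract and table names come from the Steem Engine API.
def _steemengine_usage_sketch():
    rpc = SteemEngineRPC()
    eng_token = rpc.findone(contract='tokens', table='tokens', query={'symbol': 'ENG'})
    balances = rpc.find(contract='tokens', table='balances',
                        query={'account': 'someguy123'}, limit=100)
    return eng_token, balances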
"""
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Python Simple JSON RPC library |
| License: X11/MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| |
+===================================================+
Python Json RPC - A simple library for interacting with JsonRPC services
Copyright (c) 2019 Privex Inc. ( https://www.privex.io )
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name(s) of the above copyright holders shall not be used in advertising or
otherwise to promote the sale, use or other dealings in this Software without prior written authorization.
"""
| 46.679715
| 132
| 0.586033
|
806f0a3bfdfab6ee06b775a27c47db7a6c91716c
| 1,069
|
py
|
Python
|
malwarebazaar/platform.py
|
Rafiot/bazaar
|
ee61622de49a7ba3fa7c4c8ad07346b36e5e36e0
|
[
"MIT"
] | 15
|
2021-06-16T21:25:27.000Z
|
2022-02-21T10:48:32.000Z
|
malwarebazaar/platform.py
|
Rafiot/bazaar
|
ee61622de49a7ba3fa7c4c8ad07346b36e5e36e0
|
[
"MIT"
] | 3
|
2021-09-28T07:33:21.000Z
|
2022-02-21T19:03:23.000Z
|
malwarebazaar/platform.py
|
Rafiot/bazaar
|
ee61622de49a7ba3fa7c4c8ad07346b36e5e36e0
|
[
"MIT"
] | 3
|
2021-06-17T20:12:54.000Z
|
2022-02-21T09:31:55.000Z
|
import os
from sys import platform
from rich.console import Console
def is_linux() -> bool:
return platform == "linux"
def is_win() -> bool:
return platform == "win32"
def is_darwin() -> bool:
return platform == "darwin"
def get_config_dir(ec: Console = Console(stderr=True, style="bold red")) -> str:
"""Returns path to directory in user dir"""
u_path = None
u_env = os.getenv("BAZAAR_PATH", None)
if u_env:
return u_env
if is_linux() or is_darwin():
u_path = os.path.abspath(os.path.join(os.getenv("HOME"), ".config", "bazaar"))
elif is_win():
u_path = os.path.abspath(os.path.join(os.getenv("APPDATA"), "bazaar"))
else:
ec.print(f"Unknown platform: {platform}.")
exit(-1)
if not os.path.exists(u_path):
os.mkdir(u_path)
return u_path
def get_config_path() -> str:
"""Return path to config.toml"""
c_env = os.getenv("BAZAAR_CONFIG", None)
if c_env:
return c_env
c_path = os.path.join(get_config_dir(), "config.yml")
return c_path
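# Usage sketch (added for illustration; the override path is a placeholder). Both
# helpers honour environment-variable overrides before the per-platform defaults.
def _config_paths_sketch(override_dir="/tmp/bazaar-example"):
    os.environ["BAZAAR_PATH"] = override_dir   # get_config_dir() returns this as-is
    cfg_dir = get_config_dir()
    cfg_file = get_config_path()               # BAZAAR_CONFIG if set, else cfg_dir/config.yml
    return cfg_dir, cfg_file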
| 23.23913
| 86
| 0.624883
|
97ac4c1ffc51093d5b56fdd99168c098a6c5cd21
| 1,953
|
py
|
Python
|
model/contact.py
|
zarinabaisakalova/trainings
|
4f8149ffa4797eb678fd5649f29821e1146bd57e
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
zarinabaisakalova/trainings
|
4f8149ffa4797eb678fd5649f29821e1146bd57e
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
zarinabaisakalova/trainings
|
4f8149ffa4797eb678fd5649f29821e1146bd57e
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'zarina'
from sys import maxsize
class Contact:
def __init__(self, id=None, firstname=None, lastname=None, middlename=None, nickname=None, title=None, company=None,
all_phones_from_home_page=None, all_emails_from_home_page=None,
address=None, homephone=None, mobilephone=None, workphone=None, secondaryphone=None, fax=None,
homepage=None, address2=None, notes=None, bday=None, bmonth=None, byear=None, aday=None, amonth=None,
ayear=None, email=None, email2=None, email3=None):
self.firstname = firstname
self.lastname = lastname
self.middlename = middlename
self.nickname = nickname
self.id = id
self.homephone = homephone
self.mobilephone = mobilephone
self.workphone = workphone
self.secondaryphone = secondaryphone
self.fax = fax
self.address = address
self.address2 = address2
self.email = email
self.email2 = email2
self.email3 = email3
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
self.title = title
self.company = company
self.homepage = homepage
self.notes = notes
self.bday = bday
self.bmonth = bmonth
self.byear = byear
self.aday = aday
self.amonth = amonth
self.ayear = ayear
def __repr__(self):
#return f'{self.id}:{self.firstname} {self.lastname}'
return "%s:%s:%s" % (self.id, self.lastname, self.firstname)
def __eq__(self, other):
return (self.id == other.id or self.id is None or other.id is None) \
and self.firstname == other.firstname \
and self.lastname == other.lastname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
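# Usage sketch (added for illustration): id_or_max() is convenient as a sort key,
# so contacts without a database id sort after those that already have one.
def _sort_contacts_sketch(contacts):
    return sorted(contacts, key=Contact.id_or_max)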
| 34.875
| 120
| 0.61956
|
c1be88d05dd0c859c3da3f8b911606e3a8ea43f6
| 1,208
|
py
|
Python
|
utils/parameters/environment.py
|
martyw/finance
|
b323a0224c8152ce1c1ce628d92f4563f3a59f90
|
[
"MIT"
] | null | null | null |
utils/parameters/environment.py
|
martyw/finance
|
b323a0224c8152ce1c1ce628d92f4563f3a59f90
|
[
"MIT"
] | null | null | null |
utils/parameters/environment.py
|
martyw/finance
|
b323a0224c8152ce1c1ce628d92f4563f3a59f90
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import date
class Environment:
def __init__(self, vd=date(2008, 1, 1)):
self._valuation_date = vd
self.curves = {}
self.surfaces = {}
self.constants = {}
@property
def valuation_date(self):
return self._valuation_date
@valuation_date.setter
def valuation_date(self, val_date):
self._valuation_date = val_date
def relative_date(self, rel_date):
ret = (rel_date - self._valuation_date).days
assert ret > 0, "date before pricing date"
return ret
def add_curve(self, key, curve):
if key in self.curves:
del self.curves[key]
self.curves[key] = curve
def add_surface(self, key, surface):
if key in self.surfaces:
del self.surfaces[key]
self.surfaces[key] = surface
def add_constant(self, key, constant):
if key in self.constants:
del self.constants[key]
self.constants[key] = constant
def get_curve(self, key):
return self.curves[key]
def get_surface(self, key):
return self.surfaces[key]
def get_constant(self, key):
return self.constants[key]
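# Usage sketch (added for illustration; the key names and the curve object are
# placeholders - any object can be stored under a key).
def _environment_usage_sketch():
    env = Environment(vd=date(2008, 1, 1))
    env.add_constant("riskfree_rate", 0.05)
    env.add_curve("EUR-DISCOUNT", {"type": "placeholder curve"})
    days = env.relative_date(date(2008, 6, 30))   # days after the valuation date
    return env.get_constant("riskfree_rate"), days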
| 25.166667
| 52
| 0.61755
|
9a5838da4be758a6471d0ad45394c9a5d7fc1331
| 3,466
|
py
|
Python
|
tests/test_rand_zoomd.py
|
loftwah/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2020-04-23T13:05:29.000Z
|
2020-04-23T13:05:29.000Z
|
tests/test_rand_zoomd.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rand_zoomd.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2021-09-20T12:10:01.000Z
|
2021-09-20T12:10:01.000Z
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import importlib
from scipy.ndimage import zoom as zoom_scipy
from parameterized import parameterized
from monai.transforms import RandZoomd
from tests.utils import NumpyImageTestCase2D
VALID_CASES = [(0.9, 1.1, 3, 'constant', 0, True, False, False)]
class TestRandZoomd(NumpyImageTestCase2D):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, min_zoom, max_zoom, order, mode,
cval, prefilter, use_gpu, keep_size):
key = 'img'
random_zoom = RandZoomd(key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order,
mode=mode, cval=cval, prefilter=prefilter, use_gpu=use_gpu,
keep_size=keep_size)
random_zoom.set_random_state(234)
zoomed = random_zoom({key: self.imt[0]})
expected = list()
for channel in self.imt[0]:
expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order,
cval=cval, prefilter=prefilter))
expected = np.stack(expected).astype(np.float32)
self.assertTrue(np.allclose(expected, zoomed[key]))
@parameterized.expand([
(0.8, 1.2, 1, 'constant', 0, True)
])
def test_gpu_zoom(self, min_zoom, max_zoom, order, mode, cval, prefilter):
key = 'img'
if importlib.util.find_spec('cupy'):
random_zoom = RandZoomd(
key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order,
mode=mode, cval=cval, prefilter=prefilter, use_gpu=True,
keep_size=False)
random_zoom.set_random_state(234)
zoomed = random_zoom({key: self.imt[0]})
expected = list()
for channel in self.imt[0]:
expected.append(zoom_scipy(channel, zoom=random_zoom._zoom, mode=mode, order=order,
cval=cval, prefilter=prefilter))
expected = np.stack(expected).astype(np.float32)
self.assertTrue(np.allclose(expected, zoomed[key]))
def test_keep_size(self):
key = 'img'
random_zoom = RandZoomd(key, prob=1.0, min_zoom=0.6,
max_zoom=0.7, keep_size=True)
zoomed = random_zoom({key: self.imt[0]})
self.assertTrue(np.array_equal(zoomed[key].shape, self.imt.shape[1:]))
@parameterized.expand([
("no_min_zoom", None, 1.1, 1, TypeError),
("invalid_order", 0.9, 1.1 , 's', AssertionError)
])
def test_invalid_inputs(self, _, min_zoom, max_zoom, order, raises):
key = 'img'
with self.assertRaises(raises):
random_zoom = RandZoomd(key, prob=1.0, min_zoom=min_zoom, max_zoom=max_zoom, order=order)
zoomed = random_zoom({key: self.imt[0]})
if __name__ == '__main__':
unittest.main()
| 41.261905
| 101
| 0.639065
|
a6e7d8ea0751d5d09fa1b14f83214be024a9c8db
| 14,881
|
py
|
Python
|
src/best_params.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 125
|
2021-06-16T09:36:18.000Z
|
2022-03-26T00:16:22.000Z
|
src/best_params.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 8
|
2021-06-23T04:49:12.000Z
|
2022-03-28T20:25:47.000Z
|
src/best_params.py
|
waddupitzme/graph-neural-pde
|
004a30c9e838866ac8b78d14b7414224a24014a5
|
[
"Apache-2.0"
] | 20
|
2021-06-23T06:55:35.000Z
|
2022-03-21T17:04:17.000Z
|
best_params_dict = {'Cora': {'M_nodes': 64, 'adaptive': False, 'add_source': True, 'adjoint': False, 'adjoint_method': 'adaptive_heun', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 1, 'attention_dim': 128, 'attention_norm_idx': 1, 'attention_rewiring': False, 'attention_type': 'scaled_dot', 'augment': False, 'baseline': False, 'batch_norm': False, 'beltrami': False, 'beta_dim': 'sc', 'block': 'attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'Cora', 'decay': 0.00507685443154266, 'directional_penalty': None, 'dropout': 0.046878964627763316, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 100, 'exact': True, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 0.5, 'grace_period': 20, 'heads': 8, 'heat_time': 3.0, 'hidden_dim': 80, 'input_dropout': 0.5, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.2, 'lr': 0.022924849756740397, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 2000, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'cora_beltrami_splits', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_init': 1, 'num_samples': 1000, 'num_splits': 2, 'ode_blocks': 1, 'optimizer': 'adamax', 'patience': 100, 'pos_enc_hidden_dim': 16, 'pos_enc_orientation': 'row', 'pos_enc_type': 'GDC', 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 1, 'sparsify': 'S_hat', 'square_plus': True, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 18.294754260552843, 'tol_scale': 821.9773048827274, 'tol_scale_adjoint': 1.0, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_lcc': True, 'use_mlp': False},
'Citeseer': {'M_nodes': 64, 'adaptive': False, 'add_source': True, 'adjoint': False, 'adjoint_method': 'adaptive_heun', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 1, 'attention_dim': 32, 'attention_norm_idx': 1, 'attention_rewiring': False, 'attention_type': 'exp_kernel', 'augment': False, 'baseline': False, 'batch_norm': False, 'beltrami': False, 'beta_dim': 'sc', 'block': 'attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'Citeseer', 'decay': 0.1, 'directional_penalty': None, 'dropout': 0.7488085003122172, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 250, 'exact': True, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 128, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 1.0, 'grace_period': 20, 'heads': 8, 'heat_time': 3.0, 'hidden_dim': 80, 'input_dropout': 0.6803233752085334, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.5825086997804176, 'lr': 0.00863585231323069, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 3000, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'Citeseer_beltrami_1_KNN', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_class': 6, 'num_feature': 3703, 'num_init': 2, 'num_nodes': 2120, 'num_samples': 400, 'num_splits': 0, 'ode_blocks': 1, 'optimizer': 'adam', 'patience': 100, 'pos_enc_dim': 'row', 'pos_enc_hidden_dim': 16, 'ppr_alpha': 0.05, 'reduction_factor': 4, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 1, 'sparsify': 'S_hat', 'square_plus': True, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 7.874113442879092, 'tol_scale': 2.9010446330432815, 'tol_scale_adjoint': 1.0, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_lcc': True, 'use_mlp': False},
'Pubmed': {'M_nodes': 64, 'adaptive': False, 'add_source': True, 'adjoint': True, 'adjoint_method': 'adaptive_heun', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 1, 'attention_dim': 16, 'attention_norm_idx': 0, 'attention_rewiring': False, 'attention_type': 'cosine_sim', 'augment': False, 'baseline': False, 'batch_norm': False, 'beltrami': False, 'beta_dim': 'sc', 'block': 'attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'Pubmed', 'decay': 0.0018236722171703636, 'directional_penalty': None, 'dropout': 0.07191100715473969, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 600, 'exact': False, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 1.0, 'grace_period': 20, 'heads': 1, 'heat_time': 3.0, 'hidden_dim': 128, 'input_dropout': 0.5, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.2, 'lr': 0.014669345840305131, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 5000, 'method': 'dopri5', 'metric': 'test_acc', 'mix_features': False, 'name': None, 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_init': 1, 'num_samples': 400, 'num_splits': 8, 'ode_blocks': 1, 'optimizer': 'adamax', 'patience': 100, 'pos_enc_dim': 'row', 'pos_enc_hidden_dim': 16, 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 1, 'sparsify': 'S_hat', 'square_plus': True, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 12.942327880200853, 'tol_scale': 1991.0688305523001, 'tol_scale_adjoint': 16324.368093998313, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_lcc': True, 'use_mlp': False, 'folder': 'pubmed_linear_att_beltrami_adj2', 'index': 0, 'run_with_KNN': False, 'change_att_sim_type': False, 'reps': 1, 'max_test_steps': 100, 'no_early': False, 'earlystopxT': 5.0, 'pos_enc_csv': False, 'pos_enc_type': 'GDC'},
'CoauthorCS': {'M_nodes': 64, 'adaptive': False, 'add_source': False, 'adjoint': True, 'adjoint_method': 'dopri5', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 1, 'attention_dim': 8, 'attention_norm_idx': 1, 'attention_rewiring': False, 'attention_type': 'scaled_dot', 'augment': False, 'baseline': False, 'batch_norm': False, 'beltrami': False, 'beta_dim': 'sc', 'block': 'attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'CoauthorCS', 'decay': 0.004738413087298854, 'directional_penalty': None, 'dropout': 0.6857774850321, 'dt': 0.001, 'dt_min': 1e-05, 'edge_sampling': False, 'edge_sampling_T': 'T0', 'edge_sampling_add': 0.05, 'edge_sampling_epoch': 5, 'edge_sampling_online': False, 'edge_sampling_online_reps': 4, 'edge_sampling_rmv': 0.05, 'edge_sampling_space': 'pos_distance', 'edge_sampling_sym': False, 'epoch': 250, 'exact': False, 'fa_layer': False, 'fc_out': False, 'feat_hidden_dim': 128, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.0001, 'gpus': 1, 'grace_period': 20, 'heads': 4, 'heat_time': 3.0, 'hidden_dim': 16, 'input_dropout': 0.5275042493231822, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.7181389780997276, 'lr': 0.0009342860080741642, 'max_iters': 100, 'max_nfe': 3000, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'CoauthorCS_final_tune_posencGDC', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_init': 1, 'num_samples': 400, 'num_splits': 4, 'ode_blocks': 1, 'optimizer': 'rmsprop', 'pos_dist_quantile': 0.001, 'pos_enc_csv': False, 'pos_enc_hidden_dim': 32, 'pos_enc_orientation': 'row', 'pos_enc_type': 'GDC', 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 5, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 0, 'sparsify': 'S_hat', 'square_plus': True, 'step_size': 1, 'symmetric_attention': False, 'threshold_type': 'addD_rvR', 'time': 3.126400580172773, 'tol_scale': 9348.983916372074, 'tol_scale_adjoint': 6599.1250595331385, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_mlp': False},
'Computers': {'M_nodes': 64, 'adaptive': False, 'add_source': False, 'adjoint': True, 'adjoint_method': 'dopri5', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 0.572918052062338, 'attention_dim': 64, 'attention_norm_idx': 0, 'attention_rewiring': False, 'attention_type': 'scaled_dot', 'augment': False, 'baseline': False, 'batch_norm': False, 'beltrami': False, 'beta_dim': 'sc', 'block': 'hard_attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'Computers', 'decay': 0.007674669913252157, 'directional_penalty': None, 'dropout': 0.08732611854459256, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 100, 'exact': False, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 1.0, 'grace_period': 25, 'heads': 4, 'heat_time': 3.0, 'hidden_dim': 128, 'input_dropout': 0.5973137276937647, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.2, 'lr': 0.0035304663972281548, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 500, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'computer_beltrami_hard_att1', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_init': 1, 'num_samples': 400, 'num_splits': 2, 'ode_blocks': 1, 'optimizer': 'adam', 'patience': 100, 'pos_enc_hidden_dim': 32, 'pos_enc_orientation': 'row', 'pos_enc_type': 'DW128', 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 1.7138583550928912, 'sparsify': 'S_hat', 'square_plus': False, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 3.249016177876166, 'tol_scale': 127.46369887079446, 'tol_scale_adjoint': 443.81436775321754, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_mlp': False},
'Photo': {'M_nodes': 64, 'adaptive': False, 'add_source': False, 'adjoint': True, 'adjoint_method': 'rk4', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 0.9282359956104751, 'attention_dim': 64, 'attention_norm_idx': 0, 'attention_rewiring': False, 'attention_type': 'pearson', 'augment': False, 'baseline': False, 'batch_norm': True, 'beltrami': False, 'beta_dim': 'sc', 'block': 'hard_attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'Photo', 'decay': 0.004707800883497945, 'directional_penalty': None, 'dropout': 0.46502284638600183, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 100, 'exact': False, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 1.0, 'grace_period': 25, 'heads': 4, 'heat_time': 3.0, 'hidden_dim': 64, 'input_dropout': 0.42903126506740247, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.5, 'leaky_relu_slope': 0.2, 'lr': 0.005560726683883279, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 500, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'photo_beltrami_hard_att1', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': True, 'num_init': 1, 'num_samples': 400, 'num_splits': 2, 'ode_blocks': 1, 'optimizer': 'adam', 'patience': 100, 'pos_enc_hidden_dim': 16, 'pos_enc_orientation': 'row', 'pos_enc_type': 'DW128', 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 0.05783612585280118, 'sparsify': 'S_hat', 'square_plus': False, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 3.5824027975386623, 'tol_scale': 2086.525473167121, 'tol_scale_adjoint': 14777.606112557354, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_mlp': False},
'ogbn-arxiv': {'M_nodes': 64, 'adaptive': False, 'add_source': False, 'adjoint': True, 'adjoint_method': 'rk4', 'adjoint_step_size': 1, 'alpha': 1.0, 'alpha_dim': 'sc', 'att_samp_pct': 0.8105268910037231, 'attention_dim': 32, 'attention_norm_idx': 0, 'attention_rewiring': False, 'attention_type': 'scaled_dot', 'augment': False, 'baseline': False, 'batch_norm': True, 'beltrami': False, 'beta_dim': 'sc', 'block': 'hard_attention', 'cpus': 1, 'data_norm': 'rw', 'dataset': 'ogbn-arxiv', 'decay': 0, 'directional_penalty': None, 'dropout': 0.11594990901233933, 'dt': 0.001, 'dt_min': 1e-05, 'epoch': 100, 'exact': False, 'fc_out': False, 'feat_hidden_dim': 64, 'function': 'laplacian', 'gdc_avg_degree': 64, 'gdc_k': 64, 'gdc_method': 'ppr', 'gdc_sparsification': 'topk', 'gdc_threshold': 0.01, 'gpus': 1.0, 'grace_period': 20, 'heads': 2, 'heat_time': 3.0, 'hidden_dim': 162, 'input_dropout': 0, 'jacobian_norm2': None, 'kinetic_energy': None, 'label_rate': 0.21964773835397075, 'leaky_relu_slope': 0.2, 'lr': 0.005451476553977102, 'max_epochs': 1000, 'max_iters': 100, 'max_nfe': 500, 'method': 'dopri5', 'metric': 'accuracy', 'mix_features': False, 'name': 'arxiv_beltrami_hard_att', 'new_edges': 'random', 'no_alpha_sigmoid': False, 'not_lcc': False, 'num_init': 2, 'num_samples': 200, 'num_splits': 0, 'ode_blocks': 1, 'optimizer': 'rmsprop', 'patience': 100, 'pos_enc_hidden_dim': 98, 'pos_enc_orientation': 'row', 'pos_enc_type': 'DW64', 'ppr_alpha': 0.05, 'reduction_factor': 10, 'regularise': False, 'reweight_attention': False, 'rewire_KNN': False, 'rewire_KNN_T': 'T0', 'rewire_KNN_epoch': 10, 'rewire_KNN_k': 64, 'rewire_KNN_sym': False, 'rewiring': None, 'rw_addD': 0.02, 'rw_rmvR': 0.02, 'self_loop_weight': 1, 'sparsify': 'S_hat', 'square_plus': False, 'step_size': 1, 'threshold_type': 'addD_rvR', 'time': 3.6760155951687636, 'tol_scale': 11353.558848254957, 'tol_scale_adjoint': 1.0, 'total_deriv': None, 'use_cora_defaults': False, 'use_flux': False, 'use_labels': False, 'use_lcc': True, 'use_mlp': False}
}
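# Lookup sketch (added for illustration): the dictionary is keyed by dataset name and
# each value is a flat hyperparameter dict, so individual settings can be read directly.
def _lookup_best_params(dataset='Cora'):
    opt = best_params_dict[dataset]
    return opt['lr'], opt['decay'], opt['time']   # e.g. learning rate, weight decay, ODE time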
| 1,488.1
| 2,380
| 0.689806
|
af54bb5e0a3cdcb5c9e66d75e27248d701c1652c
| 3,720
|
py
|
Python
|
kubernetes/client/models/v1_volume.py
|
SEJeff/client-python
|
baba523c28a684b3f537502977d600dedd1f17c5
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_volume.py
|
SEJeff/client-python
|
baba523c28a684b3f537502977d600dedd1f17c5
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_volume.py
|
SEJeff/client-python
|
baba523c28a684b3f537502977d600dedd1f17c5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-beta.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Volume(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None):
"""
V1Volume - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str'
}
self.attribute_map = {
'name': 'name'
}
self._name = name
@property
def name(self):
"""
Gets the name of this V1Volume.
Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:return: The name of this V1Volume.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1Volume.
Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param name: The name of this V1Volume.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
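# Usage sketch (added, not part of the generated client; the volume name is an
# arbitrary example): constructing the model and serialising it back out.
def _v1volume_usage_sketch():
    vol = V1Volume(name='config-volume')
    return vol.to_dict(), vol.to_str()   # {'name': 'config-volume'} and its pformat string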
| 28.837209
| 135
| 0.568548
|
b3d17993485d8f2275024e20b40cd628301c3186
| 3,095
|
py
|
Python
|
models/convnet.py
|
latimerb/soft_dtree
|
fac3f3eded0cabe512e3cbf7a2945dcfcfd70fe4
|
[
"MIT"
] | 57
|
2018-10-05T07:57:06.000Z
|
2022-02-23T19:50:31.000Z
|
models/convnet.py
|
latimerb/soft_dtree
|
fac3f3eded0cabe512e3cbf7a2945dcfcfd70fe4
|
[
"MIT"
] | 6
|
2018-10-17T14:21:54.000Z
|
2021-03-14T11:07:28.000Z
|
models/convnet.py
|
latimerb/soft_dtree
|
fac3f3eded0cabe512e3cbf7a2945dcfcfd70fe4
|
[
"MIT"
] | 27
|
2018-09-10T20:50:53.000Z
|
2021-12-08T21:50:12.000Z
|
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import (Input, Permute, Conv2D, MaxPooling2D,
Dropout, Flatten, Dense)
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
class ConvNet(object):
def __init__(self, img_rows, img_cols, img_chans, n_classes,
optimizer=Adam, loss='categorical_crossentropy',
metrics=['acc'], learning_rate=3e-04):
self.img_rows = img_rows
self.img_cols = img_cols
self.img_chans = img_chans
self.n_classes = n_classes
self.optimizer = Adam(lr=learning_rate)
self.loss = loss
self.metrics = metrics
self.model = None
def build_model(self):
input_layer = Input(shape=(self.img_rows, self.img_cols, self.img_chans))
# handle image dimensions ordering
if tf.keras.backend.image_data_format() == 'channels_first':
latent = Permute((3, 1, 2))(input_layer)
else:
latent = input_layer
# define the network architecture
latent = Conv2D(filters=32, kernel_size=(3, 3),
activation='relu')(latent)
latent = Conv2D(filters=64, kernel_size=(3, 3),
activation='relu')(latent)
latent = MaxPooling2D(pool_size=(2, 2))(latent)
latent = Dropout(rate=0.25)(latent)
latent = Flatten()(latent)
latent = Dense(units=128, activation='relu')(latent)
latent = Dropout(rate=0.5)(latent)
output_layer = Dense(units=self.n_classes, activation='softmax')(latent)
self.model = Model(inputs=input_layer, outputs=output_layer)
self.model.compile(optimizer=self.optimizer, loss=self.loss,
metrics=self.metrics)
def maybe_train(self, data_train, data_valid, batch_size, epochs):
DIR_ASSETS = 'assets/'
PATH_MODEL = DIR_ASSETS + 'nn-model.hdf5'
if os.path.exists(PATH_MODEL):
print('Loading trained model from {}.'.format(PATH_MODEL))
self.model = load_model(PATH_MODEL)
else:
print('No checkpoint found at {}. Training from scratch.'.format(
PATH_MODEL))
self.build_model()
x_train, y_train = data_train
self.model.fit(x_train, y_train, validation_data=data_valid,
batch_size=batch_size, epochs=epochs)
print('Saving trained model to {}.'.format(PATH_MODEL))
if not os.path.isdir(DIR_ASSETS):
os.mkdir(DIR_ASSETS)
self.model.save(PATH_MODEL)
def evaluate(self, x, y):
if self.model:
score = self.model.evaluate(x, y)
print('accuracy: {:.2f}% | loss: {}'.format(100*score[1], score[0]))
else:
print('Missing model instance.')
def predict(self, x):
if self.model:
return self.model.predict(x, verbose=1)
else:
print('Missing model instance.')
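# Usage sketch (added for illustration; the 28x28x1 shape and 10 classes are
# placeholder values, e.g. MNIST-sized inputs). Only build_model() is exercised,
# so no dataset or saved checkpoint is required.
def _convnet_build_sketch():
    net = ConvNet(img_rows=28, img_cols=28, img_chans=1, n_classes=10)
    net.build_model()
    return net.model   # compiled tf.keras Model, ready for maybe_train()/evaluate()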
| 37.289157
| 81
| 0.599354
|
130035b54d4d70de371ccd9fe60922f1f423ae6b
| 4,068
|
py
|
Python
|
leads/migrations/0001_initial.py
|
rakibul-islam-raju/django-crm
|
bd2fe84e6662b2d2425d890c64a9f15c3b76127a
|
[
"MIT"
] | null | null | null |
leads/migrations/0001_initial.py
|
rakibul-islam-raju/django-crm
|
bd2fe84e6662b2d2425d890c64a9f15c3b76127a
|
[
"MIT"
] | null | null | null |
leads/migrations/0001_initial.py
|
rakibul-islam-raju/django-crm
|
bd2fe84e6662b2d2425d890c64a9f15c3b76127a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-01-14 05:39
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Agent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Lead',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('age', models.IntegerField(default=0)),
('description', models.TextField()),
('date_added', models.DateTimeField(auto_now_add=True)),
('phone_number', models.CharField(max_length=20)),
('email', models.EmailField(max_length=254)),
('agent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='leads.agent')),
],
),
]
| 59.823529
| 329
| 0.639381
|
a7601499b4525ab528a8d05d549bd9d2c6d25b84
| 1,972
|
py
|
Python
|
PyPortal_Electioncal_US/electioncal/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
PyPortal_Electioncal_US/electioncal/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
PyPortal_Electioncal_US/electioncal/code.py
|
albinger/Adafruit_Learning_System_Guides
|
4fe2da261fe5d1ca282b86bd3b93ee1466346fa7
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2020 Alvaro Figueroa for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import sys
import time
import board
from adafruit_pyportal import PyPortal
cwd = ("/"+__file__).rsplit('/', 1)[0] # the current working directory (where this file is)
sys.path.append(cwd)
import electioncal_graphics # pylint: disable=wrong-import-position
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets # pylint: disable=unused-import
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
# Change this to your state and county, in lowercase and with spaces replaced by underscores
STATE="new_york"
COUNTY="new_york"
DATA_SOURCE = "https://electioncal.us/en/" + STATE +"/" + COUNTY + "/voter.json"
DATA_LOCATION = []
# Initialize the pyportal object and let us know what data to fetch and where
# to display it
pyportal = PyPortal(url=DATA_SOURCE,
json_path=DATA_LOCATION,
status_neopixel=board.NEOPIXEL,
default_bg=0x000000)
gfx = electioncal_graphics.Electioncal_Graphics(pyportal.splash, am_pm=True)
display_refresh = None
while True:
# only query the online time once per hour (and on first run)
if (not display_refresh) or (time.monotonic() - display_refresh) > 3600:
try:
print("Getting time from internet!")
pyportal.get_local_time()
display_refresh = time.monotonic()
except RuntimeError as e:
print("Some error occured, retrying! -", e)
continue
try:
value = pyportal.fetch()
#print("Response is", value)
gfx.load_data(value)
except RuntimeError as e:
print("Some error occured, retrying! -", e)
continue
try:
gfx.elections_cycle()
except RuntimeError as e:
print("Some error ocurred, retrying! -", e)
continue
| 32.866667
| 91
| 0.666329
|
e352a8a5ac6bb4a3b63f6a21c10e195cc277a55d
| 293
|
py
|
Python
|
Appendix B/setup.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | 43
|
2015-09-20T02:05:48.000Z
|
2022-03-01T22:00:43.000Z
|
Appendix B/setup.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | null | null | null |
Appendix B/setup.py
|
lzhang1/BeginningPygame
|
c239925041a6fa361386f65316ef4bea12c3b482
|
[
"MIT"
] | 40
|
2015-05-19T06:51:13.000Z
|
2022-03-27T18:11:16.000Z
|
import cx_Freeze
executables = [cx_Freeze.Executable("ants_game.py")]
cx_Freeze.setup(
name="Ant Game",
options={"build_exe": {"packages":["pygame"],
"include_files":["ant.png","leaf.png","spider.png",'gameobjects']}},
executables = executables
)
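# Build note (added): with cx_Freeze, running `python setup.py build` from this
# directory should produce a build/ folder containing the frozen ants_game
# executable together with the listed image files and the gameobjects package.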
| 24.416667
| 95
| 0.610922
|
40a376b22d8d7348adc8c693ee9c49003b59ccc5
| 6,225
|
py
|
Python
|
carla/env/env_rendering.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | null | null | null |
carla/env/env_rendering.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | null | null | null |
carla/env/env_rendering.py
|
sebzap/CarlaRL
|
5283d15dee9e8dc5e728314d56875b4fbca3acb2
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from contextual_gridworld.environment.colors import IDX_TO_COLOR, COLOR_TO_IDX
from contextual_gridworld.environment.env import load_context_config, Grid, WorldObj
class EnvRenderer:
def __init__(self, total_objects, grid_size=8, tile_size=8, context_config="color_contexts.yaml"):
self.tile_size = tile_size
self.total_objects = total_objects
self.contexts, self.subdivs = load_context_config(context_config)
self.context = self.contexts[0]
self.obstacles = []
self.goodies = []
# Environment configuration
self.width = grid_size
self.height = grid_size
# Current position and direction of the agent
self.agent_pos = None
self.agent_dir = None
self.grid = None
self.empty_grid()
def empty_grid(self):
# Create an empty grid
self.grid = Grid(self.width, self.height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, self.width, self.height)
def get_empty_positions(self, agent_pos, agent_dir):
self.empty_grid()
self.agent_pos = np.round(agent_pos).astype(np.int32)
self.agent_dir = agent_dir
empty_positions = []
empty_positions_transformed = []
grid = self.grid
for j in range(0, grid.height):
for i in range(0, grid.width):
cell = grid.get(i, j)
agent_here = agent_pos[0] == i and agent_pos[1] == j
if not agent_here and cell is None:
pos = np.asarray([i, j])
empty_positions.append(pos)
theta = np.deg2rad(-self.agent_dir * 90)
rotation = np.array([[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]])
pos = np.dot(rotation, pos) % (self.width - 1)
empty_positions_transformed.append(pos)
self.agent_pos = None
self.agent_dir = None
return empty_positions, empty_positions_transformed
def transfer_positions(self, pos):
# undo rotation
theta = np.deg2rad(self.agent_dir*90)
rotation = np.array([[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]])
pos = np.dot(rotation, pos) % (self.width - 1)
return np.round(pos).astype(dtype=np.int32)
def render_gt(self, gt, agent_pos, agent_dir):
self.empty_grid()
self.agent_pos = np.round(agent_pos).astype(np.int32)
self.agent_dir = agent_dir
agent_gt = gt[::self.total_objects]
goal_gt = gt[1::self.total_objects]
n_goodies = n_obstacles = (self.total_objects - 2) // 2
goodies_gt = []
for i in range(2, 2+n_goodies):
goodies_gt.append(gt[i::self.total_objects])
obstacles_gt = []
for i in range(2+n_goodies, 2+n_goodies+n_obstacles):
obstacles_gt.append(gt[i::self.total_objects])
goal_x, goal_y, color_idx = goal_gt
if goal_x >= 0:
goal_x, goal_y = self.transfer_positions(np.asarray([goal_x, goal_y]))
self.grid.set(goal_x, goal_y, WorldObj('goal', IDX_TO_COLOR[int(color_idx)]))
for goodie in goodies_gt:
pos_x, pos_y, color_idx = goodie
if pos_x >= 0:
pos_x, pos_y = self.transfer_positions(np.asarray([pos_x, pos_y]))
self.grid.set(pos_x, pos_y, WorldObj('goodie', IDX_TO_COLOR[int(color_idx)]))
for obstacle in obstacles_gt:
pos_x, pos_y, color_idx = obstacle
if pos_x >= 0:
pos_x, pos_y = self.transfer_positions(np.asarray([pos_x, pos_y]))
self.grid.set(pos_x, pos_y, WorldObj('obstacle', IDX_TO_COLOR[int(color_idx)]))
# Render the whole grid
img = self.grid.render(
self.tile_size,
self.agent_pos,
self.agent_dir,
# 0,
subdivs=self.subdivs,
agent_id=IDX_TO_COLOR[int(agent_gt[2])]
)
img = np.rot90(img, k=self.agent_dir)
return img
def get_gt_factors(env, total_objects, max_n_goodies, max_n_obstacles):
"""
Variables are ordered as x, y, color; objects are ordered as
agent, goal, goodie1, goodie2, obstacle1, obstacle2.
In detail, the flat array is therefore filled as:
agent_x, goal_x, ..., agent_y, goal_y, ..., agent_colour, goal_colour, ...
:return: (gt, agent_pos, agent_dir) - the flat ground-truth array plus the agent position and direction
"""
gt = np.ones(3 * total_objects) * -1
# rotate positions according to agent direction
theta = np.deg2rad(-env.agent_dir*90)
rotation = np.array([[math.cos(theta), -math.sin(theta)], [math.sin(theta), math.cos(theta)]])
agent_pos = np.dot(rotation, env.agent_pos) % (env.width - 1)
goal_pos = np.dot(rotation, env.goal_pos) % (env.width - 1)
goodies_pos = []
for goodie in env.goodies:
goodies_pos.append(np.dot(rotation, goodie.cur_pos) % (env.width - 1))
obstacles_pos = []
for obstacle in env.obstacles:
obstacles_pos.append(np.dot(rotation, obstacle.cur_pos) % (env.width - 1))
offset = 0
# place agent
gt[offset] = agent_pos[0]
gt[total_objects + offset] = agent_pos[1]
gt[2*total_objects + offset] = COLOR_TO_IDX[env.context['agent']]
offset += 1
# place goal
if goal_pos is not None:
gt[offset] = goal_pos[0]
gt[total_objects + offset] = goal_pos[1]
gt[2*total_objects + offset] = COLOR_TO_IDX[env.context['goal']]
offset += 1
for idx in range(max_n_goodies):
if len(goodies_pos) > idx:
gt[offset] = goodies_pos[idx][0]
gt[total_objects + offset] = goodies_pos[idx][1]
gt[2*total_objects + offset] = COLOR_TO_IDX[env.context['goodie']]
offset += 1
for idx in range(max_n_obstacles):
if len(obstacles_pos) > idx:
gt[offset] = obstacles_pos[idx][0]
gt[total_objects + offset] = obstacles_pos[idx][1]
gt[2*total_objects + offset] = COLOR_TO_IDX[env.context['obstacle']]
offset += 1
return gt, env.agent_pos, env.agent_dir
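# Illustration (added, not from the original module) of the flat layout documented in
# get_gt_factors: for object index k, x is gt[k], y is gt[total_objects + k] and the
# colour index is gt[2 * total_objects + k].
def _unpack_gt(gt, total_objects, k):
    """Return (x, y, colour_idx) of the k-th object from a flat gt vector."""
    return gt[k], gt[total_objects + k], gt[2 * total_objects + k]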
| 30.665025
| 114
| 0.606265
|
0ef523af9a2b173cea183c43fc6e162147490a5f
| 629
|
py
|
Python
|
46.py
|
silviuapostu/project-euler-solutions
|
6b6c9ca3eee8d351a1e98783f3087a05f6a894fc
|
[
"MIT"
] | null | null | null |
46.py
|
silviuapostu/project-euler-solutions
|
6b6c9ca3eee8d351a1e98783f3087a05f6a894fc
|
[
"MIT"
] | null | null | null |
46.py
|
silviuapostu/project-euler-solutions
|
6b6c9ca3eee8d351a1e98783f3087a05f6a894fc
|
[
"MIT"
] | null | null | null |
'''
Goldbach's other conjecture
What is the smallest odd composite that cannot be written as the sum of
a prime and twice a square?
'''
from euler_utils import is_prime
from math import sqrt, ceil
def goldbach2():
n = 35
while True:
if not is_prime(n) and n % 2 == 1:
y = 1
found = False
while y <= ceil(sqrt((n-1)/2)) and not found:
x = n - 2 * y**2
if is_prime(x):
found = True
y += 1
if not found:
return n
n += 1
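# Worked check (added for illustration): 35 is an odd composite but *can* be written
# as prime + 2*square, e.g. 35 = 17 + 2*3**2, so the search above moves past it and
# keeps going until an odd composite with no such decomposition is found.
def _has_decomposition(n, y):
    """True if n - 2*y**2 is prime, i.e. y gives a valid decomposition of n."""
    return is_prime(n - 2 * y ** 2)
# e.g. _has_decomposition(35, 3) holds because 35 - 18 == 17 is prime.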
if __name__ == '__main__':
print(goldbach2())
| 20.966667
| 71
| 0.502385
|
48079c5b109413cb6c5f8098890a53c2c13d11fc
| 24,486
|
py
|
Python
|
pybats/dglm.py
|
bking124/pybats
|
b99def91901e7a154e95d32e22ceb44a48374f1d
|
[
"Apache-2.0"
] | 29
|
2019-11-11T14:49:11.000Z
|
2022-03-11T13:23:57.000Z
|
pybats/dglm.py
|
bking124/pybats
|
b99def91901e7a154e95d32e22ceb44a48374f1d
|
[
"Apache-2.0"
] | 7
|
2020-08-04T15:08:54.000Z
|
2022-02-26T10:10:16.000Z
|
pybats/dglm.py
|
bking124/pybats
|
b99def91901e7a154e95d32e22ceb44a48374f1d
|
[
"Apache-2.0"
] | 12
|
2019-11-20T15:21:48.000Z
|
2022-02-16T23:18:44.000Z
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00_dglm.ipynb (unless otherwise specified).
__all__ = ['dglm', 'bern_dglm', 'pois_dglm', 'dlm', 'bin_dglm']
# Internal Cell
from nbdev.showdoc import *
import numpy as np
import scipy as sc
import pandas as pd
from collections.abc import Iterable
from .latent_factor_fxns import update_lf_analytic, update_lf_sample, forecast_marginal_lf_analytic, \
forecast_marginal_lf_sample, forecast_path_lf_copula, forecast_path_lf_sample, get_mean_and_var_lf, \
get_mean_and_var_lf_dlm, update_lf_analytic_dlm
from .seasonal import seascomp, createFourierToSeasonalL
from .update import update, update_dlm, update_bindglm
from .forecast import forecast_marginal, forecast_path, forecast_path_copula,\
forecast_marginal_bindglm, forecast_path_dlm, forecast_state_mean_and_var
from .conjugates import trigamma, bern_conjugate_params, bin_conjugate_params, pois_conjugate_params
# These are for the bernoulli and Poisson DGLMs
from scipy.special import digamma
from scipy.special import beta as beta_fxn
from scipy import stats
# Cell
class dglm:
def __init__(self,
a0=None,
R0=None,
nregn=0,
ntrend=0,
nlf=0,
nhol=0,
seasPeriods=[],
seasHarmComponents=[],
deltrend=1, delregn=1,
dellf=1,
delhol=1, delseas=1,
rho=1,
interpolate=True,
adapt_discount=False,
adapt_factor=0.5,
discount_forecast=False):
"""
A Dynamic Generalized Linear Model (DGLM). Generic Class. Children include Poisson, Bernoulli, and Binomial DGLMs, as well as the Normal DLM.
:param a0: Prior mean vector
:param R0: Prior covariance matrix
:param nregn: Number of regression components
:param ntrend: Number of trend components
:param nhol: Number of holiday components
:param seasPeriods: List of periods of seasonal components
:param seasHarmComponents: List of harmonic components included for each period
:param deltrend: Discount factor on trend components
:param delregn: Discount factor on regression components
:param delhol: Discount factor on holiday components (currently deprecated)
:param delseas: Discount factor on seasonal components
:param interpolate: Whether to use interpolation for conjugate parameters (provides a computational speedup)
:param adapt_discount: What method of discount adaption. False = None, 'positive_regn' = only discount if regression information is available, 'info' = information based,\
:param adapt_factor: If adapt_discount='info', then a higher value adapt_factor leads to a quicker adaptation (with less discounting) on overly uncertain parameters
:param discount_forecast: Whether to use discounting when forecasting
:return An object of class dglm
"""
# Setting up trend F, G matrices
i = 0
self.itrend = list(range(i, ntrend))
i += ntrend
if ntrend == 0:
Gtrend = np.empty([0, 0])
Ftrend = np.zeros([ntrend]).reshape(-1, 1)
# Local level
elif ntrend == 1:
Gtrend = np.identity(ntrend)
Ftrend = np.array([1]).reshape(-1, 1)
# Locally linear
elif ntrend == 2:
Gtrend = np.array([[1, 1], [0, 1]])
Ftrend = np.array([1, 0]).reshape(-1, 1)
# Setting up regression F, G matrices
self.iregn = list(range(i, i + nregn))
if nregn == 0:
Fregn = np.empty([0]).reshape(-1, 1)
Gregn = np.empty([0, 0])
else:
Gregn = np.identity(nregn)
Fregn = np.ones([nregn]).reshape(-1, 1)
i += nregn
# Setting up holiday F, G matrices (additional regression indicators components)
self.ihol = list(range(i, i + nhol))
self.iregn.extend(self.ihol) # Adding on to the self.iregn
if nhol == 0:
Fhol = np.empty([0]).reshape(-1, 1)
Ghol = np.empty([0, 0])
else:
Ghol = np.identity(nhol)
Fhol = np.ones([nhol]).reshape(-1, 1)
i += nhol
# Setting up seasonal F, G matrices
self.iseas = []
if len(seasPeriods) == 0:
Fseas = np.empty([0]).reshape(-1, 1)
Gseas = np.empty([0, 0])
nseas = 0
else:
output = list(map(seascomp, seasPeriods, seasHarmComponents))
Flist = [x[0] for x in output]
Glist = [x[1] for x in output]
self.L = list(map(createFourierToSeasonalL, seasPeriods, seasHarmComponents, Flist, Glist))
nseas = 2 * sum(map(len, seasHarmComponents))
Fseas = np.zeros([nseas]).reshape(-1, 1)
Gseas = np.zeros([nseas, nseas])
idx = 0
for harmComponents in seasHarmComponents:
self.iseas.append(list(range(i, i + 2 * len(harmComponents))))
i += 2 * len(harmComponents)
for Fs, Gs in output:
idx2 = idx + Fs.shape[0]
Fseas[idx:idx2, 0] = Fs.squeeze()
Gseas[idx:idx2, idx:idx2] = Gs
idx = idx2
# Setting up the latent factor F, G matrices
if nlf == 0:
self.latent_factor = False
Glf = np.empty([0, 0])
Flf = np.zeros([0]).reshape(-1, 1)
else:
self.latent_factor = True
Glf = np.identity(nlf)
Flf = np.ones([nlf]).reshape(-1, 1)
self.ilf = list(range(i, i + nlf))
i += nlf
# Combine the F and G components together
F = np.vstack([Ftrend, Fregn, Fhol, Fseas, Flf])
G = sc.linalg.block_diag(Gtrend, Gregn, Ghol, Gseas, Glf)
# store the discount info
self.deltrend = deltrend
self.delregn = delregn
self.delhol = delhol
self.delseas = delseas
self.dellf = dellf
# Random effect to inflate variance (if rho < 1)
self.rho = rho
self.ntrend = ntrend
self.nregn = nregn + nhol # Adding on nhol
self.nregn_exhol = nregn
self.nhol = nhol
self.nseas = nseas
self.nlf = nlf
self.adapt_discount = adapt_discount
self.k = adapt_factor
# Set up discount matrix
self.discount_forecast = discount_forecast
Discount = self.build_discount_matrix()
self.Discount = Discount
self.param1 = 2 # Random initial guess
self.param2 = 2 # Random initial guess
self.seasPeriods = seasPeriods
self.seasHarmComponents = seasHarmComponents
self.F = F
self.G = G
self.a = a0.reshape(-1, 1)
self.R = R0
self.t = 0
self.interpolate = interpolate
self.W = self.get_W()
def build_discount_matrix(self, X=None, phi_mu=None):
# build up discount factors while possibly taking special care to not discount when the "regn"
# type factors are zero
# do this all with matrix slicing which is much faster than the block diag
p = np.sum([self.ntrend, self.nregn_exhol, self.nhol, self.nseas, self.nlf])
# start with no discounting
component_discounts = np.ones([p, p])
i = 0 # this will be the offset of the current block
for discount_pair, n in zip([('std', self.deltrend), ('regn', self.delregn), ('hol', self.delhol),
('std', self.delseas), ('lf', self.dellf)],
[self.ntrend, self.nregn_exhol, self.nhol, self.nseas, self.nlf]):
discount_type, discount = discount_pair
if n > 0:
if isinstance(discount, Iterable):
if len(discount) < n:
raise ValueError('Error: Length of discount factors must be 1 or match component length')
for j, disc in enumerate(discount[:n]):
# fill the diags one at a time
component_discounts[i+j, i+j] = disc
else:
# fill the block with the constant
component_discounts[i:(i+n), i:(i+n)] = discount
# overwrite with ones if doing the positive logic
if X is not None and self.adapt_discount == 'positive_regn' and (discount_type == 'regn' or discount_type == 'hol'):
if not isinstance(X, Iterable):
X = [X]
if discount_type == 'regn':
# offset of the regression params
regn_i = 0
elif discount_type == 'hol':
regn_i = self.nregn_exhol
# look through the regression params and set that slice on the
# discount to 1 if 0
for j in range(n):
if X[regn_i] == 0:
# set all discounts to one (i offsets the block and j offsets the regn param)
component_discounts[i + j, :] = 1.
component_discounts[:, i + j] = 1.
regn_i += 1
if phi_mu is not None and self.adapt_discount == 'positive_regn' and discount_type == 'lf':
# offset of the latent factor params
lf_i = 0
# look through the latent factor params and set that slice on the
# discount to 1 if 0
for j in range(n):
if phi_mu[lf_i] == 0:
# set all discounts to one (i offsets the block and j offsets the regn param)
component_discounts[i + j, :] = 1.
component_discounts[:, i + j] = 1.
lf_i += 1
# move on to the next block
i += n
return component_discounts
def update(self, y=None, X=None, phi_mu = None, phi_sigma = None, analytic=True, phi_samps=None, **kwargs):
"""
Update the DGLM state vector mean and covariance after observing 'y', with covariates 'X'.
"""
if self.latent_factor:
if analytic:
update_lf_analytic(self, y, X, phi_mu, phi_sigma)
else:
parallel = kwargs.get('parallel')
if parallel is None: parallel = False
update_lf_sample(self, y, X, phi_samps, parallel)
else:
update(self, y, X)
def forecast_marginal(self, k, X=None, nsamps=1, mean_only=False,
phi_mu = None, phi_sigma=None, analytic=True, phi_samps=None,
state_mean_var=False, y=None):
"""
Simulate from the forecast distribution at time *t+k*.
"""
if self.latent_factor:
if analytic:
return forecast_marginal_lf_analytic(self, k, X, phi_mu, phi_sigma, nsamps, mean_only, state_mean_var)
else:
                return forecast_marginal_lf_sample(self, k, X, phi_samps, mean_only)
else:
return forecast_marginal(self, k, X, nsamps, mean_only, state_mean_var, y)
def forecast_path(self, k, X=None, nsamps=1, copula=True,
phi_mu=None, phi_sigma=None, phi_psi=None, analytic=True, phi_samps=None,
**kwargs):
"""
Simulate from the path (joint) forecast distribution from *1* to *k* steps ahead.
"""
if self.latent_factor:
if analytic:
return forecast_path_lf_copula(self, k, X, phi_mu, phi_sigma, phi_psi, nsamps, **kwargs)
else:
return forecast_path_lf_sample(self, k, X, phi_samps)
else:
if copula:
return forecast_path_copula(self, k, X, nsamps, **kwargs)
else:
return forecast_path(self, k, X, nsamps)
def forecast_path_copula(self, k, X=None, nsamps=1, **kwargs):
return forecast_path_copula(self, k, X, nsamps, **kwargs)
# Define specific update and forecast functions, which advanced users can access manually
def update_lf_sample(self, y=None, X=None, phi_samps=None, parallel=False):
update_lf_sample(self, y, X, phi_samps, parallel)
def update_lf_analytic(self, y=None, X=None, phi_mu=None, phi_sigma=None):
update_lf_analytic(self, y, X, phi_mu, phi_sigma)
def forecast_marginal_lf_analytic(self, k, X=None, phi_mu=None, phi_sigma=None, nsamps=1, mean_only=False, state_mean_var=False):
return forecast_marginal_lf_analytic(self, k, X, phi_mu, phi_sigma, nsamps, mean_only, state_mean_var)
def forecast_marginal_lf_sample(self, k, X=None, phi_samps=None, mean_only=False):
return forecast_marginal_lf_sample(self, k, X, phi_samps, mean_only)
def forecast_path_lf_copula(self, k, X=None, phi_mu=None, phi_sigma=None, phi_psi=None, nsamps=1, **kwargs):
return forecast_path_lf_copula(self, k, X, phi_mu, phi_sigma, phi_psi, nsamps, **kwargs)
def forecast_path_lf_sample(self, k, X=None, phi_samps=None, nsamps=1, **kwargs):
return forecast_path_lf_sample(self, k, X, phi_samps)
def forecast_state_mean_and_var(self, k, X = None):
return forecast_state_mean_and_var(self, k, X)
def get_mean_and_var(self, F, a, R):
mean, var = F.T @ a, F.T @ R @ F / self.rho
return np.ravel(mean)[0], np.ravel(var)[0]
def get_mean_and_var_lf(self, F, a, R, phi_mu, phi_sigma, ilf):
return get_mean_and_var_lf(self, F, a, R, phi_mu, phi_sigma, ilf)
def get_W(self, X=None):
if self.adapt_discount == 'info':
info = np.abs(self.a.flatten() / np.sqrt(self.R.diagonal()))
diag = self.Discount.diagonal()
diag = np.round(diag + (1 - diag) * np.exp(-self.k * info), 5)
Discount = np.ones(self.Discount.shape)
np.fill_diagonal(Discount, diag)
elif self.adapt_discount == 'positive_regn' and X is not None:
Discount = self.build_discount_matrix(X)
else:
Discount = self.Discount
return self.R / Discount - self.R
def get_coef(self, component=None):
"""Return the coefficient (state vector) means and standard deviations.
If component=None, then the full state vector is returned.
Otherwise, specify a single component from 'trend', 'regn', 'seas', 'hol', and 'lf'.
"""
trend_names = ['Intercept', 'Local Slope'][:self.ntrend]
regn_names = ['Regn ' + str(i) for i in range(1, self.nregn_exhol+1)]
seas_names = ['Seas ' + str(i) for i in range(1, self.nseas+1)]
hol_names = ['Hol ' + str(i) for i in range(1, self.nhol+1)]
lf_names = ['LF ' + str(i) for i in range(1, self.nlf+1)]
if component is None:
names = [*trend_names, *regn_names, *seas_names, *hol_names, *lf_names]
return pd.DataFrame({'Mean':self.a.reshape(-1),
'Standard Deviation': np.sqrt(self.R.diagonal())},
index=names).round(2)
elif component == 'trend':
names = trend_names
return pd.DataFrame({'Mean':self.a.reshape(-1)[self.itrend],
'Standard Deviation': np.sqrt(self.R.diagonal())[self.itrend]},
index=names).round(2)
elif component == 'regn':
names = regn_names
return pd.DataFrame({'Mean':self.a.reshape(-1)[self.iregn],
'Standard Deviation': np.sqrt(self.R.diagonal())[self.iregn]},
index=names).round(2)
elif component == 'seas':
names = seas_names
seas_idx = []
for idx in self.iseas:
seas_idx.extend(idx)
return pd.DataFrame({'Mean':self.a.reshape(-1)[seas_idx],
'Standard Deviation': np.sqrt(self.R.diagonal())[seas_idx]},
index=names).round(2)
elif component == 'hol':
names = hol_names
return pd.DataFrame({'Mean':self.a.reshape(-1)[self.ihol],
'Standard Deviation': np.sqrt(self.R.diagonal())[self.ihol]},
index=names).round(2)
elif component == 'lf':
names = lf_names
return pd.DataFrame({'Mean':self.a.reshape(-1)[self.ilf],
'Standard Deviation': np.sqrt(self.R.diagonal())[self.ilf]},
index=names).round(2)
# Cell
class bern_dglm(dglm):
def get_conjugate_params(self, ft, qt, alpha_init, beta_init):
# Choose conjugate prior, beta, and match mean & variance
return bern_conjugate_params(ft, qt, alpha_init, beta_init, interp=self.interpolate)
def update_conjugate_params(self, y, alpha, beta):
# Update alpha and beta to the conjugate posterior coefficients
alpha = alpha + y
beta = beta + 1 - y
# Get updated ft* and qt*
ft_star = digamma(alpha) - digamma(beta)
qt_star = trigamma(alpha) + trigamma(beta)
# constrain this thing from going to crazy places
ft_star = max(-8, min(ft_star, 8))
qt_star = max(0.001 ** 2, min(qt_star, 4 ** 2))
return alpha, beta, ft_star, qt_star
def simulate(self, alpha, beta, nsamps):
p = np.random.beta(alpha, beta, [nsamps])
return np.random.binomial(1, p, size=[nsamps])
def simulate_from_sampling_model(self, p, nsamps):
return np.random.binomial(1, p, [nsamps])
def simulate_from_prior(self, alpha, beta, nsamps):
return stats.beta.rvs(a=alpha, b=beta, size=nsamps)
def prior_inverse_cdf(self, cdf, alpha, beta):
return stats.beta.ppf(cdf, alpha, beta)
def sampling_density(self, y, p):
return stats.binom.pmf(n=1, p=p, k=y)
def marginal_cdf(self, y, alpha, beta):
if y == 1:
return 1
elif y == 0:
return beta_fxn(y + alpha, 1 - y + beta) / beta_fxn(alpha, beta)
def loglik(self, y, alpha, beta):
return stats.bernoulli.logpmf(y, alpha / (alpha + beta))
def get_mean(self, alpha, beta):
return np.ravel(alpha / (alpha + beta))[0]
def get_prior_var(self, alpha, beta):
return (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
# Cell
class pois_dglm(dglm):
def get_conjugate_params(self, ft, qt, alpha_init, beta_init):
# Choose conjugate prior, gamma, and match mean & variance
return pois_conjugate_params(ft, qt, alpha_init, beta_init, interp=self.interpolate)
def update_conjugate_params(self, y, alpha, beta):
# Update alpha and beta to the conjugate posterior coefficients
alpha = alpha + float(y)
beta = beta + 1
# Get updated ft* and qt*
ft_star = digamma(alpha) - np.log(beta)
qt_star = trigamma(alpha)
# constrain this thing from going to crazy places?
qt_star = max(0.001 ** 2, min(qt_star, 4 ** 2))
return alpha, beta, ft_star, qt_star
def simulate(self, alpha, beta, nsamps):
return np.random.negative_binomial(alpha, beta / (1 + beta), [nsamps])
def simulate_from_sampling_model(self, rate, nsamps):
return np.random.poisson(rate, [nsamps])
def simulate_from_prior(self, alpha, beta, nsamps):
return stats.gamma.rvs(a=alpha, scale=1/beta, size=nsamps)
def prior_inverse_cdf(self, cdf, alpha, beta):
return stats.gamma.ppf(cdf, a=alpha, scale=1 / beta)
def sampling_density(self, y, mu):
return stats.poisson.pmf(mu=mu, k=y)
def marginal_cdf(self, y, alpha, beta):
return stats.nbinom.cdf(y, alpha, beta / (1 + beta))
def marginal_inverse_cdf(self, cdf, alpha, beta):
return stats.nbinom.ppf(cdf, alpha, beta / (1 + beta))
def loglik(self, y, alpha, beta):
return stats.nbinom.logpmf(y, alpha, beta / (1 + beta))
def get_mean(self, alpha, beta):
return np.ravel(alpha/beta)[0]
def get_prior_var(self, alpha, beta):
return alpha / beta ** 2
# Cell
class dlm(dglm):
def __init__(self, *args, n0=1, s0=1, delVar=1, **kwargs):
self.delVar = delVar # Discount factor for the variance - using a beta-gamma random walk
self.n = n0 # Prior sample size for the variance
self.s = s0 # Prior mean for the variance
super().__init__(*args, **kwargs)
def get_mean_and_var(self, F, a, R):
return F.T @ a, F.T @ R @ F + self.s
def get_mean_and_var_lf(self, F, a, R, phi_mu, phi_sigma, ilf):
ct = self.n / (self.n - 2)
ft, qt = get_mean_and_var_lf_dlm(F, a, R, phi_mu, phi_sigma, ilf, ct)
qt = qt + self.s
return ft, qt
def get_mean(self, ft, qt):
return np.ravel(ft)[0]
def get_conjugate_params(self, ft, qt, mean, var):
return ft, qt
def simulate(self, mean, var, nsamps):
return mean + np.sqrt(var) * np.random.standard_t(self.n, size=[nsamps])
def simulate_from_sampling_model(self, mean, var, nsamps):
return np.random.normal(mean, np.sqrt(var), nsamps)
def update(self, y=None, X=None, phi_mu=None, phi_sigma=None, analytic=True, phi_samps=None, **kwargs):
if self.latent_factor:
if analytic:
update_lf_analytic_dlm(self, y, X, phi_mu, phi_sigma)
else:
print('Sampled-based updating for the Latent Factor DLM is not yet implemented - please use analytic inference')
else:
update_dlm(self, y, X)
def forecast_path(self, k, X=None, nsamps=1, **kwargs):
if self.latent_factor:
print('Path forecasting for latent factor DLMs is not yet implemented')
else:
return forecast_path_dlm(self, k, X, nsamps)
def update_lf_analytic(self, y=None, X=None, phi_mu=None, phi_sigma=None):
update_lf_analytic_dlm(self, y, X, phi_mu, phi_sigma)
def loglik(self, y, mean, var):
return stats.t.logpdf(y, df=self.n, loc=mean, scale=np.sqrt(var))
# Cell
class bin_dglm(dglm):
def get_conjugate_params(self, ft, qt, alpha_init, beta_init):
# Choose conjugate prior, beta, and match mean & variance
return bin_conjugate_params(ft, qt, alpha_init, beta_init, interp=self.interpolate)
def update_conjugate_params(self, n, y, alpha, beta):
# Update alpha and beta to the conjugate posterior coefficients
alpha = alpha + y
beta = beta + n - y
# Get updated ft* and qt*
ft_star = digamma(alpha) - digamma(beta)
qt_star = trigamma(alpha) + trigamma(beta)
# constrain this thing from going to crazy places?
ft_star = max(-8, min(ft_star, 8))
qt_star = max(0.001 ** 2, min(qt_star, 4 ** 2))
return alpha, beta, ft_star, qt_star
def simulate(self, n, alpha, beta, nsamps):
p = np.random.beta(alpha, beta, [nsamps])
return np.random.binomial(n.astype(int), p, size=[nsamps])
def simulate_from_sampling_model(self, n, p, nsamps):
return np.random.binomial(n, p, [nsamps])
def prior_inverse_cdf(self, cdf, alpha, beta):
return stats.beta.ppf(cdf, alpha, beta)
    def marginal_cdf(self, y, n, alpha, beta):
        # Beta-binomial CDF: accumulate the marginal pmf over 0, ..., y
        cdf = 0.0
        for i in range(y + 1):
            cdf += sc.special.comb(n, i) * beta_fxn(i + alpha, n - i + beta) / beta_fxn(alpha, beta)
        return cdf
def loglik(self, data, alpha, beta):
n, y = data
return stats.binom.logpmf(y, n, alpha / (alpha + beta))
def get_mean(self, n, alpha, beta):
return np.ravel(n * (alpha / (alpha + beta)))[0]
def get_prior_var(self, alpha, beta):
return (alpha * beta) / ((alpha + beta) ** 2 * (alpha + beta + 1))
def update(self, n=None, y=None, X=None):
update_bindglm(self, n, y, X)
def forecast_marginal(self, n, k, X=None, nsamps=1, mean_only=False):
return forecast_marginal_bindglm(self, n, k, X, nsamps, mean_only)
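# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal end-to-end flow for a Poisson DGLM with a local level and one
# regression component; the prior values, discount factors, and data below are
# illustrative assumptions, not values taken from the pybats source.
#
#   import numpy as np
#
#   mod = pois_dglm(a0=np.array([1.0, 0.0]),   # prior mean: [level, regn coefficient]
#                   R0=np.eye(2),              # prior covariance
#                   ntrend=1, nregn=1,
#                   deltrend=0.95, delregn=0.98)
#
#   for y_t, x_t in zip([3, 5, 4], [np.array([0.2]), np.array([0.5]), np.array([0.1])]):
#       mod.update(y=y_t, X=x_t)                        # sequential filtering
#
#   samples = mod.forecast_marginal(k=1, X=np.array([0.3]), nsamps=500)
#   mean_fc = mod.forecast_marginal(k=1, X=np.array([0.3]), mean_only=True)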
| 40.405941
| 179
| 0.58462
|
60560f0b924548bbce6492ff83fd3e3cbc225151
| 2,731
|
py
|
Python
|
microcosm_daemon/error_policy.py
|
globality-corp/microcosm-daemon
|
86c384462c00d98431d61ed4890b980b0424cb36
|
[
"Apache-2.0"
] | null | null | null |
microcosm_daemon/error_policy.py
|
globality-corp/microcosm-daemon
|
86c384462c00d98431d61ed4890b980b0424cb36
|
[
"Apache-2.0"
] | 10
|
2016-03-25T00:37:22.000Z
|
2022-02-09T21:16:16.000Z
|
microcosm_daemon/error_policy.py
|
globality-corp/microcosm-daemon
|
86c384462c00d98431d61ed4890b980b0424cb36
|
[
"Apache-2.0"
] | 2
|
2016-12-19T22:41:21.000Z
|
2019-03-17T03:46:31.000Z
|
"""
Error handling policy.
"""
from logging import getLogger
from time import time
from microcosm.api import defaults
from microcosm.config.validation import typed
# nagios style health codes
HEALTH_OK = 0
HEALTH_WARN = 1
HEALTH_ERROR = 2
logger = getLogger("daemon.error_policy")
class ExitError(Exception):
"""
Unconditionally exit the state machine.
"""
pass
class FatalError(Exception):
"""
Unconditionally exit the state machine.
"""
pass
class ErrorPolicy:
"""
Handle errors from state functions.
"""
def __init__(self, strict, health_report_interval):
self.strict = strict
self.health_report_interval = health_report_interval
self.errors = []
self.health = self.compute_health()
self.last_health_report_time = 0
def compute_health(self):
"""
Compute the current daemon health.
"""
return HEALTH_OK if not self.errors else HEALTH_ERROR
def should_report_health(self, new_health):
"""
Should health be reported?
True if health status changes or enough time elapses.
"""
if self.health != new_health:
return True
return self.last_health_report_time + self.health_report_interval < time()
def report_health(self, new_health):
"""
Report health information to logs.
"""
self.last_health_report_time = time()
message = "Health is {}".format(
new_health,
)
if self.health != new_health:
logger.info(message)
else:
logger.debug(message)
for error in self.errors:
if isinstance(error, ExitError):
continue
logger.warn("Caught error during state evaluation: {}".format(error), exc_info=True)
def maybe_report_health(self):
"""
Conditionally report health information.
"""
new_health = self.compute_health()
if self.should_report_health(new_health):
self.report_health(new_health)
self.health = new_health
def __enter__(self):
# reset errors on every iteration
self.errors = []
return self
def __exit__(self, type, value, traceback):
if value:
self.errors.append(value)
self.maybe_report_health()
return not self.strict and type not in (ExitError, FatalError)
@defaults(
strict=False,
health_report_interval=typed(float, 3.0),
)
def configure_error_policy(graph):
return ErrorPolicy(
strict=graph.config.error_policy.strict,
health_report_interval=graph.config.error_policy.health_report_interval,
)
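# --- Hypothetical usage sketch (not part of the original module) ---
# ErrorPolicy is a context manager: a daemon loop would typically wrap each
# state evaluation with it so that errors are collected and surfaced as health.
# The state function name below is an assumption for illustration only.
#
#   policy = ErrorPolicy(strict=False, health_report_interval=3.0)
#   while True:
#       with policy:
#           evaluate_next_state()   # any exception is recorded by __exit__,
#                                   # reported via maybe_report_health(), and
#                                   # swallowed unless strict or Exit/FatalError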
| 22.94958
| 96
| 0.6342
|
67d3a05e5ef93b7e059e98d1ba2a0dc9410372db
| 1,405
|
py
|
Python
|
ESRGAN/dataset.py
|
Vrushank264/GANs-PyTorch
|
3fe406666ecec74140c36f9670b47da3b2b12a6c
|
[
"Apache-2.0"
] | 2
|
2021-07-23T16:33:04.000Z
|
2021-11-14T06:18:19.000Z
|
ESRGAN/dataset.py
|
Vrushank264/GANs-PyTorch
|
3fe406666ecec74140c36f9670b47da3b2b12a6c
|
[
"Apache-2.0"
] | null | null | null |
ESRGAN/dataset.py
|
Vrushank264/GANs-PyTorch
|
3fe406666ecec74140c36f9670b47da3b2b12a6c
|
[
"Apache-2.0"
] | null | null | null |
import torch
import os
from PIL import Image
from torch.utils.data import DataLoader, dataset
import torchvision.transforms as T
class TrainDataset(dataset.Dataset):
def __init__(self,
root: str,
img_size: int = 128,
upscale_factor: int = 4):
super().__init__()
lr_img_size = img_size // upscale_factor
self.fnames = [os.path.join(root, file) for file in os.listdir(root)]
self.hr_transforms = T.Compose([T.Resize((img_size, img_size), interpolation = Image.BICUBIC),
T.RandomHorizontalFlip(0.25),
T.RandomVerticalFlip(0.25),
T.ToTensor(),
T.Normalize([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])])
self.lr_transforms = T.Compose([T.ToPILImage(),
T.Resize((lr_img_size, lr_img_size), interpolation = Image.BICUBIC),
T.ToTensor()])
def __len__(self):
return len(self.fnames)
def __getitem__(self, idx):
hr = Image.open(self.fnames[idx]).convert('RGB')
hr = self.hr_transforms(hr)
lr = self.lr_transforms(hr)
return hr, lr
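# Hypothetical usage sketch (not part of the original file): feed TrainDataset
# into the DataLoader imported above. The dataset directory, batch size, and
# worker count are assumptions for illustration.
if __name__ == '__main__':
    train_data = TrainDataset(root='data/DIV2K_train_HR', img_size=128, upscale_factor=4)
    loader = DataLoader(train_data, batch_size=16, shuffle=True, num_workers=2)
    hr_batch, lr_batch = next(iter(loader))
    # Expected shapes: HR [16, 3, 128, 128], LR [16, 3, 32, 32]
    print(hr_batch.shape, lr_batch.shape)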
| 36.025641
| 109
| 0.482562
|
7e82cc67eb7b7b35fa591c1aa8a29c32edcfc6f3
| 1,794
|
py
|
Python
|
my_classes/.history/Tuples/modifying_extending_named_tuples_20210723165021.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/Tuples/modifying_extending_named_tuples_20210723165021.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/Tuples/modifying_extending_named_tuples_20210723165021.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
''' Modifying and extending Named Tuples
Named tuples are immutable
How we can change values inside the tuple
Just like with strings, we have to create a new tuple, with the modified values
Point2D = namedtuple('Point2D', 'x y')
pt = Point2D(0, 0)
pt.x = 100  # will not work - named tuples are immutable
Suppose we need to change the value of the x coordinate:
Simple approach: pt = Point2D(100, pt.y)
Note: the memory address of pt has now changed
This approach can work well, but it has a major drawback
Stock = namedtuple('Stock', 'symbol year month day open high low close')
djia = Stock('DJIA', 2021, 7, 23, 26313, 26458, 26260, 26393)
the _replace instance method
The replace instance method
Named tuples have a very handy instance method, _replace
It will copy the named tuple into a new one, replacing any values from keyword arguments
The keyword arguments are simply the field names in the tuple and the new value
The keyword name must match an existing field name
the _replace instance method
'''
from collections import namedtuple

Stock = namedtuple('Stock', 'symbol year month day open high low close')
djia = Stock('DJIA', 2021, 7, 22, 26313, 26458, 26260, 26393)
djia = djia._replace(day=23, high=26459, close=26_394)
djia = Stock('DJIA', 2021, 7, 23, 26313, 26458, 26260, 26394)  # same result as the _replace call above
''' Extending a named tuple
Sometimes we want to create a named tuple that extends another named tuple, appending one or more fields
Stock = namedtuple('Stock', 'symbol year month day open high low close')
We want to create a new named tuple class, StockExt that adds a single field, previous_close
When dealing with classes, this is sometimes done by using subclassing.
But this is not easy to do with named tuples
and there is a cleaner way of doing it anyway
'''
Point
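# Hypothetical sketch of the extension approach described above (not part of
# the original snapshot): append a field to an existing named tuple by reusing
# its _fields attribute instead of subclassing.
from collections import namedtuple

StockExt = namedtuple('StockExt', Stock._fields + ('previous_close',))

djia_ext = StockExt(*djia, 26386)   # the previous_close value is illustrative
print(djia_ext.previous_close)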
| 28.47619
| 104
| 0.721293
|
91e81bc985f81d1bfc0d72f13d418290fc12749f
| 3,699
|
py
|
Python
|
tests/test_concept.py
|
jhosoume/pymfe
|
b454f8a5470ebd4dbbeb1a7e5570443dd65fdf9a
|
[
"MIT"
] | null | null | null |
tests/test_concept.py
|
jhosoume/pymfe
|
b454f8a5470ebd4dbbeb1a7e5570443dd65fdf9a
|
[
"MIT"
] | null | null | null |
tests/test_concept.py
|
jhosoume/pymfe
|
b454f8a5470ebd4dbbeb1a7e5570443dd65fdf9a
|
[
"MIT"
] | null | null | null |
"""Test module for concept metafeatures."""
import pytest
from pymfe.mfe import MFE
from tests.utils import load_xy
import numpy as np
GNAME = "concept"
class TestConcept:
"""TestClass dedicated to test concept metafeatures."""
@pytest.mark.parametrize(
"dt_id, ft_name, exp_value, precompute",
[
###################
# Mixed data
###################
(0, "cohesiveness", [10.055, 1.1869723], True),
(0, "conceptvar", [0.5389795, 0.010408287], True),
(0, "impconceptvar", [5.275, 0.59225446], True),
(0, "wg_dist", [1.4762982, 0.07838156], True),
(0, "cohesiveness", [10.055, 1.1869723], False),
(0, "conceptvar", [0.5389795, 0.010408287], False),
(0, "impconceptvar", [5.275, 0.59225446], False),
(0, "wg_dist", [1.4762982, 0.07838156], False),
###################
# Categorical data
###################
(1, "cohesiveness", [306.5352, 48.729893], True),
(1, "conceptvar", [0.47566572, 0.036749393], True),
(1, "impconceptvar", [146.3541, 25.366209], True),
(1, "wg_dist", [2.9002495, 0.21794802], True),
(1, "cohesiveness", [306.5352, 48.729893], False),
(1, "conceptvar", [0.47566572, 0.036749393], False),
(1, "impconceptvar", [146.3541, 25.366209], False),
(1, "wg_dist", [2.9002495, 0.21794802], False),
###################
# Numerical data
###################
(2, "cohesiveness", [67.12, 5.3592987], True),
(2, "conceptvar", [0.4956224, 0.07772438], True),
(2, "impconceptvar", [42.626667, 5.358048], True),
(2, "wg_dist", [0.46218988, 0.05621875], True),
(2, "cohesiveness", [67.12, 5.3592987], False),
(2, "conceptvar", [0.4956224, 0.07772438], False),
(2, "impconceptvar", [42.626667, 5.358048], False),
(2, "wg_dist", [0.46218988, 0.05621875], False),
])
    def test_ft_methods_complexity(self, dt_id, ft_name, exp_value,
                                   precompute):
        """Function to test each meta-feature that belongs to the concept group.
        """
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], features=[ft_name], random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
if exp_value is np.nan:
assert value[0] is exp_value
else:
assert np.allclose(value, exp_value, equal_nan=True)
@pytest.mark.parametrize(
"dt_id, exp_value, precompute",
[
###################
# Mixed data
###################
(0, [10.055, 0.5389795, 5.275, 1.4762982], False),
(0, [10.055, 0.5389795, 5.275, 1.4762982], True),
###################
# Numerical data
###################
(2, [67.12, 0.4956224, 42.626667, 0.46218988], False),
(2, [67.12, 0.4956224, 42.626667, 0.46218988], True),
])
    def test_integration_concept(self, dt_id, exp_value, precompute):
        """Function to test all concept meta-features together.
        """
precomp_group = GNAME if precompute else None
X, y = load_xy(dt_id)
mfe = MFE(groups=[GNAME], summary="mean", random_state=1234)
mfe.fit(X.values, y.values, precomp_groups=precomp_group)
value = mfe.extract()[1]
assert np.allclose(value, exp_value, equal_nan=True)
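# --- Hypothetical stand-alone sketch (not part of the original test module) ---
# The same extraction flow exercised by the tests above, outside of pytest;
# X and y are assumed to be a NumPy feature matrix and label vector.
#
#   from pymfe.mfe import MFE
#   mfe = MFE(groups=["concept"], summary="mean", random_state=1234)
#   mfe.fit(X, y)
#   names, values = mfe.extract()
#   print(dict(zip(names, values)))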
| 38.53125
| 72
| 0.510408
|
c4bfc346c4f3ef4270c03733a1072827eb831ffc
| 9,931
|
py
|
Python
|
Core/Rule.py
|
sybila/eBCSgen
|
d3e66c84eb908c989d22efbee20e41766948bdec
|
[
"MIT"
] | 1
|
2020-11-09T15:51:35.000Z
|
2020-11-09T15:51:35.000Z
|
Core/Rule.py
|
sybila/eBCSgen
|
d3e66c84eb908c989d22efbee20e41766948bdec
|
[
"MIT"
] | 33
|
2020-01-29T09:51:45.000Z
|
2022-03-30T10:09:32.000Z
|
Core/Rule.py
|
sybila/eBCSgen
|
d3e66c84eb908c989d22efbee20e41766948bdec
|
[
"MIT"
] | 1
|
2020-09-07T11:24:22.000Z
|
2020-09-07T11:24:22.000Z
|
import collections
import itertools
import random
from copy import copy, deepcopy
from Core import Rate
from Core.Complex import Complex
from Core.Side import Side
from Core.Reaction import Reaction
from TS.State import Multiset
def column(lst, index):
return tuple(map(lambda x: x[index], lst))
class Rule:
def __init__(self, agents: tuple, mid: int, compartments: list, complexes: list, pairs: list, rate: Rate, label=None):
"""
Class to represent BCSL rule
:param agents: tuple of Atomic/Structure agents in the order as given by the rule
:param mid: index of first agent from right-hand side
:param compartments: list assigning to each position a compartment (for each agent)
:param complexes: list of pairs (from, to) indicating where the complex starts and ends
:param pairs: entangled agents from LHS to RHS
        :param rate: string representing the rate expression
        :param label: optional label of the rule
"""
self.agents = agents
self.mid = mid
self.compartments = compartments
self.complexes = complexes
self.pairs = pairs
self.rate = rate
self.label = label
self.comment = (False, [])
def __eq__(self, other: 'Rule'):
return self.agents == other.agents and self.mid == other.mid and self.compartments == other.compartments and \
self.complexes == other.complexes and self.pairs == other.pairs and str(self.rate) == str(other.rate)
def __repr__(self):
return str(self)
def __str__(self):
lhs, rhs = self.create_complexes()
rate = " @ " + str(self.rate) if self.rate else ""
pre_comment, post_comment = "", ""
if self.comment[1]:
comment = "// redundant #{" + ", ".join(list(map(str, self.comment[1]))) + "} "
pre_comment = comment + "// " if self.comment[0] else ""
post_comment = " " + comment if not self.comment[0] else ""
label = str(self.label) + " ~ " if self.label else ""
return label + pre_comment + " + ".join(lhs.to_list_of_strings()) + \
" => " + " + ".join(rhs.to_list_of_strings()) + rate + post_comment
def __lt__(self, other):
return str(self) < str(other)
def __hash__(self):
return hash(str(self))
def create_complexes(self):
"""
Creates left- and right-hand sides of rule as multisets of Complexes.
:return: two multisets of Complexes represented as object Side
"""
lhs, rhs = [], []
for (f, t) in self.complexes:
c = Complex(self.agents[f:t + 1], self.compartments[f])
lhs.append(c) if t < self.mid else rhs.append(c)
return Side(lhs), Side(rhs)
def to_reaction(self) -> Reaction:
"""
Converts Rule to Reactions -> complicated rule structure is simplified to multiset (resp. Side)
representation of both sides.
:return: created Reaction
"""
lhs, rhs = self.create_complexes()
return Reaction(lhs, rhs, copy(self.rate), self.label)
def rate_to_vector(self, ordering, definitions: dict):
"""
Converts all occurrences of Complexes in rate to vector representation.
:param ordering: given ordering of unique of Complexes (as sortedcontainers.SortedList)
:param definitions: dict of (param_name, value)
"""
if self.rate:
self.rate.vectorize(ordering, definitions)
def create_reactions(self, atomic_signature: dict, structure_signature: dict) -> set:
"""
Adds context to all agents and generated all possible combinations.
Then, new rules with these enhances agents are generated and converted to Reactions.
:param atomic_signature: given mapping of atomic name to possible states
:param structure_signature: given mapping of structure name to possible atomics
:return:
"""
results = []
for (l, r) in self.pairs:
if l is None:
right = -1
left = self.agents[r]
elif r is None:
right = 1
left = self.agents[l]
else:
left = self.agents[l]
right = self.agents[r]
results.append(left.add_context(right, atomic_signature, structure_signature))
reactions = set()
for result in itertools.product(*results):
new_agents = tuple(filter(None, column(result, 0) + column(result, 1)))
new_rule = Rule(new_agents, self.mid, self.compartments, self.complexes, self.pairs, self.rate, self.label)
reactions.add(new_rule.to_reaction())
return reactions
def compatible(self, other: 'Rule') -> bool:
"""
Checks whether Rule is compatible (position-wise) with the other Rule.
        Is done by formally translating to Reactions (just for better object handling).
:param other: given Rule
:return: True if compatible
"""
self_reaction = self.to_reaction()
other_reaction = other.to_reaction()
return self_reaction.compatible(other_reaction)
def reduce_context(self):
"""
Reduces context of Rule to minimum.
Includes both agents and Rate.
:return: new Rule with reduced context
"""
new_agents = tuple([agent.reduce_context() for agent in self.agents])
new_rate = self.rate.reduce_context() if self.rate else None
return Rule(new_agents, self.mid, self.compartments, self.complexes, self.pairs, new_rate)
def is_meaningful(self) -> bool:
"""
Checks whether the Rule does any change, i.e. is meaningful.
Done by translating to Reaction and comparing its sides.
:return: True if meaningful
"""
reaction = self.to_reaction()
return not reaction.lhs == reaction.rhs
def exists_compatible_agent(self, agent: Complex) -> bool:
"""
Checks whether there exists a compatible agent in the rhs of the rule.
:param agent: given Complex agent
:return: True if exists compatible
"""
reaction = self.to_reaction()
return reaction.rhs.exists_compatible_agent(agent)
def create_all_compatible(self, atomic_signature: dict, structure_signature: dict):
"""
Creates all fully specified complexes for all both Sides
:param atomic_signature: given atomic signature
:param structure_signature: given structure signature
:return: set of all created Complexes
"""
return self.to_reaction().create_all_compatible(atomic_signature, structure_signature)
def evaluate_rate(self, state, params):
"""
Evaluate rate based on current state and parameter values.
@param state: given state
@param params: mapping of params to its value
@return: a real number of the rate
"""
values = dict()
for (state_complex, count) in state.content.value.items():
for agent in self.rate_agents:
if agent.compatible(state_complex):
values[agent] = values.get(agent, 0) + count
return self.rate.evaluate_direct(values, params)
def match(self, state, all=False):
"""
Find all possible matches of the rule to given state.
@param state: given state
@param all: bool to indicate if choose one matching randomly or return all of them
@return: random match/all matches
"""
state = deepcopy(state.content.value)
matches = find_all_matches(self.lhs.agents, state)
matches = [sum(match, []) for match in matches]
if len(matches) == 0:
return None
if not all:
return random.choice(matches)
return matches
def replace(self, aligned_match):
"""
Apply rule to chosen match.
        Match contains agents which satisfy the LHS of the rule and can be safely replaced based on the RHS.
@param aligned_match: complexes fitting LHS of the rule
"""
# replace respective agents
resulting_rhs = []
for i, rhs_agent in enumerate(self.agents[self.mid:]):
if len(aligned_match) <= i:
resulting_rhs.append(rhs_agent)
else:
resulting_rhs.append(rhs_agent.replace(aligned_match[i]))
# construct resulting complexes
output_complexes = []
for (f, t) in list(filter(lambda item: item[0] >= self.mid, self.complexes)):
output_complexes.append(Complex(resulting_rhs[f - self.mid:t - self.mid + 1], self.compartments[f]))
return Multiset(collections.Counter(output_complexes))
def reconstruct_complexes_from_match(self, match):
"""
Create complexes from agents matched to the LHS
        @param match: agents aligned to the LHS positions of the rule
"""
output_complexes = []
for (f, t) in list(filter(lambda item: item[1] < self.mid, self.complexes)):
output_complexes.append(Complex(match[f:t + 1], self.compartments[f]))
return Multiset(collections.Counter(output_complexes))
def find_all_matches(lhs_agents, state):
"""
Finds all possible matches which actually can be used for given state.
@param lhs_agents: given LHS of a rule
@param state: state to be applied to
@return: candidates for match
"""
choices = []
if len(lhs_agents) == 0:
return [choices]
lhs_complex = lhs_agents[0]
for candidate in list(state):
if lhs_complex.compatible(candidate):
state[candidate] -= 1
aligns = candidate.align_match(lhs_complex)
for branch in find_all_matches(lhs_agents[1:], deepcopy(+state)):
for align in aligns:
choices.append([align] + branch)
return choices
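# --- Hypothetical usage sketch (not part of the original module) ---
# Typical flow once a Rule object has been produced by the BCSL parser; the
# construction of agents/complexes and of the two signatures is model specific
# and omitted here.
#
#   reaction = rule.to_reaction()                        # multiset (Side) view of both sides
#   reactions = rule.create_reactions(atomic_signature, structure_signature)
#   minimal = rule.reduce_context()                      # strip redundant context
#   match = rule.match(state)                            # one random match against a state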
| 37.334586
| 122
| 0.623099
|
1576717bd3476b9dcf7aaeb71fa1a8f1c340eccf
| 481
|
py
|
Python
|
src/pymir-viz/src/config.py
|
IJtLJZ8Rm4Yr/ymir-backend
|
73baa7822bb0d8ec1d152b07d9cabaddb2ae895d
|
[
"Apache-2.0"
] | null | null | null |
src/pymir-viz/src/config.py
|
IJtLJZ8Rm4Yr/ymir-backend
|
73baa7822bb0d8ec1d152b07d9cabaddb2ae895d
|
[
"Apache-2.0"
] | null | null | null |
src/pymir-viz/src/config.py
|
IJtLJZ8Rm4Yr/ymir-backend
|
73baa7822bb0d8ec1d152b07d9cabaddb2ae895d
|
[
"Apache-2.0"
] | null | null | null |
import os
env = os.environ.get
SANDBOX_ROOT = env("SANDBOX_ROOT", '/data/mir_root')
REDIS_URI = env("VIZ_REDIS_URI")
# redis key info
ASSET_ID_DETAIL = "detail"
ASSETS_CLASS_IDS_COUNT = "class_ids_count"
ASSETS_CLASS_ID_INDEX = "index"
# the middle data structure, it will save into cache,like Redis
MIDDLE_STRUCTURE_VERSION = "0.1"
# added all assets index by viz
ALL_INDEX_CLASSIDS = "__all_index_classids__"
# set flag status when generating cache
CACHE_STATUS = "status"
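# --- Hypothetical usage sketch (not part of the original module) ---
# These settings are read from the environment at import time, so a deployment
# would export them before the service starts, e.g.:
#
#   export SANDBOX_ROOT=/data/mir_root
#   export VIZ_REDIS_URI=redis://127.0.0.1:6379/0
#
# and application code would then simply import this config module and
# reference config.REDIS_URI, config.ASSET_ID_DETAIL, etc.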
| 22.904762
| 63
| 0.775468
|
ed453df1490e9088e9b4c10635c07203d93a61f0
| 67
|
py
|
Python
|
scripts/test/test/dsl.py
|
cedar101/quepy-ko
|
532744e50d9754befeea2eff6a261fa9cd095473
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/test/test/dsl.py
|
cedar101/quepy-ko
|
532744e50d9754befeea2eff6a261fa9cd095473
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/test/test/dsl.py
|
cedar101/quepy-ko
|
532744e50d9754befeea2eff6a261fa9cd095473
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Domain specific language for test quepy.
"""
| 9.571429
| 40
| 0.656716
|
1886db8a9bf3c721e263a166fd04c84d88807c43
| 38,937
|
py
|
Python
|
vipermonkey/core/vba_object.py
|
kirk-sayre-work/ViperMonkey
|
29078c1042645855a856fabc8db0a8e7780faa22
|
[
"Unlicense"
] | 78
|
2017-11-15T13:51:13.000Z
|
2022-03-15T09:22:33.000Z
|
vipermonkey/core/vba_object.py
|
kirk-sayre-work/ViperMonkey
|
29078c1042645855a856fabc8db0a8e7780faa22
|
[
"Unlicense"
] | 17
|
2018-09-14T19:14:47.000Z
|
2021-11-17T19:03:05.000Z
|
vipermonkey/core/vba_object.py
|
kirk-sayre-work/ViperMonkey
|
29078c1042645855a856fabc8db0a8e7780faa22
|
[
"Unlicense"
] | 13
|
2018-09-12T20:48:16.000Z
|
2022-02-07T06:16:24.000Z
|
"""@package vipermonkey.core.vba_object Base class for all VBA objects
and top level functions for evaluating ViperMonkey VBA objects.
"""
# pylint: disable=pointless-string-statement
"""
ViperMonkey: VBA Grammar - Base class for all VBA objects
ViperMonkey is a specialized engine to parse, analyze and interpret Microsoft
VBA macros (Visual Basic for Applications), mainly for malware analysis.
Author: Philippe Lagadec - http://www.decalage.info
License: BSD, see source code or documentation
Project Repository:
https://github.com/decalage2/ViperMonkey
"""
# === LICENSE ==================================================================
# ViperMonkey is copyright (c) 2015-2019 Philippe Lagadec (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
# CHANGELOG:
# 2015-02-12 v0.01 PL: - first prototype
# 2015-2016 PL: - many updates
# 2016-06-11 v0.02 PL: - split vipermonkey into several modules
__version__ = '0.08'
# ------------------------------------------------------------------------------
# TODO:
# --- IMPORTS ------------------------------------------------------------------
import logging
from logger import log
import re
from curses_ascii import isprint
import traceback
from inspect import getouterframes, currentframe
import sys
from datetime import datetime
import pyparsing
from var_in_expr_visitor import var_in_expr_visitor
from utils import safe_str_convert
import utils
import excel
max_emulation_time = None
class VbaLibraryFunc(object):
"""Marker class to tell if a class emulates a VBA function.
"""
def eval(self, context, params=None):
"""Emulate the VBScript/VBA function.
@param context (Context object) The current program
state. This will be updated.
@param params (list) The function call parameters.
@return (any) The result of emulating the function call.
"""
context = context # pylint
params = params # pylint
raise ValueError("eval() method not implemented.")
def num_args(self):
"""Get the # of arguments (minimum) required by the function.
@return (int) The number of required arguments for the
emulated function.
"""
log.warning("Using default # args of 1 for " + safe_str_convert(type(self)))
return 1
def return_type(self):
"""Get the type returned from the emulated function ('INTEGER' or
'STRING').
@return (str) The function return type.
"""
log.warning("Using default return type of 'INTEGER' for " + safe_str_convert(type(self)))
return "INTEGER"
def limits_exceeded(throw_error=False):
"""Check to see if we are about to exceed the maximum recursion
depth. Also check to see if emulation is taking too long (if
needed).
@param throw_error (boolean) If True throw an exception if the
recursion depth or runtime has been exceeded.
@return (boolean) True if the recursion depth or runtime has been
exceeded, False if not.
@throws RuntimeError This is thrown if throw_error is True and
processing limits have been exceeded.
"""
# Check to see if we are approaching the recursion limit.
level = len(getouterframes(currentframe(1)))
recursion_exceeded = (level > (sys.getrecursionlimit() * .50))
time_exceeded = False
# Check to see if we have exceeded the time limit.
if (max_emulation_time is not None):
time_exceeded = (datetime.now() > max_emulation_time)
if (recursion_exceeded):
log.error("Call recursion depth approaching limit.")
if (throw_error):
raise RuntimeError("The ViperMonkey recursion depth will be exceeded. Aborting analysis.")
if (time_exceeded):
log.error("Emulation time exceeded.")
if (throw_error):
raise RuntimeError("The ViperMonkey emulation time limit was exceeded. Aborting analysis.")
return (recursion_exceeded or time_exceeded)
class VBA_Object(object):
"""Base class for all VBA objects that can be evaluated.
"""
# Upper bound for loop iterations. 0 or less means unlimited.
loop_upper_bound = 500000
def __init__(self, original_str, location, tokens):
"""VBA_Object constructor, to be called as a parse action by a
pyparsing parser
@param original_str (str) original string matched by the
parser.
@param location (int) location of the match.
@param tokens (PyParsing tokens thing) tokens extracted by the
parser
"""
self.original_str = original_str
self.location = location
self.tokens = tokens
self._children = None
self.is_useless = False
self.is_loop = False
self.exited_with_goto = False
def eval(self, context, params=None):
"""Evaluate the current value of the object.
@param context (Context object) Context for the evaluation
(local and global variables). State updates will be reflected
in the given context.
@param params (list) Any parameters provided to the object.
@return (any) The result of emulating the current object.
"""
context = context # pylint
params = params # pylint
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug(self)
# raise NotImplementedError
def __repr__(self):
"""Full string representation of the object.
@return (str) Object as a string.
"""
raise NotImplementedError("__repr__() not implemented in " + safe_str_convert(type(self)))
def full_str(self):
"""Full string representation of the object.
@return (str) Object as a string.
"""
return safe_str_convert(self)
def get_children(self):
"""Return the child VBA objects of the current object.
@return (list) The children (VBA_Object objects) of the
current object.
"""
# Check for timeouts.
limits_exceeded(throw_error=True)
# The default behavior is to count any VBA_Object attribute as
# a child.
if ((hasattr(self, "_children")) and (self._children is not None)):
return self._children
r = []
for _, value in self.__dict__.iteritems():
if (isinstance(value, VBA_Object)):
r.append(value)
if isinstance(value, (list, pyparsing.ParseResults)):
for i in value:
if (isinstance(i, VBA_Object)):
r.append(i)
if (isinstance(value, dict)):
for i in value.values():
if (isinstance(i, VBA_Object)):
r.append(i)
self._children = r
return r
def accept(self, visitor, no_embedded_loops=False):
"""Visitor design pattern support, Accept a visitor.
@param visitor (visitor object) The visitor object to use to
visit the current object and it's children.
@param no_embedded_loops (boolean) Whether to skip visiting
loops (While, For, etc.) in the current object.
"""
# Check for timeouts.
limits_exceeded(throw_error=True)
# Skipping visiting embedded loops? Check to see if we are already
# in a loop and the current VBA object is a loop.
if (no_embedded_loops and
hasattr(visitor, "in_loop") and
visitor.in_loop and
self.is_loop):
#print "SKIPPING LOOP!!"
#print self
return
# Set initial in loop status of visitor if needed.
if (not hasattr(visitor, "in_loop")):
visitor.in_loop = self.is_loop
# Have we moved into a loop?
if ((not visitor.in_loop) and (self.is_loop)):
visitor.in_loop = True
# Visit the current item.
visit_status = visitor.visit(self)
if (not visit_status):
return
# Save the in loop status so we can restore it after visiting the children.
old_in_loop = visitor.in_loop
# Visit all the children.
for child in self.get_children():
child.accept(visitor, no_embedded_loops=no_embedded_loops)
# Back in current VBA object. Restore the in loop status.
visitor.in_loop = old_in_loop
def to_python(self, context, params=None, indent=0):
"""JIT compile this VBA object to Python code for direct emulation.
@param context (Context object) Context for the Python code
generation (local and global variables). Current program state
will be read from the context.
@param params (list) Any parameters provided to the object.
@param indent (int) The number of spaces of indent to use at
the beginning of the generated Python code.
@return (str) The current object with it's emulation
implemented as Python code.
"""
log.warning("to_python() not implemented in " + safe_str_convert(type(self)))
raise NotImplementedError("to_python() not implemented in " + safe_str_convert(type(self)))
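# --- Hypothetical usage sketch (not part of the original module) ---
# VBA_Object supports the visitor pattern via accept(); this mirrors how
# is_constant_math() below walks an expression to collect variable names:
#
#   visitor = var_in_expr_visitor()
#   some_vba_object.accept(visitor)
#   print(visitor.variables)        # set of variable names seen in the expression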
def _read_from_excel(arg, context):
"""Try to evaluate an argument by reading from the loaded Excel
spreadsheet.
@param arg (VBA_Object object) The argument to evaluate.
@param context (Context object) The current program state.
@return (any) The result of the evaluation on success, None on
failure.
"""
# Try handling reading value from an Excel spreadsheet cell.
# ThisWorkbook.Sheets('YHRPN').Range('J106').Value
if ("MemberAccessExpression" not in safe_str_convert(type(arg))):
return None
arg_str = safe_str_convert(arg)
if (("sheets(" in arg_str.lower()) and
(("range(" in arg_str.lower()) or ("cells(" in arg_str.lower()))):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("Try as Excel cell read...")
return arg.eval(context)
# Not handled.
return None
def _read_from_object_text(arg, context):
"""Try to read in a value from the text associated with a object like
a Shape.
@param arg (VBA_Object object) The argument to evaluate.
@param context (Context object) The current program state.
@return (any) The result of the evaluation on success, None on
failure.
"""
# Do we have an object text access?
arg_str = safe_str_convert(arg)
arg_str_low = arg_str.lower().strip()
# Shapes('test33'). TextFrame.TextRange.text
# Shapes('FrXXBbPlWaco').TextFrame.TextRange
#
# Make sure not to pull out Shapes() references that appear as arguments to function
# calls.
import expressions
if (("shapes(" in arg_str_low) and (not isinstance(arg, expressions.Function_Call))):
# Yes we do.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try to get as ....TextFrame.TextRange.Text value: " + arg_str.lower())
# Handle member access?
lhs = "Shapes('1')"
if ("inlineshapes" in arg_str_low):
lhs = "InlineShapes('1')"
if ("MemberAccessExpression" in safe_str_convert(type(arg))):
# Drop off ActiveDocument prefix.
lhs = arg.lhs
if ((safe_str_convert(lhs) == "ActiveDocument") or (safe_str_convert(lhs) == "ThisDocument")):
lhs = arg.rhs[0]
# Eval the leftmost prefix element of the member access expression first.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_obj_text: Old member access lhs = " + safe_str_convert(lhs))
if ((hasattr(lhs, "eval")) and
(not isinstance(lhs, pyparsing.ParseResults))):
lhs = lhs.eval(context)
else:
# Look this up as a variable name.
var_name = safe_str_convert(lhs)
try:
lhs = context.get(var_name)
except KeyError:
lhs = var_name
if (lhs == "NULL"):
lhs = "Shapes('1')"
if ("inlineshapes" in arg_str_low):
lhs = "InlineShapes('1')"
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_obj_text: Evaled member access lhs = " + safe_str_convert(lhs))
# Try to get this as a doc var.
doc_var_name = safe_str_convert(lhs) + ".TextFrame.TextRange.Text"
doc_var_name = doc_var_name.replace(".TextFrame.TextFrame", ".TextFrame")
if (("InlineShapes(" in doc_var_name) and (not doc_var_name.startswith("InlineShapes("))):
doc_var_name = doc_var_name[doc_var_name.index("InlineShapes("):]
elif (("Shapes(" in doc_var_name) and
(not doc_var_name.startswith("Shapes(")) and
("InlineShapes(" not in doc_var_name)):
doc_var_name = doc_var_name[doc_var_name.index("Shapes("):]
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_obj_text: Looking for object text " + safe_str_convert(doc_var_name))
val = context.get_doc_var(doc_var_name.lower())
if (val is not None):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_obj_text: Found " + safe_str_convert(doc_var_name) + " = " + safe_str_convert(val))
return val
# Not found. Try looking for the object with index 1.
lhs_str = safe_str_convert(lhs)
if ("'" not in lhs_str):
return None
new_lhs = lhs_str[:lhs_str.index("'") + 1] + "1" + lhs_str[lhs_str.rindex("'"):]
doc_var_name = new_lhs + ".TextFrame.TextRange.Text"
doc_var_name = doc_var_name.replace(".TextFrame.TextFrame", ".TextFrame")
if (("InlineShapes(" in doc_var_name) and (not doc_var_name.startswith("InlineShapes("))):
doc_var_name = doc_var_name[doc_var_name.index("InlineShapes("):]
elif (("Shapes(" in doc_var_name) and
(not doc_var_name.startswith("Shapes(")) and
("InlineShapes(" not in doc_var_name)):
doc_var_name = doc_var_name[doc_var_name.index("Shapes("):]
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Fallback, looking for object text " + safe_str_convert(doc_var_name))
val = context.get_doc_var(doc_var_name.lower())
return val
# Not handled.
return None
def contains_excel(arg):
"""See if a given expression contains Excel book or sheet objects.
@param arg (VBA_Object object) The argument to check.
@return (boolean) True if the given VBA expression contains Excel
book or sheet objects, False if not.
"""
# Got actual Excel objects?
if isinstance(arg, (excel.ExcelBook, excel.ExcelSheet)):
return True
# Got a function call?
import expressions
if (not isinstance(arg, expressions.Function_Call)):
return False
# Is this an Excel function call?
excel_funcs = set(["usedrange", "sheets", "specialcells"])
return (safe_str_convert(arg.name).lower() in excel_funcs)
constant_expr_cache = {}
def get_cached_value(arg):
"""Get the cached value of an all constant numeric expression if we
have it.
@param arg (VBA_Object object) The argument to check.
@return (int or VBA_Object) The cached value of the all constant
numeric expression if it is in the cache, the original given
argument if not.
"""
# Don't do any more work if this is already a resolved value.
if isinstance(arg, (dict, int)):
return arg
# If it is something that may be hard to convert to a string, no cached value.
if contains_excel(arg):
return None
# This is not already resolved to an int. See if we computed this before.
arg_str = safe_str_convert(arg)
if (arg_str not in constant_expr_cache.keys()):
return None
return constant_expr_cache[arg_str]
def set_cached_value(arg, val):
"""Set the cached value of an all constant numeric expression.
@param arg (VBA_Object object) The unresolved expression to
cache.
@param val (int, float, complex) The value of the resolved
expression.
"""
# We should be setting this to a numeric expression
if ((not isinstance(val, int)) and
(not isinstance(val, float)) and
(not isinstance(val, complex))):
if (log.getEffectiveLevel() == logging.DEBUG):
log.warning("Expression '" + safe_str_convert(val) + "' is a " + safe_str_convert(type(val)) + ", not an int. Not caching.")
return
# Don't cache things that contain Excel sheets or workbooks.
if contains_excel(arg):
return
# We have a number. Cache it.
arg_str = safe_str_convert(arg)
try:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("Cache value of " + arg_str + " = " + safe_str_convert(val))
except UnicodeEncodeError:
pass
constant_expr_cache[arg_str] = val
def is_constant_math(arg):
"""See if a given expression is a simple math expression with all
literal numbers.
@param arg (VBA_Object object) The expression to check.
@return (boolean) True if this is a simple math expression with
all numeric literals, False if not.
"""
# Sanity check. If there are variables in the expression it is not all literals.
if (isinstance(arg, VBA_Object)):
var_visitor = var_in_expr_visitor()
arg.accept(var_visitor)
if (len(var_visitor.variables) > 0):
return False
# Some things are not math expressions.
if (isinstance(arg, dict) or
contains_excel(arg)):
return False
# Speed this up with the rure regex library if it is installed.
try:
import rure as local_re
except ImportError:
        # Fall back to the standard re module if rure is not installed.
# pylint: disable=reimported
import re as local_re
# Use a regex to see if this is an all constant expression.
base_pat = "(?:\\s*\\d+(?:\\.\\d+)?\\s*[+\\-\\*/]\\s*)*\\s*\\d+"
paren_pat = base_pat + "|(?:\\((?:\\s*" + base_pat + "\\s*[+\\-\\*\\\\]\\s*)*\\s*" + base_pat + "\\))"
arg_str = safe_str_convert(arg).strip()
try:
arg_str = unicode(arg_str)
except UnicodeDecodeError:
arg_str = filter(isprint, arg_str)
arg_str = unicode(arg_str)
return (local_re.match(unicode(paren_pat), arg_str) is not None)
def _handle_wscriptshell_run(arg, context, got_constant_math):
"""Handle cases where wscriptshell.run() is being called and there is
a local run() function.
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param got_constant_math (boolean) If True the given arg is an all
numeric literal expression, if False it is not.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# Handle cases where wscriptshell.run() is being called and there is a local run() function.
if ((".run(" in safe_str_convert(arg).lower()) and (context.contains("run"))):
# Resolve the run() call.
if ("MemberAccessExpression" in safe_str_convert(type(arg))):
arg_evaled = arg.eval(context)
if got_constant_math: set_cached_value(arg, arg_evaled)
return arg_evaled
# Not handled.
return None
def _handle_shapes_access(r, arg, context, got_constant_math):
"""Finish handling a partially handled Shapes() access.
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param got_constant_math (boolean) If True the given arg is an all
numeric literal expression, if False it is not.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# Is this a Shapes() access that still needs to be handled?
poss_shape_txt = ""
if isinstance(r, (VBA_Object, str)):
poss_shape_txt = safe_str_convert(r)
if ((poss_shape_txt.startswith("Shapes(")) or (poss_shape_txt.startswith("InlineShapes("))):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Handling intermediate Shapes() access for " + safe_str_convert(r))
r = eval_arg(r, context)
if got_constant_math: set_cached_value(arg, r)
return r
# Not handled.
return None
def _handle_nodetypedvalue_read(arg, context, got_constant_math):
"""Handle reads of the nodeTypedValue field of an object.
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param got_constant_math (boolean) If True the given arg is an all
numeric literal expression, if False it is not.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# This is a hack to get values saved in the .text field of objects.
# To do this properly we need to save "FOO.text" as a variable and
# return the value of "FOO.text" when getting "FOO.nodeTypedValue".
if ("nodetypedvalue" in arg.lower()):
try:
tmp = arg.lower().replace("nodetypedvalue", "text")
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try to get as " + tmp + "...")
val = context.get(tmp)
# It looks like maybe this magically does base64 decode? Try that.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try base64 decode of '" + safe_str_convert(val) + "'...")
val_decode = utils.b64_decode(val)
if (val_decode is not None):
if got_constant_math: set_cached_value(arg, val_decode)
return val_decode
except KeyError:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Not found as .text.")
# Not handled.
return None
def _handle_selected_item_read(arg, context, got_constant_math):
"""Handle reads of the selectedItem field of an object.
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param got_constant_math (boolean) If True the given arg is an all
numeric literal expression, if False it is not.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# This is a hack to get values saved in the .rapt.Value field of objects.
if (".selecteditem" in arg.lower()):
try:
tmp = arg.lower().replace(".selecteditem", ".rapt.value")
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try to get as " + tmp + "...")
val = context.get(tmp)
if got_constant_math: set_cached_value(arg, val)
return val
except KeyError:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Not found as .rapt.value.")
# Not handled.
return None
# Office file metadata. This is None until the file's metadata has been read in.
meta = None
def _handle_form_variable_read(arg, context, got_constant_math):
"""Handle reading some VBA form variable (looks like reading a field
of an object).
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param got_constant_math (boolean) If True the given arg is an all
numeric literal expression, if False it is not.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# Is this trying to access some VBA form variable?
if ("." in arg.lower()):
# Try easy button first. See if this is just a doc var.
doc_var_val = context.get_doc_var(arg)
if (doc_var_val is not None):
if got_constant_math: set_cached_value(arg, doc_var_val)
return doc_var_val
        # Peel off items separated by a '.', trying them as functions.
arg_peeled = arg
while ("." in arg_peeled):
# Try it as a form variable.
curr_var_attempt = arg_peeled.lower()
try:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try to load as variable " + curr_var_attempt + "...")
val = context.get(curr_var_attempt)
if (val != safe_str_convert(arg)):
if got_constant_math: set_cached_value(arg, val)
return val
except KeyError:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Not found as variable")
arg_peeled = arg_peeled[arg_peeled.index(".") + 1:]
# Try it as a function
func_name = arg.lower()
func_name = func_name[func_name.rindex(".")+1:]
try:
            # Look up and execute the function.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try to run as function '" + func_name + "'...")
func = context.get(func_name)
r = func
import procedures
if (isinstance(func, (procedures.Function, procedures.Sub)) or
('vipermonkey.core.vba_library.' in safe_str_convert(type(func)))):
r = eval_arg(func, context, treat_as_var_name=True)
# Did the function resolve to a value?
if (r != func):
# Yes it did. Return the function result.
if got_constant_math: set_cached_value(arg, r)
return r
            # The function did not resolve to a value. Return the
            # original string.
if got_constant_math: set_cached_value(arg, arg)
return arg
except KeyError:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Not found as function")
except Exception as e:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Failed. Not a function. " + safe_str_convert(e))
traceback.print_exc()
# Are we trying to load some document meta data?
tmp = arg.lower().strip()
if (tmp.startswith("activedocument.item(")):
# Try to pull the result from the document meta data.
prop = tmp.replace("activedocument.item(", "").replace(")", "").replace("'","").strip()
# Make sure we read in the metadata.
if (meta is None):
log.error("BuiltInDocumentProperties: Metadata not read.")
return ""
# See if we can find the metadata attribute.
if (not hasattr(meta, prop.lower())):
log.error("BuiltInDocumentProperties: Metadata field '" + prop + "' not found.")
return ""
# We have the attribute. Return it.
r = getattr(meta, prop.lower())
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("BuiltInDocumentProperties: return %r -> %r" % (prop, r))
return r
# Are we trying to load some document data?
if ((tmp.startswith("thisdocument.builtindocumentproperties(")) or
(tmp.startswith("activeworkbook.builtindocumentproperties("))):
# Try to pull the result from the document data.
var = tmp.replace("thisdocument.builtindocumentproperties(", "").replace(")", "").replace("'","").strip()
var = var.replace("activeworkbook.builtindocumentproperties(", "")
val = context.get_doc_var(var)
if (val is not None):
return val
# Try getting from meta data.
val = context.read_metadata_item(var)
if (val is not None):
return val
# Are we loading a document variable?
if (tmp.startswith("activedocument.variables(")):
# ActiveDocument.Variables("ER0SNQAWT").Value
# Try to pull the result from the document variables.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: handle expression as doc var lookup '" + tmp + "'")
var = tmp.replace("activedocument.variables(", "").\
replace(")", "").\
replace("'","").\
replace('"',"").\
replace('.value',"").\
replace("(", "").\
strip()
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: look for '" + var + "' as document variable...")
val = context.get_doc_var(var)
if (val is not None):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: got it as document variable.")
return val
else:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: did NOT get it as document variable.")
# Are we loading a custom document property?
if (tmp.startswith("activedocument.customdocumentproperties(")):
# ActiveDocument.CustomDocumentProperties("l3qDvt3B53wxeXu").Value
# Try to pull the result from the custom properties.
var = tmp.replace("activedocument.customdocumentproperties(", "").\
replace(")", "").\
replace("'","").\
replace('"',"").\
replace('.value',"").\
replace("(", "").\
strip()
val = context.get_doc_var(var)
if (val is not None):
return val
# As a last resort try reading it as a wildcarded form variable.
wild_name = tmp[:tmp.index(".")] + "*"
for i in range(0, 11):
tmp_name = wild_name + safe_str_convert(i)
try:
val = context.get(tmp_name)
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Found '" + tmp + "' as wild card form variable '" + tmp_name + "'")
return val
except KeyError:
pass
# Not handled.
return None
def eval_arg(arg, context, treat_as_var_name=False):
"""Evaluate a single argument if it is a VBA_Object, otherwise return
its value.
@param arg (VBA_Object object) The item being evaluated.
@param context (Context object) The current program state.
@param treat_as_var_name (boolean) If True try to look up a
variable with the given name, if False try to directly evaluate
the given item.
@return (??) On success the evaluated item is returned, None is
returned on error.
"""
# pypy seg faults sometimes if the recursion depth is exceeded. Try to
# avoid that. Also check to see if emulation has taken too long.
limits_exceeded(throw_error=True)
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("try eval arg: %s (%s, %s, %s)" % (arg, type(arg), isinstance(arg, VBA_Object), treat_as_var_name))
# Is this a constant math expression?
got_constant_math = is_constant_math(arg)
# Do we have the cached value of this expression?
cached_val = get_cached_value(arg)
if (cached_val is not None):
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Got cached value %r = %r" % (arg, cached_val))
return cached_val
# Try handling reading value from an Excel spreadsheet cell.
excel_val = _read_from_excel(arg, context)
if (excel_val is not None):
if got_constant_math: set_cached_value(arg, excel_val)
return excel_val
# Short circuit the checks and see if we are accessing some object text first.
obj_text_val = _read_from_object_text(arg, context)
if (obj_text_val is not None):
if got_constant_math: set_cached_value(arg, obj_text_val)
return obj_text_val
# Not reading from an Excel cell. Try as a VBA object.
if isinstance(arg, (VBA_Object, VbaLibraryFunc)):
# Handle cases where wscriptshell.run() is being called and there is a local run() function.
tmp_r = _handle_wscriptshell_run(arg, context, got_constant_math)
if (tmp_r is not None):
return tmp_r
# Handle as a regular VBA object.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: eval as VBA_Object %s" % arg)
r = arg.eval(context=context)
# Is this a Shapes() access that still needs to be handled?
tmp_r = _handle_shapes_access(r, arg, context, got_constant_math)
if (tmp_r is not None):
return tmp_r
# Regular VBA object.
if got_constant_math: set_cached_value(arg, r)
return r
# Not a VBA object.
else:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: not a VBA_Object: %r" % arg)
# Might this be a special type of variable lookup?
if (isinstance(arg, str)):
# Simple case first. Is this a variable?
try:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Try as variable name: %r" % arg)
r = context.get(arg)
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Got %r = %r" % (arg, r))
if got_constant_math: set_cached_value(arg, r)
return r
except KeyError:
# No it is not. Try more complicated cases.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Not found as variable name: %r" % arg)
else:
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: Do not try as variable name: %r" % arg)
# This is a hack to get values saved in the .text field of objects.
# To do this properly we need to save "FOO.text" as a variable and
# return the value of "FOO.text" when getting "FOO.nodeTypedValue".
tmp_r = _handle_nodetypedvalue_read(arg, context, got_constant_math)
if (tmp_r is not None):
return tmp_r
# This is a hack to get values saved in the .rapt.Value field of objects.
tmp_r = _handle_selected_item_read(arg, context, got_constant_math)
if (tmp_r is not None):
return tmp_r
# Is this trying to access some VBA form variable?
tmp_r = _handle_form_variable_read(arg, context, got_constant_math)
if (tmp_r is not None):
return tmp_r
# Should this be handled as a variable? Must be a valid var name to do this.
if (treat_as_var_name and (re.match(r"[a-zA-Z_][\w\d]*", safe_str_convert(arg)) is not None)):
        # We did not resolve the variable. Treat it as uninitialized.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: return 'NULL'")
return "NULL"
# Are we referring to a form element that we cannot find?
form_fields = [".tag", ".boundvalue", ".column", ".caption",
".groupname", ".seltext", ".controltiptext",
".passwordchar", ".controlsource", ".value"]
for form_field in form_fields:
if (safe_str_convert(arg).lower().endswith(form_field)):
return ""
# The .text hack did not work.
if (log.getEffectiveLevel() == logging.DEBUG):
log.debug("eval_arg: return " + safe_str_convert(arg))
return arg
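# Summary sketch (descriptive note, not from the original source): eval_arg() effectively
# tries these strategies in order and returns the first one that yields a value:
#
#   1. cached constant-math results        (get_cached_value)
#   2. Excel cell reads                    (_read_from_excel)
#   3. object .text reads                  (_read_from_object_text)
#   4. VBA_Object evaluation               (arg.eval, plus the run()/Shapes() special cases)
#   5. plain variable lookup               (context.get)
#   6. .nodeTypedValue / .selectedItem / form-variable handlers
#   7. "NULL" for unresolved names, "" for missing form fields, else arg unchanged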
def eval_args(args, context, treat_as_var_name=False):
"""Evaluate a list of arguments if they are VBA_Objects, otherwise
return their value as-is.
@param args (list) The list of items (VBA_Object object) being
evaluated.
@param context (Context object) The current program state.
@param treat_as_var_name (boolean) If True try to look up variable
with the given names in the args list, if False try to directly
evaluate the given items.
@return (list) Return the list of evaluated arguments on success,
the original args on failure.
"""
# Punt if we can't iterate over the args.
try:
_ = iter(args)
except TypeError:
return args
# Short circuit check to see if there are any VBA objects.
got_vba_objects = False
for arg in args:
if (isinstance(arg, VBA_Object)):
got_vba_objects = True
if (not got_vba_objects):
return args
r = map(lambda arg: eval_arg(arg, context=context, treat_as_var_name=treat_as_var_name), args)
return r
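# Minimal passthrough sketch (illustration only): when no VBA_Object instances appear in
# the argument list, eval_args() returns the list untouched and never consults the
# context, so even context=None is safe here.
def _eval_args_passthrough_example():
    assert eval_args([1, "2", 3.0], context=None) == [1, "2", 3.0]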
| 37.439423
| 136
| 0.613581
|
74cb3e8e507497c5e505ab1d4fe0a9b05dda8fd9
| 330
|
py
|
Python
|
app/authentication/api_key_manipulation.py
|
jabertuhin/fastapi-with-redis
|
aad3fd2c3c1ae011c43bfb37f29903ef7835026e
|
[
"MIT"
] | 5
|
2021-08-14T17:08:48.000Z
|
2022-01-06T18:29:16.000Z
|
app/authentication/api_key_manipulation.py
|
jabertuhin/fastapi-with-redis
|
aad3fd2c3c1ae011c43bfb37f29903ef7835026e
|
[
"MIT"
] | 2
|
2021-09-25T11:24:09.000Z
|
2021-09-25T11:24:20.000Z
|
app/authentication/api_key_manipulation.py
|
jabertuhin/fastapi-with-redis
|
aad3fd2c3c1ae011c43bfb37f29903ef7835026e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import NewType
Seconds = NewType("Seconds", int)
def get_api_key_with_time(api_key: str, time: Seconds) -> str:
    # Only a one-minute window (time // 60 == 1) is supported.
if time//60 == 1:
return f"{api_key}:{str(datetime.now().minute)}"
raise ValueError("Doesn't support other than minute(60 seconds).")
| 30
| 70
| 0.693939
|
ddfd8d9c28d3d412e71ea0a77568dad918a38e10
| 501
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/treemap/marker/colorbar/title/_side.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/Lib/site-packages/plotly/validators/treemap/marker/colorbar/title/_side.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/Lib/site-packages/plotly/validators/treemap/marker/colorbar/title/_side.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="treemap.marker.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
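# Rough usage sketch (illustration only; validate_coerce is inherited from plotly's
# EnumeratedValidator base class):
#
#   validator = SideValidator()
#   validator.validate_coerce("top")    # -> "top"
#   validator.validate_coerce("left")   # rejected: not one of "right", "top", "bottom"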
| 33.4
| 87
| 0.640719
|
40688750253d0e9bd6a78056ce1a8127fda7c458
| 2,818
|
py
|
Python
|
disnake/types/raw_models.py
|
MisileLab/disnake
|
c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa
|
[
"MIT"
] | null | null | null |
disnake/types/raw_models.py
|
MisileLab/disnake
|
c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa
|
[
"MIT"
] | null | null | null |
disnake/types/raw_models.py
|
MisileLab/disnake
|
c7f6a61f2fe2a05cb57027486d6f2cd7fe5399fa
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import TypedDict, List
from .snowflake import Snowflake
from .member import Member
from .emoji import PartialEmoji
class _MessageEventOptional(TypedDict, total=False):
guild_id: Snowflake
class MessageDeleteEvent(_MessageEventOptional):
id: Snowflake
channel_id: Snowflake
class BulkMessageDeleteEvent(_MessageEventOptional):
ids: List[Snowflake]
channel_id: Snowflake
class _ReactionActionEventOptional(TypedDict, total=False):
guild_id: Snowflake
member: Member
class MessageUpdateEvent(_MessageEventOptional):
id: Snowflake
channel_id: Snowflake
class ReactionActionEvent(_ReactionActionEventOptional):
user_id: Snowflake
channel_id: Snowflake
message_id: Snowflake
emoji: PartialEmoji
class _ReactionClearEventOptional(TypedDict, total=False):
guild_id: Snowflake
class ReactionClearEvent(_ReactionClearEventOptional):
channel_id: Snowflake
message_id: Snowflake
class _ReactionClearEmojiEventOptional(TypedDict, total=False):
guild_id: Snowflake
class ReactionClearEmojiEvent(_ReactionClearEmojiEventOptional):
channel_id: int
message_id: int
emoji: PartialEmoji
class MemberScreeningRejectEvent(TypedDict):
guild_id: Snowflake
user_id: Snowflake
class _IntegrationDeleteEventOptional(TypedDict, total=False):
application_id: Snowflake
class IntegrationDeleteEvent(_IntegrationDeleteEventOptional):
id: Snowflake
guild_id: Snowflake
class _TypingEventOptional(TypedDict, total=False):
guild_id: Snowflake
member: Member
class TypingEvent(_TypingEventOptional):
user_id: Snowflake
channel_id: Snowflake
timestamp: int
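# Illustrative payload sketch (not from the original file): these TypedDicts only describe
# the shape of raw gateway payloads, e.g. a guild message deletion:
#
#   payload: MessageDeleteEvent = {
#       "id": 991878228840976405,           # message id (example value)
#       "channel_id": 381963689470984203,
#       "guild_id": 336642139381301249,     # optional; absent for DM channels
#   }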
| 26.838095
| 75
| 0.792406
|
9d715c7f950bf32ec25cddcc64145f7116c79793
| 6,138
|
py
|
Python
|
eksisozluk.py
|
teko424/eksisozluk_entries
|
1e4c5158a0c2b854c675aabbc9ad92d6399da63a
|
[
"MIT"
] | null | null | null |
eksisozluk.py
|
teko424/eksisozluk_entries
|
1e4c5158a0c2b854c675aabbc9ad92d6399da63a
|
[
"MIT"
] | null | null | null |
eksisozluk.py
|
teko424/eksisozluk_entries
|
1e4c5158a0c2b854c675aabbc9ad92d6399da63a
|
[
"MIT"
] | null | null | null |
# TODO: user search and entry sequence-number features are still to be added.
import os
import sys
from subprocess import check_call
try:
import requests
import bs4
except (ImportError, ModuleNotFoundError):
packages = ["requests", "BeautifulSoup4", "lxml"]
for package in packages:
check_call(["pip", "install", package])
os.execv(sys.executable, ["python"] + sys.argv)
def eksi():
searchlist = []
c_range = 150
while 1:
q = input("aramak istediğiniz başlık: ")
searchlist.append(q)
try:
if q:
headers = ""
search = "https://eksisozluk.com/?q="
page = 1
url = search + q
r = requests.get(url, headers=headers)
source = bs4.BeautifulSoup(r.content, "lxml")
header = source.title
link = source.find("a", attrs={"itemprop": "url"}).get("href")
search = "https://eksisozluk.com"
url = search + link
entries = source.find_all("div", attrs={"class": "content"})
if not entries:
print("böyle bir şey yok. ama olabilir de.")
continue
dates = source.find_all("a", attrs={"class": "entry-date permalink"})
date_list = []
nicks = source.find_all("a", attrs={"class": "entry-author"})
nick_list = []
nd_num = 0
if len(entries) >= 10:
pagecount = source.find("div", {"data-currentpage": str(page)})
pagecount = \
str(pagecount)[str(pagecount).find("data-pagecount"):str(pagecount).find(">")].split("=")[1]
pagecount = pagecount.strip("\"")
else:
pagecount = 1
print("\n", header.text)
[nick_list.append(nick.text) for nick in nicks]
[date_list.append(date.text) for date in dates]
for num, entry in enumerate(entries, start=1):
print(f"\n {num} -) {entry.text} \n {date_list[nd_num]} "
f"\n\n - {nick_list[nd_num]} -")
if len(entry.text) <= c_range:
print("—" * len(entry.text))
else:
print("—" * c_range)
nd_num += 1
print(f"\nsayfa numarası: {page}\n{pagecount} sayfa mevcut")
while 1:
qa = input("""\nsonraki sayfaya geçmek içn (+) girin\n---------
\ngeri gitmek için (-) girin\n---------
\nsayfa numarası için bir sayı girin\n---------
\ngündemi görmek için (*) girin\n---------
\narama kaydı için (/) girin\n---------
\nson sayfa için (") girin\n---------
\nbaşka bir şey aramak için \"h\" girin: """)
page += 1
if qa == "+":
pass
elif qa == "-":
page -= 2
if page < 1:
print("\nçok geri gittin. biraz ileri gel.")
continue
elif qa.isdigit():
page = int(qa)
elif qa == "*":
page -= 1
pass
elif qa == "/":
print("\n")
for value in searchlist:
print(value)
page -= 1
continue
elif qa == "\"":
page -= 1
page = int(pagecount)
else:
break
pageurl = "?p=" + str(page)
urls = url + pageurl
r = requests.get(urls, headers=headers)
source = bs4.BeautifulSoup(r.content, "lxml")
if qa == "*":
entries = source.find_all("ul", attrs={"class": "topic-list partial"})
else:
entries = source.find_all("div", attrs={"class": "content"})
if not entries:
print("entry kalmadı.\nbaşka bir başlık aratabilirsiniz.")
continue
nicks = source.find_all("a", attrs={"class": "entry-author"})
nick_list = []
dates = source.find_all("a", attrs={"class": "entry-date permalink"})
date_list = []
nd_num = 0
print("\n", header.text)
if qa != "*":
[nick_list.append(nick.text) for nick in nicks]
[date_list.append(date.text) for date in dates]
for num, entry in enumerate(entries, start=1):
print(f"\n {num} -) {entry.text} \n {date_list[nd_num]} "
f"\n\n - {nick_list[nd_num]} -")
if len(entry.text) <= c_range:
print("—" * len(entry.text))
else:
print("—" * c_range)
nd_num += 1
print(f"\nsayfa numarası: {page}\n{pagecount} sayfa mevcut")
else:
for title in entries:
print(title.text)
else:
break
except bs4.FeatureNotFound:
check_call(["pip", "install", "lxml"])
os.execv(sys.executable, ["python"] + sys.argv)
except requests.exceptions.ConnectionError:
print("bağlantınızı kontrol edin")
if __name__ == "__main__":
eksi()
| 44.478261
| 117
| 0.40404
|
4efe7aa4b340d1e92817301158cf94aa1f808d19
| 739
|
py
|
Python
|
pyContabo/types/licenses.py
|
xLeon-python/pyContabo
|
9863bd1ab0f95b50186902c90c40ce53d3026afd
|
[
"MIT"
] | 5
|
2022-01-03T10:34:35.000Z
|
2022-01-27T10:34:41.000Z
|
pyContabo/types/licenses.py
|
xLeon-python/pyContabo
|
9863bd1ab0f95b50186902c90c40ce53d3026afd
|
[
"MIT"
] | 4
|
2022-01-14T10:37:57.000Z
|
2022-01-20T20:44:54.000Z
|
pyContabo/types/licenses.py
|
xLeon-python/pyContabo
|
9863bd1ab0f95b50186902c90c40ce53d3026afd
|
[
"MIT"
] | 1
|
2022-01-20T14:59:32.000Z
|
2022-01-20T14:59:32.000Z
|
from enum import Enum
class license(Enum):
PleskHost = "PleskHost"
PleskPro = "PleskPro"
PleskAdmin = "PleskAdmin"
cPanel5 = "cPanel5"
cPanel30 = "cPanel30"
cPanel50 = "cPanel50"
cPanel100 = "cPanel100"
cPanel150 = "cPanel150"
cPanel200 = "cPanel200"
cPanel250 = "cPanel250"
cPanel300 = "cPanel300"
cPanel350 = "cPanel350"
cPanel400 = "cPanel400"
cPanel450 = "cPanel450"
cPanel500 = "cPanel500"
cPanel550 = "cPanel550"
cPanel600 = "cPanel600"
cPanel650 = "cPanel650"
cPanel700 = "cPanel700"
cPanel750 = "cPanel750"
cPanel800 = "cPanel800"
cPanel850 = "cPanel850"
cPanel900 = "cPanel900"
cPanel950 = "cPanel950"
cPanel1000 = "cPanel1000"
| 24.633333
| 29
| 0.64682
|
45d6e11b18fc8ca746bae139df7d082c8efd5b60
| 13,008
|
py
|
Python
|
PathPlanning/HybridAStar/hybrid_a_star.py
|
pruidzeko/PythonRobotics
|
5ff9b70d737121c2947d844ecfb1fa07abdd210c
|
[
"MIT"
] | 5
|
2021-01-24T12:06:36.000Z
|
2022-03-05T18:32:15.000Z
|
PathPlanning/HybridAStar/hybrid_a_star.py
|
pruidzeko/PythonRobotics
|
5ff9b70d737121c2947d844ecfb1fa07abdd210c
|
[
"MIT"
] | 61
|
2020-08-17T20:02:09.000Z
|
2022-03-14T20:01:01.000Z
|
PathPlanning/HybridAStar/hybrid_a_star.py
|
pruidzeko/PythonRobotics
|
5ff9b70d737121c2947d844ecfb1fa07abdd210c
|
[
"MIT"
] | 1
|
2021-05-02T09:26:52.000Z
|
2021-05-02T09:26:52.000Z
|
"""
Hybrid A* path planning
author: Zheng Zh (@Zhengzh)
"""
import heapq
import math
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import cKDTree
sys.path.append(os.path.dirname(os.path.abspath(__file__))
+ "/../ReedsSheppPath")
try:
from a_star_heuristic import dp_planning
import reeds_shepp_path_planning as rs
from car import move, check_car_collision, MAX_STEER, WB, plot_car
except Exception:
raise
XY_GRID_RESOLUTION = 2.0 # [m]
YAW_GRID_RESOLUTION = np.deg2rad(15.0) # [rad]
MOTION_RESOLUTION = 0.1 # [m] path interpolate resolution
N_STEER = 20 # number of steer command
VR = 1.0 # robot radius
SB_COST = 100.0 # switch back penalty cost
BACK_COST = 5.0 # backward penalty cost
STEER_CHANGE_COST = 5.0 # steer angle change penalty cost
STEER_COST = 1.0 # steer angle penalty cost
H_COST = 5.0 # Heuristic cost
show_animation = True
class Node:
def __init__(self, x_ind, y_ind, yaw_ind, direction,
x_list, y_list, yaw_list, directions,
steer=0.0, parent_index=None, cost=None):
self.x_index = x_ind
self.y_index = y_ind
self.yaw_index = yaw_ind
self.direction = direction
self.x_list = x_list
self.y_list = y_list
self.yaw_list = yaw_list
self.directions = directions
self.steer = steer
self.parent_index = parent_index
self.cost = cost
class Path:
def __init__(self, x_list, y_list, yaw_list, direction_list, cost):
self.x_list = x_list
self.y_list = y_list
self.yaw_list = yaw_list
self.direction_list = direction_list
self.cost = cost
class Config:
def __init__(self, ox, oy, xy_resolution, yaw_resolution):
min_x_m = min(ox)
min_y_m = min(oy)
max_x_m = max(ox)
max_y_m = max(oy)
ox.append(min_x_m)
oy.append(min_y_m)
ox.append(max_x_m)
oy.append(max_y_m)
self.min_x = round(min_x_m / xy_resolution)
self.min_y = round(min_y_m / xy_resolution)
self.max_x = round(max_x_m / xy_resolution)
self.max_y = round(max_y_m / xy_resolution)
self.x_w = round(self.max_x - self.min_x)
self.y_w = round(self.max_y - self.min_y)
self.min_yaw = round(- math.pi / yaw_resolution) - 1
self.max_yaw = round(math.pi / yaw_resolution)
self.yaw_w = round(self.max_yaw - self.min_yaw)
def calc_motion_inputs():
for steer in np.concatenate((np.linspace(-MAX_STEER, MAX_STEER,
N_STEER), [0.0])):
for d in [1, -1]:
yield [steer, d]
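# Illustrative note (not part of the original file): with N_STEER = 20 the generator above
# yields 21 steering angles (the 20-point linspace plus the explicit 0.0), each in both
# driving directions, i.e. 42 motion primitives per node expansion.
#
#   motions = list(calc_motion_inputs())
#   len(motions)   # -> 42
#   motions[0]     # -> [-MAX_STEER, 1]  (maximum steering angle, forward)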
def get_neighbors(current, config, ox, oy, kd_tree):
for steer, d in calc_motion_inputs():
node = calc_next_node(current, steer, d, config, ox, oy, kd_tree)
if node and verify_index(node, config):
yield node
def calc_next_node(current, steer, direction, config, ox, oy, kd_tree):
x, y, yaw = current.x_list[-1], current.y_list[-1], current.yaw_list[-1]
arc_l = XY_GRID_RESOLUTION * 1.5
x_list, y_list, yaw_list = [], [], []
for _ in np.arange(0, arc_l, MOTION_RESOLUTION):
x, y, yaw = move(x, y, yaw, MOTION_RESOLUTION * direction, steer)
x_list.append(x)
y_list.append(y)
yaw_list.append(yaw)
if not check_car_collision(x_list, y_list, yaw_list, ox, oy, kd_tree):
return None
d = direction == 1
x_ind = round(x / XY_GRID_RESOLUTION)
y_ind = round(y / XY_GRID_RESOLUTION)
yaw_ind = round(yaw / YAW_GRID_RESOLUTION)
added_cost = 0.0
if d != current.direction:
added_cost += SB_COST
# steer penalty
added_cost += STEER_COST * abs(steer)
# steer change penalty
added_cost += STEER_CHANGE_COST * abs(current.steer - steer)
cost = current.cost + added_cost + arc_l
node = Node(x_ind, y_ind, yaw_ind, d, x_list,
y_list, yaw_list, [d],
parent_index=calc_index(current, config),
cost=cost, steer=steer)
return node
def is_same_grid(n1, n2):
if n1.x_index == n2.x_index \
and n1.y_index == n2.y_index \
and n1.yaw_index == n2.yaw_index:
return True
return False
def analytic_expansion(current, goal, ox, oy, kd_tree):
start_x = current.x_list[-1]
start_y = current.y_list[-1]
start_yaw = current.yaw_list[-1]
goal_x = goal.x_list[-1]
goal_y = goal.y_list[-1]
goal_yaw = goal.yaw_list[-1]
max_curvature = math.tan(MAX_STEER) / WB
paths = rs.calc_paths(start_x, start_y, start_yaw,
goal_x, goal_y, goal_yaw,
max_curvature, step_size=MOTION_RESOLUTION)
if not paths:
return None
best_path, best = None, None
for path in paths:
if check_car_collision(path.x, path.y, path.yaw, ox, oy, kd_tree):
cost = calc_rs_path_cost(path)
if not best or best > cost:
best = cost
best_path = path
return best_path
def update_node_with_analytic_expansion(current, goal,
c, ox, oy, kd_tree):
path = analytic_expansion(current, goal, ox, oy, kd_tree)
if path:
if show_animation:
plt.plot(path.x, path.y)
f_x = path.x[1:]
f_y = path.y[1:]
f_yaw = path.yaw[1:]
f_cost = current.cost + calc_rs_path_cost(path)
f_parent_index = calc_index(current, c)
fd = []
for d in path.directions[1:]:
fd.append(d >= 0)
f_steer = 0.0
f_path = Node(current.x_index, current.y_index, current.yaw_index,
current.direction, f_x, f_y, f_yaw, fd,
cost=f_cost, parent_index=f_parent_index, steer=f_steer)
return True, f_path
return False, None
def calc_rs_path_cost(reed_shepp_path):
cost = 0.0
for length in reed_shepp_path.lengths:
if length >= 0: # forward
cost += length
else: # back
cost += abs(length) * BACK_COST
# switch back penalty
for i in range(len(reed_shepp_path.lengths) - 1):
# switch back
if reed_shepp_path.lengths[i] * reed_shepp_path.lengths[i + 1] < 0.0:
cost += SB_COST
# steer penalty
for course_type in reed_shepp_path.ctypes:
if course_type != "S": # curve
cost += STEER_COST * abs(MAX_STEER)
    # steer change penalty
# calc steer profile
n_ctypes = len(reed_shepp_path.ctypes)
u_list = [0.0] * n_ctypes
for i in range(n_ctypes):
if reed_shepp_path.ctypes[i] == "R":
u_list[i] = - MAX_STEER
elif reed_shepp_path.ctypes[i] == "L":
u_list[i] = MAX_STEER
for i in range(len(reed_shepp_path.ctypes) - 1):
cost += STEER_CHANGE_COST * abs(u_list[i + 1] - u_list[i])
return cost
def hybrid_a_star_planning(start, goal, ox, oy, xy_resolution, yaw_resolution):
"""
start: start node
goal: goal node
ox: x position list of Obstacles [m]
oy: y position list of Obstacles [m]
xy_resolution: grid resolution [m]
yaw_resolution: yaw angle resolution [rad]
"""
start[2], goal[2] = rs.pi_2_pi(start[2]), rs.pi_2_pi(goal[2])
tox, toy = ox[:], oy[:]
obstacle_kd_tree = cKDTree(np.vstack((tox, toy)).T)
config = Config(tox, toy, xy_resolution, yaw_resolution)
start_node = Node(round(start[0] / xy_resolution),
round(start[1] / xy_resolution),
round(start[2] / yaw_resolution), True,
[start[0]], [start[1]], [start[2]], [True], cost=0)
goal_node = Node(round(goal[0] / xy_resolution),
round(goal[1] / xy_resolution),
round(goal[2] / yaw_resolution), True,
[goal[0]], [goal[1]], [goal[2]], [True])
openList, closedList = {}, {}
_, _, h_dp = dp_planning(start_node.x_list[-1], start_node.y_list[-1],
goal_node.x_list[-1], goal_node.y_list[-1],
ox, oy, xy_resolution, VR)
pq = []
openList[calc_index(start_node, config)] = start_node
heapq.heappush(pq, (calc_cost(start_node, h_dp, config),
calc_index(start_node, config)))
final_path = None
while True:
if not openList:
print("Error: Cannot find path, No open set")
return [], [], []
cost, c_id = heapq.heappop(pq)
if c_id in openList:
current = openList.pop(c_id)
closedList[c_id] = current
else:
continue
if show_animation: # pragma: no cover
plt.plot(current.x_list[-1], current.y_list[-1], "xc")
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
if len(closedList.keys()) % 10 == 0:
plt.pause(0.001)
is_updated, final_path = update_node_with_analytic_expansion(
current, goal_node, config, ox, oy, obstacle_kd_tree)
if is_updated:
print("path found")
break
for neighbor in get_neighbors(current, config, ox, oy,
obstacle_kd_tree):
neighbor_index = calc_index(neighbor, config)
if neighbor_index in closedList:
continue
            if neighbor_index not in openList \
                    or openList[neighbor_index].cost > neighbor.cost:
heapq.heappush(
pq, (calc_cost(neighbor, h_dp, config),
neighbor_index))
openList[neighbor_index] = neighbor
path = get_final_path(closedList, final_path)
return path
def calc_cost(n, h_dp, c):
ind = (n.y_index - c.min_y) * c.x_w + (n.x_index - c.min_x)
if ind not in h_dp:
return n.cost + 999999999 # collision cost
return n.cost + H_COST * h_dp[ind].cost
def get_final_path(closed, goal_node):
reversed_x, reversed_y, reversed_yaw = \
list(reversed(goal_node.x_list)), list(reversed(goal_node.y_list)), \
list(reversed(goal_node.yaw_list))
direction = list(reversed(goal_node.directions))
nid = goal_node.parent_index
final_cost = goal_node.cost
while nid:
n = closed[nid]
reversed_x.extend(list(reversed(n.x_list)))
reversed_y.extend(list(reversed(n.y_list)))
reversed_yaw.extend(list(reversed(n.yaw_list)))
direction.extend(list(reversed(n.directions)))
nid = n.parent_index
reversed_x = list(reversed(reversed_x))
reversed_y = list(reversed(reversed_y))
reversed_yaw = list(reversed(reversed_yaw))
direction = list(reversed(direction))
# adjust first direction
direction[0] = direction[1]
path = Path(reversed_x, reversed_y, reversed_yaw, direction, final_cost)
return path
def verify_index(node, c):
x_ind, y_ind = node.x_index, node.y_index
if c.min_x <= x_ind <= c.max_x and c.min_y <= y_ind <= c.max_y:
return True
return False
def calc_index(node, c):
ind = (node.yaw_index - c.min_yaw) * c.x_w * c.y_w + \
(node.y_index - c.min_y) * c.x_w + (node.x_index - c.min_x)
if ind <= 0:
print("Error(calc_index):", ind)
return ind
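# Illustrative note (not part of the original file): calc_index() flattens the discrete
# (x, y, yaw) pose into a single integer key (x varies fastest, then y, then yaw), so the
# open and closed dictionaries treat "same grid cell and same yaw bucket" as the same
# search node.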
def main():
print("Start Hybrid A* planning")
ox, oy = [], []
for i in range(60):
ox.append(i)
oy.append(0.0)
for i in range(60):
ox.append(60.0)
oy.append(i)
for i in range(61):
ox.append(i)
oy.append(60.0)
for i in range(61):
ox.append(0.0)
oy.append(i)
for i in range(40):
ox.append(20.0)
oy.append(i)
for i in range(40):
ox.append(40.0)
oy.append(60.0 - i)
# Set Initial parameters
start = [10.0, 10.0, np.deg2rad(90.0)]
goal = [50.0, 50.0, np.deg2rad(-90.0)]
print("start : ", start)
print("goal : ", goal)
if show_animation:
plt.plot(ox, oy, ".k")
rs.plot_arrow(start[0], start[1], start[2], fc='g')
rs.plot_arrow(goal[0], goal[1], goal[2])
plt.grid(True)
plt.axis("equal")
path = hybrid_a_star_planning(
start, goal, ox, oy, XY_GRID_RESOLUTION, YAW_GRID_RESOLUTION)
x = path.x_list
y = path.y_list
yaw = path.yaw_list
if show_animation:
for i_x, i_y, i_yaw in zip(x, y, yaw):
plt.cla()
plt.plot(ox, oy, ".k")
plt.plot(x, y, "-r", label="Hybrid A* path")
plt.grid(True)
plt.axis("equal")
plot_car(i_x, i_y, i_yaw)
plt.pause(0.0001)
print(__file__ + " done!!")
if __name__ == '__main__':
main()
| 29.100671
| 79
| 0.58956
|
d29aa590873ef7d8f492b2cd43450e694776f457
| 1,216
|
py
|
Python
|
python_toolbox/address_tools/shared.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 119
|
2015-02-05T17:59:47.000Z
|
2022-02-21T22:43:40.000Z
|
python_toolbox/address_tools/shared.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 4
|
2019-04-24T14:01:14.000Z
|
2020-05-21T12:03:29.000Z
|
python_toolbox/address_tools/shared.py
|
hboshnak/python_toolbox
|
cb9ef64b48f1d03275484d707dc5079b6701ad0c
|
[
"MIT"
] | 14
|
2015-03-30T06:30:42.000Z
|
2021-12-24T23:45:11.000Z
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''Various objects and tools for `address_tools`.'''
import re
_address_pattern = re.compile(
r"^(?P<address>([a-zA-Z_][0-9a-zA-Z_]*)(\.[a-zA-Z_][0-9a-zA-Z_]*)*)$"
)
'''Pattern for Python addresses, like 'email.encoders'.'''
_contained_address_pattern = re.compile(
r"(?P<address>([a-zA-Z_][0-9a-zA-Z_]*)(\.[a-zA-Z_][0-9a-zA-Z_]*)*)"
)
'''
Pattern for strings containing Python addresses, like '{email.encoders: 1}'.
'''
def _get_parent_and_dict_from_namespace(namespace):
'''
Extract the parent object and `dict` from `namespace`.
For the `namespace`, the user can give either a parent object
(`getattr(namespace, address) is obj`) or a `dict`-like namespace
(`namespace[address] is obj`).
Returns `(parent_object, namespace_dict)`.
'''
if hasattr(namespace, '__getitem__') and hasattr(namespace, 'keys'):
parent_object = None
namespace_dict = namespace
else:
parent_object = namespace
namespace_dict = vars(parent_object)
return (parent_object, namespace_dict)
def is_address(string):
return bool(_address_pattern.match(string))
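# Usage sketch (illustration only):
#
#   is_address('email.encoders')       # -> True
#   is_address('{email.encoders: 1}')  # -> False (only a bare dotted address matches)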
| 26.434783
| 76
| 0.674342
|
577988bc58ca65fe8c92317e1008aabab8080c44
| 12,475
|
py
|
Python
|
homeassistant/components/media_player/yamaha.py
|
dotlambda/home-assistant
|
68d2851ecf2dcd05bd5197240a31980d4fee8d2e
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/media_player/yamaha.py
|
dotlambda/home-assistant
|
68d2851ecf2dcd05bd5197240a31980d4fee8d2e
|
[
"Apache-2.0"
] | 5
|
2022-03-01T06:31:03.000Z
|
2022-03-31T07:20:45.000Z
|
homeassistant/components/media_player/yamaha.py
|
dotlambda/home-assistant
|
68d2851ecf2dcd05bd5197240a31980d4fee8d2e
|
[
"Apache-2.0"
] | null | null | null |
"""
Support for Yamaha Receivers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.yamaha/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_SELECT_SOURCE, SUPPORT_PLAY_MEDIA, SUPPORT_PAUSE, SUPPORT_STOP,
SUPPORT_NEXT_TRACK, SUPPORT_PREVIOUS_TRACK, SUPPORT_PLAY,
MEDIA_TYPE_MUSIC, MEDIA_PLAYER_SCHEMA, DOMAIN,
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.const import (CONF_NAME, CONF_HOST, STATE_OFF, STATE_ON,
STATE_PLAYING, STATE_IDLE, ATTR_ENTITY_ID)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['rxv==0.5.1']
_LOGGER = logging.getLogger(__name__)
SUPPORT_YAMAHA = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
CONF_SOURCE_NAMES = 'source_names'
CONF_SOURCE_IGNORE = 'source_ignore'
CONF_ZONE_NAMES = 'zone_names'
CONF_ZONE_IGNORE = 'zone_ignore'
DEFAULT_NAME = 'Yamaha Receiver'
DATA_YAMAHA = 'yamaha_known_receivers'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SOURCE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ZONE_IGNORE, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SOURCE_NAMES, default={}): {cv.string: cv.string},
vol.Optional(CONF_ZONE_NAMES, default={}): {cv.string: cv.string},
})
SERVICE_ENABLE_OUTPUT = 'yamaha_enable_output'
ATTR_PORT = 'port'
ATTR_ENABLED = 'enabled'
ENABLE_OUTPUT_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_PORT): cv.string,
vol.Required(ATTR_ENABLED): cv.boolean
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Yamaha platform."""
import rxv
# Keep track of configured receivers so that we don't end up
# discovering a receiver dynamically that we have static config
# for. Map each device from its unique_id to an instance since
# YamahaDevice is not hashable (thus not possible to add to a set).
if hass.data.get(DATA_YAMAHA) is None:
hass.data[DATA_YAMAHA] = {}
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
source_ignore = config.get(CONF_SOURCE_IGNORE)
source_names = config.get(CONF_SOURCE_NAMES)
zone_ignore = config.get(CONF_ZONE_IGNORE)
zone_names = config.get(CONF_ZONE_NAMES)
if discovery_info is not None:
name = discovery_info.get('name')
model = discovery_info.get('model_name')
ctrl_url = discovery_info.get('control_url')
desc_url = discovery_info.get('description_url')
receivers = rxv.RXV(
ctrl_url, model_name=model, friendly_name=name,
unit_desc_url=desc_url).zone_controllers()
_LOGGER.info("Receivers: %s", receivers)
# when we are dynamically discovered config is empty
zone_ignore = []
elif host is None:
receivers = []
for recv in rxv.find():
receivers.extend(recv.zone_controllers())
else:
ctrl_url = "http://{}:80/YamahaRemoteControl/ctrl".format(host)
receivers = rxv.RXV(ctrl_url, name).zone_controllers()
devices = []
for receiver in receivers:
if receiver.zone in zone_ignore:
continue
device = YamahaDevice(name, receiver, source_ignore,
source_names, zone_names)
# Only add device if it's not already added
if device.unique_id not in hass.data[DATA_YAMAHA]:
hass.data[DATA_YAMAHA][device.unique_id] = device
devices.append(device)
else:
_LOGGER.debug('Ignoring duplicate receiver %s', name)
def service_handler(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
devices = [device for device in hass.data[DATA_YAMAHA].values()
if not entity_ids or device.entity_id in entity_ids]
for device in devices:
port = service.data[ATTR_PORT]
enabled = service.data[ATTR_ENABLED]
device.enable_output(port, enabled)
device.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_ENABLE_OUTPUT, service_handler,
schema=ENABLE_OUTPUT_SCHEMA)
add_devices(devices)
class YamahaDevice(MediaPlayerDevice):
"""Representation of a Yamaha device."""
def __init__(self, name, receiver, source_ignore,
source_names, zone_names):
"""Initialize the Yamaha Receiver."""
self.receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._current_source = None
self._source_list = None
self._source_ignore = source_ignore or []
self._source_names = source_names or {}
self._zone_names = zone_names or {}
self._reverse_mapping = None
self._playback_support = None
self._is_playback_supported = False
self._play_status = None
self._name = name
self._zone = receiver.zone
@property
def unique_id(self):
"""Return an unique ID."""
return '{0}:{1}'.format(self.receiver.ctrl_url, self._zone)
def update(self):
"""Get the latest details from the device."""
self._play_status = self.receiver.play_status()
if self.receiver.on:
if self._play_status is None:
self._pwstate = STATE_ON
elif self._play_status.playing:
self._pwstate = STATE_PLAYING
else:
self._pwstate = STATE_IDLE
else:
self._pwstate = STATE_OFF
self._muted = self.receiver.mute
self._volume = (self.receiver.volume / 100) + 1
if self.source_list is None:
self.build_source_list()
current_source = self.receiver.input
self._current_source = self._source_names.get(
current_source, current_source)
self._playback_support = self.receiver.get_playback_support()
self._is_playback_supported = self.receiver.is_playback_supported(
self._current_source)
def build_source_list(self):
"""Build the source list."""
self._reverse_mapping = {alias: source for source, alias in
self._source_names.items()}
self._source_list = sorted(
self._source_names.get(source, source) for source in
self.receiver.inputs()
if source not in self._source_ignore)
@property
def name(self):
"""Return the name of the device."""
name = self._name
zone_name = self._zone_names.get(self._zone, self._zone)
if zone_name != "Main_Zone":
# Zone will be one of Main_Zone, Zone_2, Zone_3
name += " " + zone_name.replace('_', ' ')
return name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported_features = SUPPORT_YAMAHA
supports = self._playback_support
mapping = {'play': (SUPPORT_PLAY | SUPPORT_PLAY_MEDIA),
'pause': SUPPORT_PAUSE,
'stop': SUPPORT_STOP,
'skip_f': SUPPORT_NEXT_TRACK,
'skip_r': SUPPORT_PREVIOUS_TRACK}
for attr, feature in mapping.items():
if getattr(supports, attr, False):
supported_features |= feature
return supported_features
def turn_off(self):
"""Turn off media player."""
self.receiver.on = False
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
receiver_vol = 100 - (volume * 100)
negative_receiver_vol = -receiver_vol
self.receiver.volume = negative_receiver_vol
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.receiver.mute = mute
def turn_on(self):
"""Turn the media player on."""
self.receiver.on = True
self._volume = (self.receiver.volume / 100) + 1
def media_play(self):
"""Send play command."""
self._call_playback_function(self.receiver.play, "play")
def media_pause(self):
"""Send pause command."""
self._call_playback_function(self.receiver.pause, "pause")
def media_stop(self):
"""Send stop command."""
self._call_playback_function(self.receiver.stop, "stop")
def media_previous_track(self):
"""Send previous track command."""
self._call_playback_function(self.receiver.previous, "previous track")
def media_next_track(self):
"""Send next track command."""
self._call_playback_function(self.receiver.next, "next track")
def _call_playback_function(self, function, function_text):
import rxv
try:
function()
except rxv.exceptions.ResponseException:
_LOGGER.warning(
"Failed to execute %s on %s", function_text, self._name)
def select_source(self, source):
"""Select input source."""
self.receiver.input = self._reverse_mapping.get(source, source)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from an ID.
This exposes a pass through for various input sources in the
Yamaha to direct play certain kinds of media. media_type is
treated as the input type that we are setting, and media id is
specific to it.
For the NET RADIO mediatype the format for ``media_id`` is a
"path" in your vtuner hierarchy. For instance:
``Bookmarks>Internet>Radio Paradise``. The separators are
``>`` and the parts of this are navigated by name behind the
scenes. There is a looping construct built into the yamaha
library to do this with a fallback timeout if the vtuner
service is unresponsive.
NOTE: this might take a while, because the only API interface
for setting the net radio station emulates button pressing and
        navigating through the net radio menu hierarchy. And each sub
menu must be fetched by the receiver from the vtuner service.
"""
if media_type == "NET RADIO":
self.receiver.net_radio(media_id)
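    # Usage sketch (illustration only): per the docstring above, playing a vtuner station
    # from a YamahaDevice instance boils down to
    #
    #   device.play_media("NET RADIO", "Bookmarks>Internet>Radio Paradise")
    #
    # where the receiver resolves the station by walking the menu hierarchy name by name.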
def enable_output(self, port, enabled):
"""Enable or disable an output port.."""
self.receiver.enable_output(port, enabled)
@property
def media_artist(self):
"""Artist of current playing media."""
if self._play_status is not None:
return self._play_status.artist
@property
def media_album_name(self):
"""Album of current playing media."""
if self._play_status is not None:
return self._play_status.album
@property
def media_content_type(self):
"""Content type of current playing media."""
# Loose assumption that if playback is supported, we are playing music
if self._is_playback_supported:
return MEDIA_TYPE_MUSIC
return None
@property
def media_title(self):
"""Artist of current playing media."""
if self._play_status is not None:
song = self._play_status.song
station = self._play_status.station
# If both song and station is available, print both, otherwise
# just the one we have.
if song and station:
return '{}: {}'.format(station, song)
return song or station
| 34.943978
| 79
| 0.647375
|
1a4f0876e2404ab4eb5abae22c4c2ec27f4f0d12
| 13,209
|
py
|
Python
|
bson/codec_options.py
|
educatornum/mongo-python-driver
|
2cb34e4efc84445abb18c45572b2a35c4a81d3e5
|
[
"Apache-2.0"
] | 1
|
2020-03-09T02:18:46.000Z
|
2020-03-09T02:18:46.000Z
|
bson/codec_options.py
|
educatornum/mongo-python-driver
|
2cb34e4efc84445abb18c45572b2a35c4a81d3e5
|
[
"Apache-2.0"
] | null | null | null |
bson/codec_options.py
|
educatornum/mongo-python-driver
|
2cb34e4efc84445abb18c45572b2a35c4a81d3e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for specifying BSON codec options."""
import datetime
from abc import abstractmethod
from collections import namedtuple
from bson.py3compat import ABC, abc, abstractproperty, string_type
from bson.binary import (ALL_UUID_REPRESENTATIONS,
PYTHON_LEGACY,
UUID_REPRESENTATION_NAMES)
_RAW_BSON_DOCUMENT_MARKER = 101
def _raw_document_class(document_class):
"""Determine if a document_class is a RawBSONDocument class."""
marker = getattr(document_class, '_type_marker', None)
return marker == _RAW_BSON_DOCUMENT_MARKER
class TypeEncoder(ABC):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to one of the types BSON understands.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding.
"""
@abstractproperty
def python_type(self):
"""The Python type to be converted into something serializable."""
pass
@abstractmethod
def transform_python(self, value):
"""Convert the given Python object into something serializable."""
pass
class TypeDecoder(ABC):
"""Base class for defining type codec classes which describe how a
BSON type can be transformed to a custom type.
Codec classes must implement the ``bson_type`` attribute, and the
``transform_bson`` method to support decoding.
"""
@abstractproperty
def bson_type(self):
"""The BSON type to be converted into our own type."""
pass
@abstractmethod
def transform_bson(self, value):
"""Convert the given BSON value into our own type."""
pass
class TypeCodec(TypeEncoder, TypeDecoder):
"""Base class for defining type codec classes which describe how a
custom type can be transformed to/from one of the types BSON already
understands, and can encode/decode.
Codec classes must implement the ``python_type`` attribute, and the
``transform_python`` method to support encoding, as well as the
``bson_type`` attribute, and the ``transform_bson`` method to support
decoding.
"""
pass
class TypeRegistry(object):
"""Encapsulates type codecs used in encoding and / or decoding BSON, as
well as the fallback encoder. Type registries cannot be modified after
instantiation.
``TypeRegistry`` can be initialized with an iterable of type codecs, and
a callable for the fallback encoder::
>>> from bson.codec_options import TypeRegistry
>>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...],
... fallback_encoder)
:Parameters:
- `type_codecs` (optional): iterable of type codec instances. If
``type_codecs`` contains multiple codecs that transform a single
python or BSON type, the transformation specified by the type codec
occurring last prevails.
- `fallback_encoder` (optional): callable that accepts a single,
unencodable python value and transforms it into a type that BSON can
encode.
"""
def __init__(self, type_codecs=None, fallback_encoder=None):
self.__type_codecs = list(type_codecs or [])
self._fallback_encoder = fallback_encoder
self._encoder_map = {}
self._decoder_map = {}
if self._fallback_encoder is not None:
if not callable(fallback_encoder):
raise TypeError("fallback_encoder %r is not a callable" % (
fallback_encoder))
for codec in self.__type_codecs:
is_valid_codec = False
if isinstance(codec, TypeEncoder):
self._validate_type_encoder(codec)
is_valid_codec = True
self._encoder_map[codec.python_type] = codec.transform_python
if isinstance(codec, TypeDecoder):
is_valid_codec = True
self._decoder_map[codec.bson_type] = codec.transform_bson
if not is_valid_codec:
raise TypeError(
"Expected an instance of %s, %s, or %s, got %r instead" % (
TypeEncoder.__name__, TypeDecoder.__name__,
TypeCodec.__name__, codec))
def _validate_type_encoder(self, codec):
from bson import _BUILT_IN_TYPES
for pytype in _BUILT_IN_TYPES:
if issubclass(codec.python_type, pytype):
err_msg = ("TypeEncoders cannot change how built-in types are "
"encoded (encoder %s transforms type %s)" %
(codec, pytype))
raise TypeError(err_msg)
def __repr__(self):
return ('%s(type_codecs=%r, fallback_encoder=%r)' % (
self.__class__.__name__, self.__type_codecs,
self._fallback_encoder))
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return ((self._decoder_map == other._decoder_map) and
(self._encoder_map == other._encoder_map) and
(self._fallback_encoder == other._fallback_encoder))
_options_base = namedtuple(
'CodecOptions',
('document_class', 'tz_aware', 'uuid_representation',
'unicode_decode_error_handler', 'tzinfo', 'type_registry'))
class CodecOptions(_options_base):
"""Encapsulates options used encoding and / or decoding BSON.
The `document_class` option is used to define a custom type for use
decoding BSON documents. Access to the underlying raw BSON bytes for
a document is available using the :class:`~bson.raw_bson.RawBSONDocument`
type::
>>> from bson.raw_bson import RawBSONDocument
>>> from bson.codec_options import CodecOptions
>>> codec_options = CodecOptions(document_class=RawBSONDocument)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc.raw
'\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00'
The document class can be any type that inherits from
:class:`~collections.MutableMapping`::
>>> class AttributeDict(dict):
... # A dict that supports attribute access.
... def __getattr__(self, key):
... return self[key]
... def __setattr__(self, key, value):
... self[key] = value
...
>>> codec_options = CodecOptions(document_class=AttributeDict)
>>> coll = db.get_collection('test', codec_options=codec_options)
>>> doc = coll.find_one()
>>> doc._id
ObjectId('5b3016359110ea14e8c58b93')
See :doc:`/examples/datetimes` for examples using the `tz_aware` and
`tzinfo` options.
See :class:`~bson.binary.UUIDLegacy` for examples using the
`uuid_representation` option.
:Parameters:
- `document_class`: BSON documents returned in queries will be decoded
to an instance of this class. Must be a subclass of
:class:`~collections.MutableMapping`. Defaults to :class:`dict`.
- `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone
aware instances of :class:`~datetime.datetime`. Otherwise they will be
naive. Defaults to ``False``.
- `uuid_representation`: The BSON representation to use when encoding
and decoding instances of :class:`~uuid.UUID`. Defaults to
:data:`~bson.binary.PYTHON_LEGACY`.
- `unicode_decode_error_handler`: The error handler to apply when
a Unicode-related error occurs during BSON decoding that would
otherwise raise :exc:`UnicodeDecodeError`. Valid options include
'strict', 'replace', and 'ignore'. Defaults to 'strict'.
- `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the
timezone to/from which :class:`~datetime.datetime` objects should be
encoded/decoded.
- `type_registry`: Instance of :class:`TypeRegistry` used to customize
encoding and decoding behavior.
.. warning:: Care must be taken when changing
`unicode_decode_error_handler` from its default value ('strict').
The 'replace' and 'ignore' modes should not be used when documents
retrieved from the server will be modified in the client application
and stored back to the server.
"""
def __new__(cls, document_class=dict,
tz_aware=False, uuid_representation=PYTHON_LEGACY,
unicode_decode_error_handler="strict",
tzinfo=None, type_registry=None):
if not (issubclass(document_class, abc.MutableMapping) or
_raw_document_class(document_class)):
raise TypeError("document_class must be dict, bson.son.SON, "
"bson.raw_bson.RawBSONDocument, or a "
"sublass of collections.MutableMapping")
if not isinstance(tz_aware, bool):
raise TypeError("tz_aware must be True or False")
if uuid_representation not in ALL_UUID_REPRESENTATIONS:
raise ValueError("uuid_representation must be a value "
"from bson.binary.ALL_UUID_REPRESENTATIONS")
        if not (unicode_decode_error_handler is None or
                isinstance(unicode_decode_error_handler, string_type)):
raise ValueError("unicode_decode_error_handler must be a string "
"or None")
if tzinfo is not None:
if not isinstance(tzinfo, datetime.tzinfo):
raise TypeError(
"tzinfo must be an instance of datetime.tzinfo")
if not tz_aware:
raise ValueError(
"cannot specify tzinfo without also setting tz_aware=True")
type_registry = type_registry or TypeRegistry()
if not isinstance(type_registry, TypeRegistry):
raise TypeError("type_registry must be an instance of TypeRegistry")
return tuple.__new__(
cls, (document_class, tz_aware, uuid_representation,
unicode_decode_error_handler, tzinfo, type_registry))
def _arguments_repr(self):
"""Representation of the arguments used to create this object."""
document_class_repr = (
'dict' if self.document_class is dict
else repr(self.document_class))
uuid_rep_repr = UUID_REPRESENTATION_NAMES.get(self.uuid_representation,
self.uuid_representation)
return ('document_class=%s, tz_aware=%r, uuid_representation=%s, '
'unicode_decode_error_handler=%r, tzinfo=%r, '
'type_registry=%r' %
(document_class_repr, self.tz_aware, uuid_rep_repr,
self.unicode_decode_error_handler, self.tzinfo,
self.type_registry))
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._arguments_repr())
def with_options(self, **kwargs):
"""Make a copy of this CodecOptions, overriding some options::
>>> from bson.codec_options import DEFAULT_CODEC_OPTIONS
>>> DEFAULT_CODEC_OPTIONS.tz_aware
False
>>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
>>> options.tz_aware
True
.. versionadded:: 3.5
"""
return CodecOptions(
kwargs.get('document_class', self.document_class),
kwargs.get('tz_aware', self.tz_aware),
kwargs.get('uuid_representation', self.uuid_representation),
kwargs.get('unicode_decode_error_handler',
self.unicode_decode_error_handler),
kwargs.get('tzinfo', self.tzinfo),
kwargs.get('type_registry', self.type_registry)
)
DEFAULT_CODEC_OPTIONS = CodecOptions()
def _parse_codec_options(options):
"""Parse BSON codec options."""
return CodecOptions(
document_class=options.get(
'document_class', DEFAULT_CODEC_OPTIONS.document_class),
tz_aware=options.get(
'tz_aware', DEFAULT_CODEC_OPTIONS.tz_aware),
uuid_representation=options.get(
'uuidrepresentation', DEFAULT_CODEC_OPTIONS.uuid_representation),
unicode_decode_error_handler=options.get(
'unicode_decode_error_handler',
DEFAULT_CODEC_OPTIONS.unicode_decode_error_handler),
tzinfo=options.get('tzinfo', DEFAULT_CODEC_OPTIONS.tzinfo),
type_registry=options.get(
'type_registry', DEFAULT_CODEC_OPTIONS.type_registry))
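# Hedged usage sketch (not part of the original module): shows how the helpers
# above fit together. Assumes the bson package is importable; run this module
# directly to exercise it.
if __name__ == "__main__":
    # Override a single option on the module-level defaults.
    options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True)
    assert options.tz_aware is True
    # _parse_codec_options accepts a plain dict of keyword options.
    parsed = _parse_codec_options({"tz_aware": True})
    assert parsed.tz_aware is True
    print(options)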
| 41.149533
| 94
| 0.65304
|
f90e5df321138c4504824397a5f4efe2a1507724
| 10,081
|
py
|
Python
|
mysql-utilities-1.6.0/mysql/utilities/common/my_print_defaults.py
|
bopopescu/mysql-dbcompare
|
1e912fd87282be3b3bed48487e6beb0ecb1de339
|
[
"Apache-2.0"
] | 2
|
2018-03-20T07:42:58.000Z
|
2018-03-20T07:43:49.000Z
|
mysql-utilities-1.6.0/mysql/utilities/common/my_print_defaults.py
|
bopopescu/mysql-dbcompare
|
1e912fd87282be3b3bed48487e6beb0ecb1de339
|
[
"Apache-2.0"
] | null | null | null |
mysql-utilities-1.6.0/mysql/utilities/common/my_print_defaults.py
|
bopopescu/mysql-dbcompare
|
1e912fd87282be3b3bed48487e6beb0ecb1de339
|
[
"Apache-2.0"
] | 1
|
2020-07-23T23:07:08.000Z
|
2020-07-23T23:07:08.000Z
|
#
# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module provides features to read MySQL configuration files, wrapping the
tool my_print_defaults.
"""
import optparse
import os.path
import re
import subprocess
import tempfile
from mysql.utilities.common.tools import get_tool_path
from mysql.utilities.exception import UtilError
_MY_PRINT_DEFAULTS_TOOL = "my_print_defaults"
_MYLOGIN_FILE = ".mylogin.cnf"
def my_login_config_path():
"""Return the default path of the mylogin file (.mylogin.cnf).
"""
if os.name == 'posix':
# File located in $HOME for non-Windows systems
return os.path.expanduser('~')
else:
# File located in %APPDATA%\MySQL for Windows systems
return r'{0}\MySQL'.format(os.environ['APPDATA'])
def my_login_config_exists():
"""Check if the mylogin file (.mylogin.cnf) exists.
"""
my_login_fullpath = os.path.normpath(my_login_config_path() + "/"
+ _MYLOGIN_FILE)
return os.path.isfile(my_login_fullpath)
class MyDefaultsReader(object):
"""The MyDefaultsReader class is used to read the data stored from a MySQL
configuration file. This class provide methods to read the options data
stored in configurations files, using the my_print_defaults tool. To learn
more about my_print_defaults see:
http://dev.mysql.com/doc/en/my-print-defaults.html
"""
def __init__(self, options=None, find_my_print_defaults_tool=True):
"""Constructor
options[in] dictionary of options (e.g. basedir). Note,
allows options values from optparse to be
passed directly to this parameter.
        find_my_print_defaults_tool[in]  boolean value indicating if the tool
my_print_defaults should be located upon
initialization of the object.
"""
if options is None:
options = {}
# _config_data is a dictionary of option groups containing a dictionary
# of the options data read from the configuration file.
self._config_data = {}
# Options values from optparse can be directly passed, check if it is
# the case and handle them correctly.
if isinstance(options, optparse.Values):
try:
self._basedir = options.basedir # pylint: disable=E1103
except AttributeError:
# if the attribute is not found, then set it to None (default).
self._basedir = None
try:
# if the attribute is not found, then set it to 0 (default).
self._verbosity = options.verbosity # pylint: disable=E1103
except AttributeError:
self._verbosity = 0
else:
self._basedir = options.get("basedir", None)
self._verbosity = options.get("verbosity", 0)
if find_my_print_defaults_tool:
self.search_my_print_defaults_tool()
else:
self._tool_path = None
@property
def tool_path(self):
"""Sets tool_path property
"""
return self._tool_path
def search_my_print_defaults_tool(self, search_paths=None):
"""Search for the tool my_print_defaults.
"""
if not search_paths:
search_paths = []
# Set the default search paths (i.e., default location of the
# .mylogin.cnf file).
default_paths = [my_login_config_path()]
# Extend the list of path to search with the ones specified.
if search_paths:
default_paths.extend(search_paths)
# Search for the tool my_print_defaults.
try:
self._tool_path = get_tool_path(self._basedir,
_MY_PRINT_DEFAULTS_TOOL,
defaults_paths=default_paths,
search_PATH=True)
except UtilError as err:
raise UtilError("Unable to locate MySQL Client tools. "
"Please confirm that the path to the MySQL client "
"tools are included in the PATH. Error: %s"
% err.errmsg)
def check_tool_version(self, major_version, minor_version):
"""Check the version of the my_print_defaults tool.
        Returns True if the version of the tool is equal to or higher than the
        one specified, otherwise False.
"""
# The path to the tool must have been previously found.
assert self._tool_path, ("First, the required MySQL tool must be "
"found. E.g., use method "
"search_my_print_defaults_tool.")
# Create a temporary file to redirect stdout
out_file = tempfile.TemporaryFile()
if self._verbosity > 0:
subprocess.call([self._tool_path, "--version"], stdout=out_file)
else:
# Redirect stderr to null
null_file = open(os.devnull, "w+b")
subprocess.call([self._tool_path, "--version"], stdout=out_file,
stderr=null_file)
# Read --version output
out_file.seek(0)
line = out_file.readline()
out_file.close()
# Parse the version value
match = re.search(r'(?:Ver )(\d)\.(\d)', line)
if match:
major, minor = match.groups()
if (major_version < int(major)) or \
(major_version == int(major) and minor_version <= int(minor)):
return True
else:
return False
else:
raise UtilError("Unable to determine tool version - %s" %
self._tool_path)
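    # Illustrative example (assuming the tool reports "my_print_defaults Ver 5.6"):
    # check_tool_version(5, 6) returns True, while check_tool_version(5, 7)
    # returns False.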
def check_login_path_support(self):
"""Checks if the used my_print_defaults tool supports login-paths.
"""
# The path to the tool must have been previously found.
assert self._tool_path, ("First, the required MySQL tool must be "
"found. E.g., use method "
"search_my_print_defaults_tool.")
# Create a temporary file to redirect stdout
out_file = tempfile.TemporaryFile()
if self._verbosity > 0:
subprocess.call([self._tool_path, "--help"], stdout=out_file)
else:
# Redirect stderr to null
null_file = open(os.devnull, "w+b")
subprocess.call([self._tool_path, "--help"], stdout=out_file,
stderr=null_file)
# Read --help output
out_file.seek(0)
help_output = out_file.read()
out_file.close()
# Check the existence of a "login-path" option
if 'login-path' in help_output:
return True
else:
return False
def _read_group_data(self, group):
"""Read group options data using my_print_defaults tool.
"""
# The path to the tool must have been previously found.
assert self._tool_path, ("First, the required MySQL tool must be "
"found. E.g., use method "
"search_my_print_defaults_tool.")
# Group not found; use my_print_defaults to get group data.
out_file = tempfile.TemporaryFile()
if self._verbosity > 0:
subprocess.call([self._tool_path, group], stdout=out_file)
else:
# Redirect stderr to null
null_file = open(os.devnull, "w+b")
subprocess.call([self._tool_path, group], stdout=out_file,
stderr=null_file)
# Read and parse group options values.
out_file.seek(0)
results = []
for line in out_file:
# Parse option value; ignore starting "--"
key_value = line[2:].split("=", 1)
if len(key_value) == 2:
# Handle option format: --key=value and --key=
results.append((key_value[0], key_value[1].strip()))
elif len(key_value) == 1:
# Handle option format: --key
results.append((key_value[0], True))
else:
raise UtilError("Invalid option value format for "
"group %s: %s" % (group, line))
out_file.close()
if len(results):
self._config_data[group] = dict(results)
else:
self._config_data[group] = None
return self._config_data[group]
def get_group_data(self, group):
"""Retrieve the data associated to the given group.
"""
# Returns group's data locally stored, if available.
try:
return self._config_data[group]
except KeyError:
# Otherwise, get it using my_print_defaults.
return self._read_group_data(group)
def get_option_value(self, group, opt_name):
"""Retrieve the value associated to the given opt_name in the group.
"""
# Get option value, if group's data is available.
grp_options = self.get_group_data(group)
if grp_options:
return grp_options.get(opt_name, None)
else:
return None
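# Hedged usage sketch (illustrative only, not part of the original module):
# reads the [client] option group via my_print_defaults. Assumes the MySQL
# client tools are installed and reachable through the PATH.
if __name__ == "__main__":
    reader = MyDefaultsReader(options={"verbosity": 0})
    print(reader.get_group_data("client"))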
| 38.477099
| 79
| 0.587938
|
d6e06f81661d07232a619729c3e4935b6c5ade3e
| 2,420
|
py
|
Python
|
FV3.py
|
RupakMukherjee/FiniteVolumeTensorFlow
|
64745990e856b4e172cc2b0b246970985adbcbdb
|
[
"MIT"
] | null | null | null |
FV3.py
|
RupakMukherjee/FiniteVolumeTensorFlow
|
64745990e856b4e172cc2b0b246970985adbcbdb
|
[
"MIT"
] | null | null | null |
FV3.py
|
RupakMukherjee/FiniteVolumeTensorFlow
|
64745990e856b4e172cc2b0b246970985adbcbdb
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
def set_params():
global xmin, xmax, c, t_end, nx, cfl, n_ghost
xmin=0.0
xmax=1.0
c = 1.0
t_end = 1.0
nx = 100
cfl = 0.1
n_ghost = 3
def set_init():
global time, dx, dt, js, je, nt, x, xb, u, u_init, flux
time = 0.0
dx = (xmax-xmin)/(nx)
dt = cfl*dx/abs(c)
js = n_ghost
je = nx + n_ghost
nt = int(round(t_end/dt))
if(nt*dt < t_end): nt=nt+1
x = np.linspace(xmin-(n_ghost-0.5)*dx,xmax+(n_ghost-0.5)*dx,nx+2*n_ghost)
xb = np.linspace(xmin-(n_ghost-1)*dx,xmax+(n_ghost)*dx,nx+2*n_ghost)
sigma = 0.05
u = np.exp(-(x-0.3)**2/(2*sigma**2))
for j in range(js,je+1):
if(0.5 < x[j]): u[j] = 0.0
if((0.6 <= x[j]) and (x[j]<=0.8)):
u[j]=1.0
elif(0.8<x[j]):
u[j]=0.0
apply_bc(u)
u_init = u.copy()
flux = np.zeros_like(u)
def apply_bc(u):
for j in range(0,n_ghost):
u[je+j]=u[js+j]
u[j]=u[je-n_ghost+j]
def plot_it():
plt.plot(x[js:je], u[js:je], "-+",label="density");
plt.plot(x[js:je], u_init[js:je], label="density(t=0)");
plt.xlim(0.0,1.0); plt.ylim(-0.5,1.5)
plt.xlabel("x")
plt.legend(prop={'size': 10})
plt.show()
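# advance() marches the solution to t_end with the three-stage strong-stability
# preserving Runge-Kutta scheme (Shu-Osher form):
#   u1      = u^n + dt L(u^n)
#   u2      = 3/4 u^n + 1/4 (u1 + dt L(u1))
#   u^{n+1} = 1/3 u^n + 2/3 (u2 + dt L(u2))
# where each f_step1 call applies one forward-Euler update u <- u + dt L(u).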
def advance():
global time, u, t_end, dt, u_old
while (time < t_end):
dt1 = min(dt, t_end - time)
u_old = u.copy()
f_step1(u,dt1)
f_step1(u,dt1)
u=3./4.*u_old + 1./4.*u
f_step1(u,dt1)
u=1./3.*u_old + 2./3.*u
time = time + dt1
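# f_step1 performs a single forward-Euler update of the cell averages. Left and
# right interface states ubL/ubR come from a third-order upwind-biased
# reconstruction (coefficients -1/6, 5/6, 1/3), the numerical flux is evaluated
# by flux_common, and apply_bc refreshes the periodic ghost cells.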
def f_step1(u,dt):
global xb
ubL = np.zeros_like(u)
ubR = np.zeros_like(u)
ubL[js-1:je] = ((1.0-1.0/6.)*u[js-1:je]
-(1./4.-1./12.0)*u[js-1-1:je-1]
+(1./4.+1./12.)*u[js-1+1:je+1])
ubR[js-1:je] = ((1.0-1.0/6.)*u[js-1+1:je+1]
-(1./4.-1./12.0)*u[js-1+2:je+2]
+(1./4.+1./12.)*u[js-1:je])
flux_common(flux, ubL, ubR)
u[js:je] = u[js:je] - dt/dx*(flux[js:je] - flux[js-1:je-1])
apply_bc(u)
def flux_func(ub):
global c
return c*ub
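# flux_common evaluates the local Lax-Friedrichs (Rusanov) numerical flux:
# the average of the physical fluxes from both sides minus |c|/2 times the
# jump in the reconstructed states, which adds upwind-type dissipation.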
def flux_common(flux, ubL, ubR):
flux_L = flux_func(ubL)
flux_R = flux_func(ubR)
flux[js-1:je] = (1/2) * (flux_L[js-1:je] + flux_R[js-1:je]) \
- (np.abs(c)/2)*(ubR[js-1:je]-ubL[js-1:je])
set_params()
set_init()
advance()
plot_it()
| 21.22807
| 77
| 0.489669
|
992aca5f808ab0d5db7f4b7a08610648ec6a3648
| 7,094
|
py
|
Python
|
ehr_prediction_modeling/eval/metrics_coordinator.py
|
google/ehr-predictions
|
811d3087ced4f92d28f98161478ff0ad6b571ad4
|
[
"Apache-2.0"
] | 67
|
2021-02-19T08:01:48.000Z
|
2022-03-31T15:05:18.000Z
|
ehr_prediction_modeling/eval/metrics_coordinator.py
|
higuseonhye/ehr-predictions
|
811d3087ced4f92d28f98161478ff0ad6b571ad4
|
[
"Apache-2.0"
] | null | null | null |
ehr_prediction_modeling/eval/metrics_coordinator.py
|
higuseonhye/ehr-predictions
|
811d3087ced4f92d28f98161478ff0ad6b571ad4
|
[
"Apache-2.0"
] | 8
|
2021-06-29T10:57:57.000Z
|
2022-03-03T10:33:06.000Z
|
# coding=utf-8
# Copyright 2021 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Coordinator for metrics of various targets in evaluation."""
import collections
import os
from typing import List, Mapping, Optional, Tuple, Type
from absl import logging
import dataclasses
from ehr_prediction_modeling.eval import metrics_data
import numpy as np
import tensorflow.compat.v1 as tf
@dataclasses.dataclass(frozen=True)
class MetricsTarget(object):
"""Describes a target metrics will be computed for."""
target_name: str
split_name: str
mask_name: Optional[str] = None
def to_string(self) -> str:
"""Returns a string description of this target."""
return (f"Target: {self.target_name}, Split: {self.split_name}, Mask: "
f"{self.mask_name}")
class MetricsCoordinator(object):
"""Base class for accumulating data used to calculate metrics."""
def __init__(self):
"""Initializes metrics coordinator."""
self._data_dict = {}
def add_data(self,
metrics_data_type: Type[metrics_data.MetricsTypes],
data: Tuple[float],
target_name: str,
split_name: str,
mask_name: Optional[str] = None):
"""Extends object's lists of data.
Args:
metrics_data_type: The subclass of MetricsData that will be used to store
this data.
      data: A batch of data to add. Tuple should contain data listed in the
        order required by the relevant MetricsData add_data method. For
        example, for RegressionMetricsData, data should be (predictions,
        targets, weights).
      target_name: Name of the target the data describes.
split_name: Name of the split the data is from.
mask_name: Name of the mask used with the data given.
Raises:
ValueError if the data given is found invalid by the MetricsData class.
"""
metrics_target = MetricsTarget(target_name, split_name, mask_name)
if metrics_target not in self._data_dict:
self._data_dict[metrics_target] = metrics_data_type()
self._data_dict[metrics_target].add_data(*data)
def _clear_data(self):
"""Clears any data in _data_dict."""
self._data_dict = {}
def log_metrics(self, current_step: int, clear_data: bool = False) -> None:
"""Logs all metrics, then may clear stored metrics data.
Args:
current_step: Current step we are evaluating.
clear_data: If true, all stored data will be cleared.
"""
for metrics_target, data in self._data_dict.items():
data.log_metrics(metrics_target.to_string(), current_step)
if clear_data:
# Clear any data after it is logged.
self._clear_data()
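# Hedged usage sketch (illustrative; names such as "mortality"/"valid" are
# hypothetical): accumulate one batch for a binary task and log the metrics.
# The tuple ordering matches the add_data docstring above.
#
#   coordinator = MetricsCoordinator()
#   coordinator.add_data(
#       metrics_data.BinaryMetricsData,
#       (positives, negatives, np.ones_like(positives), np.ones_like(negatives)),
#       target_name="mortality",
#       split_name="valid",
#       mask_name="since_event")
#   coordinator.log_metrics(current_step=100, clear_data=True)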
def add_batch_to_binary_metrics_data(metrics_coordinator, target_names,
predictions, binary_targets,
eval_mask_dict, split_name):
"""Adds binary tasks predictions to BinaryMetricsData objects.
Args:
metrics_coordinator: MetricsCoordinator, used to accumulate metrics for each
target.
target_names: list of str, the names of the targets in the task.
predictions: array of predictions in time-major shape wnct [num_unroll,
batch_size, channels, num_targets].
binary_targets: array of binary targets in time-major shape wnct [
num_unroll, batch_size, channels, num_targets].
eval_mask_dict: dict of string mask name to array, the loss masks to be used
in evaluation, in time-major shape wnct [num_unroll, batch_size, channels,
num_targets].
split_name: str, name of the data split this batch is from.
"""
num_targets = len(target_names)
# Split predictions by target into a list of length num_targets.
predictions_per_target = np.split(
predictions, indices_or_sections=num_targets, axis=3)
for mask_name, mask in eval_mask_dict.items():
positive_filter_and_mask = (mask * binary_targets).astype(bool)
negative_filter_and_mask = (
mask * (np.ones_like(binary_targets) - binary_targets)).astype(bool)
positive_masks_per_target = np.split(
positive_filter_and_mask, indices_or_sections=num_targets, axis=3)
negative_masks_per_target = np.split(
negative_filter_and_mask, indices_or_sections=num_targets, axis=3)
for idx, target_name in enumerate(target_names):
positives = predictions_per_target[idx][positive_masks_per_target[idx]]
positive_weights = np.ones_like(positives)
negatives = predictions_per_target[idx][negative_masks_per_target[idx]]
negative_weights = np.ones_like(negatives)
metrics_coordinator.add_data(
metrics_data.BinaryMetricsData,
(positives, negatives, positive_weights, negative_weights),
target_name,
split_name,
mask_name=mask_name)
def add_batch_to_regression_metrics_data(metrics_coordinator, target_names,
predictions, targets, eval_mask_dict,
split_name):
"""Adds regression tasks predictions to RegressionMetricsData objects.
Args:
metrics_coordinator: MetricsCoordinator, used to accumulate metrics for each
target.
target_names: list of str, the names of the targets in the task.
predictions: array of predictions in time-major shape wnct [num_unroll,
batch_size, channels, num_targets].
targets: array of float targets in time-major shape wnct [ num_unroll,
batch_size, channels, num_targets].
eval_mask_dict: dict of string mask name to array, the loss masks to be used
in evaluation, in time-major shape wnct [num_unroll, batch_size, channels,
num_targets].
split_name: str, name of the data split this batch is from.
"""
num_targets = len(target_names)
predictions_per_target = np.split(
predictions, indices_or_sections=num_targets, axis=3)
target_list = np.split(
targets, indices_or_sections=num_targets, axis=3)
for mask_name, mask in eval_mask_dict.items():
masks_per_target = np.split(
mask, indices_or_sections=num_targets, axis=3)
for idx, target_name in enumerate(target_names):
predictions = predictions_per_target[idx][masks_per_target[idx].astype(
bool)]
targets = target_list[idx][masks_per_target[idx].astype(bool)]
weights = np.ones_like(predictions)
metrics_coordinator.add_data(
metrics_data.RegressionMetricsData, (predictions, targets, weights),
target_name,
split_name,
mask_name=mask_name)
| 39.411111
| 80
| 0.708204
|
485e4a1231e7aa20721872c9bae63348056a91f4
| 10,456
|
py
|
Python
|
netbox/ipam/tables/ip.py
|
salvapinyol/netbox
|
c9c537a1b9b4cadb9c29579f984e1f96c2583792
|
[
"Apache-2.0"
] | null | null | null |
netbox/ipam/tables/ip.py
|
salvapinyol/netbox
|
c9c537a1b9b4cadb9c29579f984e1f96c2583792
|
[
"Apache-2.0"
] | null | null | null |
netbox/ipam/tables/ip.py
|
salvapinyol/netbox
|
c9c537a1b9b4cadb9c29579f984e1f96c2583792
|
[
"Apache-2.0"
] | null | null | null |
import django_tables2 as tables
from django.utils.safestring import mark_safe
from django_tables2.utils import Accessor
from tenancy.tables import TenantColumn
from utilities.tables import (
BaseTable, BooleanColumn, ButtonsColumn, ChoiceFieldColumn, LinkedCountColumn, TagColumn,
ToggleColumn, UtilizationColumn,
)
from ipam.models import *
__all__ = (
'AggregateTable',
'InterfaceIPAddressTable',
'IPAddressAssignTable',
'IPAddressTable',
'IPRangeTable',
'PrefixTable',
'RIRTable',
'RoleTable',
)
AVAILABLE_LABEL = mark_safe('<span class="badge bg-success">Available</span>')
PREFIX_LINK = """
{% load helpers %}
{% if record.depth %}
<div class="record-depth">
{% for i in record.depth|as_range %}
<span>•</span>
{% endfor %}
</div>
{% endif %}
<a href="{% if record.pk %}{% url 'ipam:prefix' pk=record.pk %}{% else %}{% url 'ipam:prefix_add' %}?prefix={{ record }}{% if object.vrf %}&vrf={{ object.vrf.pk }}{% endif %}{% if object.site %}&site={{ object.site.pk }}{% endif %}{% if object.tenant %}&tenant_group={{ object.tenant.group.pk }}&tenant={{ object.tenant.pk }}{% endif %}{% endif %}">{{ record.prefix }}</a>
"""
PREFIXFLAT_LINK = """
{% load helpers %}
{% if record.pk %}
<a href="{% url 'ipam:prefix' pk=record.pk %}">{{ record.prefix }}</a>
{% else %}
{{ record.prefix }}
{% endif %}
"""
IPADDRESS_LINK = """
{% if record.pk %}
<a href="{{ record.get_absolute_url }}">{{ record.address }}</a>
{% elif perms.ipam.add_ipaddress %}
<a href="{% url 'ipam:ipaddress_add' %}?address={{ record.1 }}{% if object.vrf %}&vrf={{ object.vrf.pk }}{% endif %}{% if object.tenant %}&tenant={{ object.tenant.pk }}{% endif %}" class="btn btn-sm btn-success">{% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available</a>
{% else %}
{% if record.0 <= 65536 %}{{ record.0 }}{% else %}Many{% endif %} IP{{ record.0|pluralize }} available
{% endif %}
"""
IPADDRESS_ASSIGN_LINK = """
<a href="{% url 'ipam:ipaddress_edit' pk=record.pk %}?{% if request.GET.interface %}interface={{ request.GET.interface }}{% elif request.GET.vminterface %}vminterface={{ request.GET.vminterface }}{% endif %}&return_url={{ request.GET.return_url }}">{{ record }}</a>
"""
VRF_LINK = """
{% if record.vrf %}
<a href="{{ record.vrf.get_absolute_url }}">{{ record.vrf }}</a>
{% elif object.vrf %}
<a href="{{ object.vrf.get_absolute_url }}">{{ object.vrf }}</a>
{% else %}
Global
{% endif %}
"""
#
# RIRs
#
class RIRTable(BaseTable):
pk = ToggleColumn()
name = tables.Column(
linkify=True
)
is_private = BooleanColumn(
verbose_name='Private'
)
aggregate_count = LinkedCountColumn(
viewname='ipam:aggregate_list',
url_params={'rir_id': 'pk'},
verbose_name='Aggregates'
)
actions = ButtonsColumn(RIR)
class Meta(BaseTable.Meta):
model = RIR
fields = ('pk', 'name', 'slug', 'is_private', 'aggregate_count', 'description', 'actions')
default_columns = ('pk', 'name', 'is_private', 'aggregate_count', 'description', 'actions')
#
# Aggregates
#
class AggregateTable(BaseTable):
pk = ToggleColumn()
prefix = tables.Column(
linkify=True,
verbose_name='Aggregate'
)
tenant = TenantColumn()
date_added = tables.DateColumn(
format="Y-m-d",
verbose_name='Added'
)
child_count = tables.Column(
verbose_name='Prefixes'
)
utilization = UtilizationColumn(
accessor='get_utilization',
orderable=False
)
tags = TagColumn(
url_name='ipam:aggregate_list'
)
class Meta(BaseTable.Meta):
model = Aggregate
fields = ('pk', 'prefix', 'rir', 'tenant', 'child_count', 'utilization', 'date_added', 'description', 'tags')
default_columns = ('pk', 'prefix', 'rir', 'tenant', 'child_count', 'utilization', 'date_added', 'description')
#
# Roles
#
class RoleTable(BaseTable):
pk = ToggleColumn()
name = tables.Column(
linkify=True
)
prefix_count = LinkedCountColumn(
viewname='ipam:prefix_list',
url_params={'role_id': 'pk'},
verbose_name='Prefixes'
)
vlan_count = LinkedCountColumn(
viewname='ipam:vlan_list',
url_params={'role_id': 'pk'},
verbose_name='VLANs'
)
actions = ButtonsColumn(Role)
class Meta(BaseTable.Meta):
model = Role
fields = ('pk', 'name', 'slug', 'prefix_count', 'vlan_count', 'description', 'weight', 'actions')
default_columns = ('pk', 'name', 'prefix_count', 'vlan_count', 'description', 'actions')
#
# Prefixes
#
class PrefixUtilizationColumn(UtilizationColumn):
"""
Extend UtilizationColumn to allow disabling the warning & danger thresholds for prefixes
marked as fully utilized.
"""
template_code = """
{% load helpers %}
{% if record.pk and record.mark_utilized %}
{% utilization_graph value warning_threshold=0 danger_threshold=0 %}
{% elif record.pk %}
{% utilization_graph value %}
{% endif %}
"""
class PrefixTable(BaseTable):
pk = ToggleColumn()
prefix = tables.TemplateColumn(
template_code=PREFIX_LINK,
attrs={'td': {'class': 'text-nowrap'}}
)
prefix_flat = tables.TemplateColumn(
template_code=PREFIXFLAT_LINK,
attrs={'td': {'class': 'text-nowrap'}},
verbose_name='Prefix (Flat)',
)
depth = tables.Column(
accessor=Accessor('_depth'),
verbose_name='Depth'
)
children = LinkedCountColumn(
accessor=Accessor('_children'),
viewname='ipam:prefix_list',
url_params={
'vrf_id': 'vrf_id',
'within': 'prefix',
},
verbose_name='Children'
)
status = ChoiceFieldColumn(
default=AVAILABLE_LABEL
)
vrf = tables.TemplateColumn(
template_code=VRF_LINK,
verbose_name='VRF'
)
tenant = TenantColumn()
site = tables.Column(
linkify=True
)
vlan = tables.Column(
linkify=True,
verbose_name='VLAN'
)
role = tables.Column(
linkify=True
)
is_pool = BooleanColumn(
verbose_name='Pool'
)
mark_utilized = BooleanColumn(
verbose_name='Marked Utilized'
)
utilization = PrefixUtilizationColumn(
accessor='get_utilization',
orderable=False
)
tags = TagColumn(
url_name='ipam:prefix_list'
)
class Meta(BaseTable.Meta):
model = Prefix
fields = (
'pk', 'prefix', 'prefix_flat', 'status', 'children', 'vrf', 'utilization', 'tenant', 'site', 'vlan', 'role',
'is_pool', 'mark_utilized', 'description', 'tags',
)
default_columns = (
'pk', 'prefix', 'status', 'children', 'vrf', 'utilization', 'tenant', 'site', 'vlan', 'role', 'description',
)
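        # Placeholder rows without a primary key represent available prefixes
        # and are highlighted via the 'success' CSS class below.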
row_attrs = {
'class': lambda record: 'success' if not record.pk else '',
}
#
# IP ranges
#
class IPRangeTable(BaseTable):
pk = ToggleColumn()
start_address = tables.Column(
linkify=True
)
vrf = tables.TemplateColumn(
template_code=VRF_LINK,
verbose_name='VRF'
)
status = ChoiceFieldColumn(
default=AVAILABLE_LABEL
)
role = tables.Column(
linkify=True
)
tenant = TenantColumn()
class Meta(BaseTable.Meta):
model = IPRange
fields = (
'pk', 'start_address', 'end_address', 'size', 'vrf', 'status', 'role', 'tenant', 'description',
)
default_columns = (
'pk', 'start_address', 'end_address', 'size', 'vrf', 'status', 'role', 'tenant', 'description',
)
row_attrs = {
'class': lambda record: 'success' if not record.pk else '',
}
#
# IPAddresses
#
class IPAddressTable(BaseTable):
pk = ToggleColumn()
address = tables.TemplateColumn(
template_code=IPADDRESS_LINK,
verbose_name='IP Address'
)
vrf = tables.TemplateColumn(
template_code=VRF_LINK,
verbose_name='VRF'
)
status = ChoiceFieldColumn(
default=AVAILABLE_LABEL
)
role = ChoiceFieldColumn()
tenant = TenantColumn()
assigned_object = tables.Column(
linkify=True,
orderable=False,
verbose_name='Interface'
)
assigned_object_parent = tables.Column(
accessor='assigned_object.parent_object',
linkify=True,
orderable=False,
verbose_name='Device/VM'
)
nat_inside = tables.Column(
linkify=True,
orderable=False,
verbose_name='NAT (Inside)'
)
assigned = BooleanColumn(
accessor='assigned_object',
linkify=True,
verbose_name='Assigned'
)
tags = TagColumn(
url_name='ipam:ipaddress_list'
)
class Meta(BaseTable.Meta):
model = IPAddress
fields = (
'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'nat_inside', 'assigned', 'dns_name', 'description',
'tags',
)
default_columns = (
'pk', 'address', 'vrf', 'status', 'role', 'tenant', 'assigned', 'dns_name', 'description',
)
row_attrs = {
'class': lambda record: 'success' if not isinstance(record, IPAddress) else '',
}
class IPAddressAssignTable(BaseTable):
address = tables.TemplateColumn(
template_code=IPADDRESS_ASSIGN_LINK,
verbose_name='IP Address'
)
status = ChoiceFieldColumn()
assigned_object = tables.Column(
orderable=False
)
class Meta(BaseTable.Meta):
model = IPAddress
fields = ('address', 'dns_name', 'vrf', 'status', 'role', 'tenant', 'assigned_object', 'description')
orderable = False
class InterfaceIPAddressTable(BaseTable):
"""
List IP addresses assigned to a specific Interface.
"""
address = tables.Column(
linkify=True,
verbose_name='IP Address'
)
vrf = tables.TemplateColumn(
template_code=VRF_LINK,
verbose_name='VRF'
)
status = ChoiceFieldColumn()
tenant = TenantColumn()
actions = ButtonsColumn(
model=IPAddress
)
class Meta(BaseTable.Meta):
model = IPAddress
fields = ('address', 'vrf', 'status', 'role', 'tenant', 'description')
| 28.107527
| 372
| 0.597839
|
7a2126241d8aac84b8e65cd834cdb92e6dc4cfeb
| 5,401
|
py
|
Python
|
tests/python/pants_test/backend/jvm/tasks/test_scalafmt.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/tasks/test_scalafmt.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/jvm/tasks/test_scalafmt.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from textwrap import dedent
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.junit_tests import JUnitTests
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.scalafmt import ScalaFmtCheckFormat, ScalaFmtFormat
from pants.base.exceptions import TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.resources import Resources
from pants.source.source_root import SourceRootConfig
from pants_test.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants_test.subsystem.subsystem_util import init_subsystem
class ScalaFmtTestBase(NailgunTaskTestBase):
@classmethod
def alias_groups(cls):
return super(ScalaFmtTestBase, cls).alias_groups().merge(
BuildFileAliases(targets={'java_tests': JUnitTests,
'junit_tests': JUnitTests,
'scala_library': ScalaLibrary}))
def setUp(self):
super(ScalaFmtTestBase, self).setUp()
init_subsystem(ScalaPlatform)
init_subsystem(SourceRootConfig)
self.configuration = self.create_file(
relpath='build-support/scalafmt/config',
contents=dedent("""
align.arrowEnumeratorGenerator = true
align.openParenCallSite = false
align.openParenDefnSite = false
assumeStandardLibraryStripMargin = false
binPack.parentConstructors = false
continuationIndent.callSite = 4
continuationIndent.defnSite = 4
maxColumn = 100
newlines.sometimesBeforeColonInMethodReturnType = true
spaces.afterTripleEquals = true
spaces.inImportCurlyBraces = false
""")
)
self.test_file_contents = dedent(
b"""
package org.pantsbuild.badscalastyle
/**
* These comments are formatted incorrectly
* and the parameter list is too long for one line
*/
case class ScalaStyle(one: String,two: String,three: String,four: String,
five: String,six: String,seven: String,eight: String, nine: String)
class Person(name: String,age: Int,astrologicalSign: String,
shoeSize: Int,
favoriteColor: java.awt.Color) {
def getAge:Int={return age}
def sum(longvariablename: List[String]): Int = {
longvariablename.map(_.toInt).foldLeft(0)(_ + _)
}
}
"""
)
self.test_file = self.create_file(
relpath='src/scala/org/pantsbuild/badscalastyle/BadScalaStyle.scala',
contents=self.test_file_contents
)
self.library = self.make_target(spec='src/scala/org/pantsbuild/badscalastyle',
sources=['BadScalaStyle.scala'],
target_type=ScalaLibrary)
self.as_resources = self.make_target(spec='src/scala/org/pantsbuild/badscalastyle:as_resources',
target_type=Resources,
sources=['BadScalaStyle.scala'],
description='Depends on the same sources as the target '
'above, but as resources.')
class ScalaFmtCheckFormatTest(ScalaFmtTestBase):
@classmethod
def task_type(cls):
return ScalaFmtCheckFormat
def test_scalafmt_fail_default_config(self):
self.set_options(skip=False)
context = self.context(target_roots=self.library)
with self.assertRaises(TaskError):
self.execute(context)
def test_scalafmt_fail(self):
self.set_options(skip=False, configuration=self.configuration)
context = self.context(target_roots=self.library)
with self.assertRaises(TaskError):
self.execute(context)
def test_scalafmt_disabled(self):
self.set_options(skip=True)
self.execute(self.context(target_roots=self.library))
def test_scalafmt_ignore_resources(self):
self.set_options(skip=False, configuration=self.configuration)
context = self.context(target_roots=self.as_resources)
self.execute(context)
class ScalaFmtFormatTest(ScalaFmtTestBase):
@classmethod
def task_type(cls):
return ScalaFmtFormat
def test_scalafmt_format_default_config(self):
self.format_file_and_verify_fmt(skip=False)
def test_scalafmt_format(self):
self.format_file_and_verify_fmt(skip=False, configuration=self.configuration)
def format_file_and_verify_fmt(self, **options):
self.set_options(**options)
lint_options_scope = 'sfcf'
check_fmt_task_type = self.synthesize_task_subtype(ScalaFmtCheckFormat, lint_options_scope)
self.set_options_for_scope(lint_options_scope, **options)
# format an incorrectly formatted file.
context = self.context(for_task_types=[check_fmt_task_type], target_roots=self.library)
self.execute(context)
with open(self.test_file, 'rb') as fp:
self.assertNotEqual(self.test_file_contents, fp.read())
# verify that the lint check passes.
check_fmt_workdir = os.path.join(self.pants_workdir, check_fmt_task_type.stable_name())
check_fmt_task = check_fmt_task_type(context, check_fmt_workdir)
check_fmt_task.execute()
| 36.993151
| 100
| 0.71172
|
f4173f5d05b709238ff6014e3c5c9daa00214d82
| 16,020
|
py
|
Python
|
tensorflow/contrib/distributions/python/kernel_tests/shape_test.py
|
danielgordon10/tensorflow
|
395cfc42ee3c5842f5383f4049674c012998b133
|
[
"Apache-2.0"
] | 101
|
2016-12-03T11:40:52.000Z
|
2017-12-23T02:02:03.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/shape_test.py
|
danielgordon10/tensorflow
|
395cfc42ee3c5842f5383f4049674c012998b133
|
[
"Apache-2.0"
] | 9
|
2016-12-14T03:27:46.000Z
|
2017-09-13T02:29:07.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/shape_test.py
|
danielgordon10/tensorflow
|
395cfc42ee3c5842f5383f4049674c012998b133
|
[
"Apache-2.0"
] | 47
|
2016-12-04T12:37:24.000Z
|
2018-01-14T18:13:07.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ShapeUtil."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import tensor_util
_empty_shape = np.array([], dtype=np.int32)
def _eval(x):
if hasattr(x, "__iter__"):
return [x.eval() for x in x]
return x.eval()
def _constant(x):
if hasattr(x, "__iter__"):
return [tensor_util.constant_value(x) for x in x]
return tensor_util.constant_value(x)
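# Terminology used throughout these tests: _DistributionShape partitions a
# tensor's dimensions into sample, batch, and event groups, i.e. a shape of
# sample_shape + batch_shape + event_shape. batch_ndims and event_ndims fix
# the trailing two groups; the remaining leading dimensions are sample dims.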
class DistributionShapeTest(tf.test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def _random_sample(self, sample_shape, dtype=tf.float64):
return self._rng.random_sample(sample_shape).astype(dtype.as_numpy_dtype())
def _assertNdArrayEqual(self, expected, actual):
"""Helper which properly compares two np.ndarray-like objects.
This function checks for exact equality so is probably only suitable for
integers or powers of 2.
Args:
expected: np.ndarray. Ground-truth value.
actual: np.ndarray. Observed value.
"""
expected = np.asarray(expected)
actual = np.asarray(actual)
self.assertEqual(
expected.shape, actual.shape,
"Shape mismatch: expected %s, got %s." % (expected.shape, actual.shape))
actual_item = actual.flat
for expected_item in expected.flat:
self.assertAllEqual(expected_item, next(actual_item))
def testDistributionShapeGetNdimsStatic(self):
with self.test_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertEqual(0, shaper.get_sample_ndims(x).eval())
self.assertEqual(0, shaper.batch_ndims.eval())
self.assertEqual(0, shaper.event_ndims.eval())
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
x += self._random_sample((1, 2, 3))
self.assertAllEqual(3, shaper.get_ndims(x).eval())
self.assertEqual(1, shaper.get_sample_ndims(x).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
# Test ndims functions work, even despite unfed Tensors.
y = tf.placeholder(tf.float32, shape=(1024, None, 1024))
self.assertEqual(3, shaper.get_ndims(y).eval())
self.assertEqual(1, shaper.get_sample_ndims(y).eval())
self.assertEqual(1, shaper.batch_ndims.eval())
self.assertEqual(1, shaper.event_ndims.eval())
def testDistributionShapeGetNdimsDynamic(self):
with self.test_session() as sess:
batch_ndims = tf.placeholder(tf.int32)
event_ndims = tf.placeholder(tf.int32)
shaper = _DistributionShape(batch_ndims=batch_ndims,
event_ndims=event_ndims)
y = tf.placeholder(tf.float32)
y_value = np.ones((4, 2), dtype=y.dtype.as_numpy_dtype())
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self.assertEqual(2, sess.run(shaper.get_ndims(y),
feed_dict=feed_dict))
def testDistributionShapeGetDimsStatic(self):
with self.test_session():
      shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = 1
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_dims(x)))
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
x += self._random_sample((1, 1, 2, 2))
self._assertNdArrayEqual(
([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
x += x
self._assertNdArrayEqual(
([0], [1], [2, 3]),
_constant(shaper.get_dims(x)))
def testDistributionShapeGetDimsDynamic(self):
with self.test_session() as sess:
# Works for static {batch,event}_ndims despite unfed input.
shaper = _DistributionShape(batch_ndims=1, event_ndims=2)
y = tf.placeholder(tf.float32, shape=(10, None, 5, 5))
self._assertNdArrayEqual([[0], [1], [2, 3]], _eval(shaper.get_dims(y)))
# Works for deferred {batch,event}_ndims.
batch_ndims = tf.placeholder(tf.int32)
event_ndims = tf.placeholder(tf.int32)
shaper = _DistributionShape(batch_ndims=batch_ndims,
event_ndims=event_ndims)
y = tf.placeholder(tf.float32)
y_value = self._random_sample((10, 3, 5, 5), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 2}
self._assertNdArrayEqual(
([0], [1], [2, 3]),
sess.run(shaper.get_dims(y), feed_dict=feed_dict))
def testDistributionShapeGetShapeStatic(self):
with self.test_session():
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
self.assertAllEqual((_empty_shape, _empty_shape, _empty_shape),
_constant(shaper.get_shape(1.)))
self._assertNdArrayEqual(([1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2, 2], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2, 1], _empty_shape, _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, _empty_shape, [1]),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], _empty_shape, [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], _empty_shape, [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=0)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
self._assertNdArrayEqual((_empty_shape, [1], _empty_shape),
_constant(shaper.get_shape(np.ones(1))))
self._assertNdArrayEqual(([2], [2], _empty_shape),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3, 2], [1], _empty_shape),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(1.)
with self.assertRaisesRegexp(ValueError, "expected .* <= ndims"):
shaper.get_shape(np.ones(1))
self._assertNdArrayEqual((_empty_shape, [2], [2]),
_constant(shaper.get_shape(np.ones((2, 2)))))
self._assertNdArrayEqual(([3], [2], [1]),
_constant(shaper.get_shape(np.ones((3, 2, 1)))))
def testDistributionShapeGetShapeDynamic(self):
with self.test_session() as sess:
# Works for static ndims despite unknown static shape.
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
y = tf.placeholder(tf.int32, shape=(None, None, 2))
y_value = np.ones((3, 4, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], [4], [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
shaper = _DistributionShape(batch_ndims=0, event_ndims=1)
y = tf.placeholder(tf.int32, shape=(None, None))
y_value = np.ones((3, 2), dtype=y.dtype.as_numpy_dtype())
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict={y: y_value}))
# Works for deferred {batch,event}_ndims.
batch_ndims = tf.placeholder(tf.int32)
event_ndims = tf.placeholder(tf.int32)
shaper = _DistributionShape(batch_ndims=batch_ndims,
event_ndims=event_ndims)
y = tf.placeholder(tf.float32)
y_value = self._random_sample((3, 4, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 1, event_ndims: 1}
self._assertNdArrayEqual(
([3], [4], [2]),
sess.run(shaper.get_shape(y), feed_dict=feed_dict))
y_value = self._random_sample((3, 2), dtype=y.dtype)
feed_dict = {y: y_value, batch_ndims: 0, event_ndims: 1}
self._assertNdArrayEqual(
([3], _empty_shape, [2]),
sess.run(shaper.get_shape(y), feed_dict=feed_dict))
def testDistributionShapeMakeBatchReadyStatic(self):
with self.test_session() as sess:
x = self._random_sample((1, 2, 3))
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(np.transpose(x, axes=(1, 2, 0)), y.eval())
self.assertAllEqual((1,), sample_shape.eval())
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x, should_be_x_value.eval())
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = tf.placeholder(tf.float32)
x_value = self._random_sample((3, 4, 2), dtype=x.dtype)
feed_dict = {x: x_value}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(3,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllClose(
np.transpose(np.reshape(x_value, (-1, 4, 2)), (1, 2, 0)),
sess.run(y, feed_dict=feed_dict),
rtol=1e-3)
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
shaper = _DistributionShape(batch_ndims=0, event_ndims=0)
x = tf.placeholder(tf.float32)
x_value = np.ones((3,), dtype=x.dtype.as_numpy_dtype())
feed_dict = {x: x_value}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(3,),
sess.run(sample_shape, feed_dict=feed_dict))
# The following check shows we don't need to manually set_shape in the
# ShapeUtil.
self.assertAllEqual((1, 1, None),
y.get_shape().ndims and y.get_shape().as_list())
self.assertAllEqual(
np.ones((1, 1, 3), dtype=x.dtype.as_numpy_dtype()),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
def testDistributionShapeMakeBatchReadyDynamic(self):
with self.test_session() as sess:
shaper = _DistributionShape(batch_ndims=1, event_ndims=1)
x = tf.placeholder(tf.float32, shape=(1, 2, 3))
x_value = self._random_sample(x.get_shape().as_list(), dtype=x.dtype)
y, sample_shape = sess.run(
shaper.make_batch_of_event_sample_matrices(x),
feed_dict={x: x_value})
self.assertAllEqual(np.transpose(x_value, (1, 2, 0)), y)
self.assertAllEqual((1,), sample_shape)
feed_dict = {x: x_value}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(1,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllEqual(
np.transpose(x_value, (1, 2, 0)),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
batch_ndims = tf.placeholder(tf.int32)
event_ndims = tf.placeholder(tf.int32)
shaper = _DistributionShape(batch_ndims=batch_ndims,
event_ndims=event_ndims)
# batch_ndims = 1, event_ndims = 1.
x = tf.placeholder(tf.float32)
x_value = np.ones((3, 4, 2), dtype=x.dtype.as_numpy_dtype())
feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 1}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(3,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllEqual(
np.ones((4, 2, 3), dtype=x.dtype.as_numpy_dtype()),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
# batch_ndims = 0, event_ndims = 0.
x_value = np.ones((3,), dtype=x.dtype.as_numpy_dtype())
feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 0}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(3,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllEqual(
np.ones((1, 1, 3), dtype=x.dtype.as_numpy_dtype()),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
# batch_ndims = 0, event_ndims = 1.
x_value = np.ones((1, 2,), dtype=x.dtype.as_numpy_dtype())
feed_dict = {x: x_value, batch_ndims: 0, event_ndims: 1}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(1,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllEqual(
np.ones((1, 2, 1), dtype=x.dtype.as_numpy_dtype()),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
# batch_ndims = 1, event_ndims = 0.
x_value = np.ones((1, 2), dtype=x.dtype.as_numpy_dtype())
feed_dict = {x: x_value, batch_ndims: 1, event_ndims: 0}
y, sample_shape = shaper.make_batch_of_event_sample_matrices(x)
self.assertAllEqual(
(1,),
sess.run(sample_shape, feed_dict=feed_dict))
self.assertAllEqual(
np.ones((2, 1, 1), dtype=x.dtype.as_numpy_dtype()),
sess.run(y, feed_dict=feed_dict))
should_be_x_value = shaper.undo_make_batch_of_event_sample_matrices(
y, sample_shape)
self.assertAllEqual(x_value, sess.run(should_be_x_value,
feed_dict=feed_dict))
if __name__ == "__main__":
tf.test.main()
| 44.010989
| 80
| 0.640762
|
787530f9fb6cac8866eb3f59725d22d9e52b2ade
| 6,330
|
py
|
Python
|
magenta/models/image_stylization/image_stylization_train.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 16,143
|
2016-05-14T04:44:54.000Z
|
2020-06-04T06:48:38.000Z
|
magenta/models/image_stylization/image_stylization_train.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 1,076
|
2016-05-19T14:13:43.000Z
|
2020-06-04T16:36:51.000Z
|
magenta/models/image_stylization/image_stylization_train.py
|
sandutsar/magenta
|
77ed668af96edea7c993d38973b9da342bd31e82
|
[
"Apache-2.0"
] | 3,584
|
2016-05-14T05:55:19.000Z
|
2020-06-04T17:53:50.000Z
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains the N-styles style transfer model."""
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
from magenta.models.image_stylization import vgg
import tensorflow.compat.v1 as tf
import tf_slim as slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter servers. If 0, parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_float('alpha', 1.0, 'Width multiplier')
flags.DEFINE_integer('save_summaries_secs', 15,
'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0,
'Task ID. Used when training with multiple '
'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 40000, 'Number of training steps.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('master', '',
'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries.')
FLAGS = flags.FLAGS
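# Example invocation (paths are hypothetical, shown for illustration only):
#   python image_stylization_train.py \
#       --train_dir=/tmp/image_stylization/train \
#       --style_dataset_file=/tmp/image_stylization/styles.tfrecord \
#       --num_styles=32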
def main(unused_argv=None):
with tf.Graph().as_default():
# Force all input processing onto CPU in order to reserve the GPU for the
# forward inference and back-propagation.
device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
worker_device=device)):
inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
FLAGS.image_size)
# Load style images and select one at random (for each graph execution, a
# new random selection occurs)
style_images, style_labels, \
style_gram_matrices = image_utils.style_image_inputs(
os.path.expanduser(FLAGS.style_dataset_file),
batch_size=FLAGS.batch_size,
image_size=FLAGS.image_size,
square_crop=True,
shuffle=True)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Process style and weight flags
num_styles = FLAGS.num_styles
if FLAGS.style_coefficients is None:
style_coefficients = [1.0 for _ in range(num_styles)]
else:
style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
if len(style_coefficients) != num_styles:
raise ValueError(
'number of style coefficients differs from number of styles')
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Rescale style weights dynamically based on the current style image
style_coefficient = tf.gather(
tf.constant(style_coefficients), style_labels)
style_weights = dict((key, style_coefficient * style_weights[key])
for key in style_weights)
# Define the model
stylized_inputs = model.transform(
inputs,
alpha=FLAGS.alpha,
normalizer_params={
'labels': style_labels,
'num_categories': num_styles,
'center': True,
'scale': True
})
# Compute losses.
total_loss, loss_dict = learning.total_loss(
inputs, stylized_inputs, style_gram_matrices, content_weights,
style_weights)
for key, value in loss_dict.items():
tf.summary.scalar(key, value)
# Adding Image summaries to the tensorboard.
tf.summary.image('image/0_inputs', inputs, 3)
tf.summary.image('image/1_styles', style_images, 3)
tf.summary.image('image/2_styled_inputs', stylized_inputs, 3)
# Set up training
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = slim.learning.create_train_op(
total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
summarize_gradients=False)
# Function to restore VGG16 parameters.
init_fn_vgg = slim.assign_from_checkpoint_fn(vgg.checkpoint_file(),
slim.get_variables('vgg_16'))
# Run training
slim.learning.train(
train_op=train_op,
logdir=os.path.expanduser(FLAGS.train_dir),
master=FLAGS.master,
is_chief=FLAGS.task == 0,
number_of_steps=FLAGS.train_steps,
init_fn=init_fn_vgg,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| 41.92053
| 80
| 0.67109
|
645ecf8a5894f3c76b76ec6bd283c345481ab69d
| 15,247
|
py
|
Python
|
corehq/apps/users/bulk_download.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/users/bulk_download.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/users/bulk_download.py
|
omari-funzone/commcare-hq
|
5edb462c891fc08e51c4babd7acdf12c0006a602
|
[
"BSD-3-Clause"
] | null | null | null |
import uuid
from django.conf import settings
from django.utils.translation import ugettext
from couchexport.writers import Excel2007ExportWriter
from soil import DownloadBase
from soil.util import expose_download, get_download_file_path
from corehq import privileges
from corehq import toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.custom_data_fields.models import (
PROFILE_SLUG,
CustomDataFieldsDefinition,
CustomDataFieldsProfile,
)
from corehq.apps.groups.models import Group
from corehq.apps.locations.models import SQLLocation
from corehq.apps.user_importer.importer import BulkCacheBase, GroupMemoizer
from corehq.apps.users.dbaccessors.all_commcare_users import (
get_commcare_users_by_filters,
get_mobile_usernames_by_filters,
get_all_user_rows,
get_web_user_count,
)
from corehq.apps.users.models import CouchUser, UserRole, Invitation
from corehq.util.workbook_json.excel import (
alphanumeric_sort_key,
flatten_json,
json_to_headers,
)
from couchdbkit import ResourceNotFound
class LocationIdToSiteCodeCache(BulkCacheBase):
def lookup(self, location_id):
return SQLLocation.objects.get(
domain=self.domain, # this is only for safety
location_id=location_id
).site_code
def build_data_headers(keys, header_prefix='data'):
return json_to_headers(
{header_prefix: {key: None for key in keys}}
)
def get_devices(user):
"""
Returns a comma-separated list of IMEI numbers of the user's devices, sorted with most-recently-used first
"""
return ', '.join([device.device_id for device in sorted(
user.devices, key=lambda d: d.last_used, reverse=True
)])
def get_location_codes(location_cache, loc_id, assigned_loc_ids):
location_codes = []
try:
location_codes.append(location_cache.get(loc_id))
except SQLLocation.DoesNotExist:
pass
for location_id in assigned_loc_ids:
        # skip the primary location_id; it was already added to the start of the list
if location_id != loc_id:
try:
location_codes.append(location_cache.get(location_id))
except SQLLocation.DoesNotExist:
pass
return location_codes
def make_mobile_user_dict(user, group_names, location_cache, domain, fields_definition):
    model_data, uncategorized_data = (
fields_definition.get_model_and_uncategorized(user.metadata)
)
role = user.get_role(domain)
profile = None
if PROFILE_SLUG in user.metadata and domain_has_privilege(domain, privileges.APP_USER_PROFILES):
try:
profile = CustomDataFieldsProfile.objects.get(id=user.metadata[PROFILE_SLUG])
except CustomDataFieldsProfile.DoesNotExist:
profile = None
activity = user.reporting_metadata
location_codes = get_location_codes(location_cache, user.location_id, user.assigned_location_ids)
def _format_date(date):
return date.strftime('%Y-%m-%d %H:%M:%S') if date else ''
return {
'data': model_data,
'uncategorized_data': uncategorized_data,
'group': group_names,
'name': user.full_name,
'password': "********", # dummy display string for passwords
'phone-number': user.phone_number,
'email': user.email,
'username': user.raw_username,
'language': user.language,
'user_id': user._id,
'is_active': str(user.is_active),
'User IMEIs (read only)': get_devices(user),
'location_code': location_codes,
'role': role.name if role else '',
'domain': domain,
'user_profile': profile.name if profile else '',
'registered_on (read only)': _format_date(user.created_on),
'last_submission (read only)': _format_date(activity.last_submission_for_user.submission_date),
        'last_sync (read only)': _format_date(activity.last_sync_for_user.sync_date),
}
def get_user_role_name(domain_membership):
if domain_membership.is_admin:
return ugettext('Admin')
else:
role_name = ''
if domain_membership.role_id:
try:
role_name = UserRole.get(domain_membership.role_id).name
except ResourceNotFound:
role_name = ugettext('Unknown Role')
return role_name
def make_web_user_dict(user, location_cache, domain):
user = CouchUser.wrap_correctly(user['doc'])
domain_membership = user.get_domain_membership(domain)
role_name = get_user_role_name(domain_membership)
location_codes = get_location_codes(location_cache, domain_membership.location_id,
domain_membership.assigned_location_ids)
return {
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.email,
'role': role_name,
'location_code': location_codes,
'status': ugettext('Active User'),
'last_access_date (read only)': domain_membership.last_accessed,
'last_login (read only)': user.last_login,
'remove': '',
}
def make_invited_web_user_dict(invite, location_cache):
location_codes = []
try:
location_codes.append(location_cache.get(invite.supply_point))
except SQLLocation.DoesNotExist:
pass
return {
'username': invite.email,
'first_name': 'N/A',
'last_name': 'N/A',
'email': invite.email,
'role': invite.get_role_name(),
'location_code': location_codes,
'status': ugettext('Invited'),
'last_access_date (read only)': 'N/A',
'last_login (read only)': 'N/A',
'remove': '',
}
def get_user_rows(user_dicts, user_headers):
for user_dict in user_dicts:
row = dict(flatten_json(user_dict))
yield [row.get(header, '') for header in user_headers]
def parse_mobile_users(domain, user_filters, task=None, total_count=None):
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
fields_definition = CustomDataFieldsDefinition.get_or_create(
domain,
UserFieldsView.field_type
)
unrecognized_user_data_keys = set()
user_groups_length = 0
max_location_length = 0
user_dicts = []
domains_list = [domain]
is_multi_domain_download = False
if 'domains' in user_filters:
domains_list = user_filters['domains']
if domains_list != [domain]:
is_multi_domain_download = True
current_user_downloaded_count = 0
for current_domain in domains_list:
location_cache = LocationIdToSiteCodeCache(current_domain)
for n, user in enumerate(get_commcare_users_by_filters(current_domain, user_filters)):
group_memoizer = load_memoizer(current_domain)
group_names = sorted([
group_memoizer.get(id).name for id in Group.by_user_id(user.user_id, wrap=False)
], key=alphanumeric_sort_key)
user_dict = make_mobile_user_dict(user, group_names, location_cache, current_domain, fields_definition)
user_dicts.append(user_dict)
unrecognized_user_data_keys.update(user_dict['uncategorized_data'])
user_groups_length = max(user_groups_length, len(group_names))
max_location_length = max(max_location_length, len(user_dict["location_code"]))
if task:
DownloadBase.set_progress(task, n + current_user_downloaded_count, total_count)
current_user_downloaded_count += n + 1
user_headers = [
'username', 'password', 'name', 'phone-number', 'email',
'language', 'role', 'user_id', 'is_active', 'User IMEIs (read only)',
'registered_on (read only)', 'last_submission (read only)', 'last_sync (read only)'
]
if domain_has_privilege(domain, privileges.APP_USER_PROFILES):
user_headers += ['user_profile']
user_data_fields = [f.slug for f in fields_definition.get_fields(include_system=False)]
user_headers.extend(build_data_headers(user_data_fields))
user_headers.extend(build_data_headers(
unrecognized_user_data_keys,
header_prefix='uncategorized_data'
))
user_headers.extend(json_to_headers(
{'group': list(range(1, user_groups_length + 1))}
))
if domain_has_privilege(domain, privileges.LOCATIONS):
user_headers.extend(json_to_headers(
{'location_code': list(range(1, max_location_length + 1))}
))
if is_multi_domain_download:
user_headers += ['domain']
return user_headers, get_user_rows(user_dicts, user_headers)
def parse_web_users(domain, task=None, total_count=None):
user_dicts = []
max_location_length = 0
location_cache = LocationIdToSiteCodeCache(domain)
for n, user in enumerate(get_all_user_rows(domain, include_web_users=True, include_mobile_users=False,
include_inactive=False, include_docs=True)):
user_dict = make_web_user_dict(user, location_cache, domain)
user_dicts.append(user_dict)
max_location_length = max(max_location_length, len(user_dict["location_code"]))
if task:
DownloadBase.set_progress(task, n, total_count)
for m, invite in enumerate(Invitation.by_domain(domain)):
user_dict = make_invited_web_user_dict(invite, location_cache)
user_dicts.append(user_dict)
if task:
DownloadBase.set_progress(task, n + m, total_count)
user_headers = [
'username', 'first_name', 'last_name', 'email', 'role', 'last_access_date (read only)',
'last_login (read only)', 'status', 'remove'
]
if domain_has_privilege(domain, privileges.LOCATIONS):
user_headers.extend(json_to_headers(
{'location_code': list(range(1, max_location_length + 1))}
))
return user_headers, get_user_rows(user_dicts, user_headers)
def parse_groups(groups):
def _make_group_dict(group):
return {
'id': group.get_id,
'name': group.name,
'case-sharing': group.case_sharing,
'reporting': group.reporting,
'data': group.metadata,
}
group_data_keys = set()
group_dicts = []
sorted_groups = sorted(
groups,
key=lambda group: alphanumeric_sort_key(group.name)
)
for group in sorted_groups:
group_dicts.append(_make_group_dict(group))
group_data_keys.update(group.metadata if group.metadata else [])
group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
group_headers.extend(build_data_headers(group_data_keys))
def _get_group_rows():
for group_dict in group_dicts:
row = dict(flatten_json(group_dict))
yield [row.get(header, '') for header in group_headers]
return group_headers, _get_group_rows()
def count_users_and_groups(domain, user_filters, group_memoizer):
users_count = get_commcare_users_by_filters(domain, user_filters, count_only=True)
groups_count = len(group_memoizer.groups)
return users_count + groups_count
def dump_usernames(domain, download_id, user_filters, task, owner_id):
domains_list = [domain]
if 'domains' in user_filters:
domains_list = user_filters['domains'] # for instances of multi-domain download
users_count = 0
for download_domain in domains_list:
users_count += get_commcare_users_by_filters(download_domain, user_filters, count_only=True)
DownloadBase.set_progress(task, 0, users_count)
usernames = []
for download_domain in domains_list:
usernames += get_mobile_usernames_by_filters(download_domain, user_filters)
headers = [('users', [['username']])]
rows = [('users', [[username] for username in usernames])]
location_id = user_filters.get('location_id')
location_name = ""
if location_id:
location = SQLLocation.active_objects.get_or_None(location_id=location_id)
location_name = location.name if location else ""
filename_prefix = "_".join([a for a in [domain, location_name] if bool(a)])
filename = "{}_users.xlsx".format(filename_prefix)
_dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, users_count, owner_id)
def _dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, total_count, owner_id):
writer = Excel2007ExportWriter(format_as_text=True)
use_transfer = settings.SHARED_DRIVE_CONF.transfer_enabled
file_path = get_download_file_path(use_transfer, filename)
writer.open(
header_table=headers,
file=file_path,
)
writer.write(rows)
writer.close()
expose_download(use_transfer, file_path, filename, download_id, 'xlsx', owner_ids=[owner_id])
DownloadBase.set_progress(task, total_count, total_count)
def load_memoizer(domain):
group_memoizer = GroupMemoizer(domain=domain)
# load groups manually instead of calling group_memoizer.load_all()
# so that we can detect blank groups
blank_groups = set()
for group in Group.by_domain(domain):
if group.name:
group_memoizer.add_group(group)
else:
blank_groups.add(group)
if blank_groups:
raise GroupNameError(blank_groups=blank_groups)
return group_memoizer
def dump_users_and_groups(domain, download_id, user_filters, task, owner_id):
domains_list = user_filters['domains']
users_groups_count = 0
groups = set()
for current_domain in domains_list:
group_memoizer = load_memoizer(current_domain)
users_groups_count += count_users_and_groups(current_domain, user_filters, group_memoizer)
groups.update(group_memoizer.groups)
DownloadBase.set_progress(task, 0, users_groups_count)
user_headers, user_rows = parse_mobile_users(
domain,
user_filters,
task,
users_groups_count,
)
group_headers, group_rows = parse_groups(groups)
headers = [
('users', [user_headers]),
('groups', [group_headers]),
]
rows = [
('users', user_rows),
('groups', group_rows),
]
filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
_dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, users_groups_count, owner_id)
def dump_web_users(domain, download_id, task, owner_id):
users_count = get_web_user_count(domain, include_inactive=False)
DownloadBase.set_progress(task, 0, users_count)
user_headers, user_rows = parse_web_users(domain, task, users_count)
headers = [('users', [user_headers])]
rows = [('users', user_rows)]
filename = "{}_users_{}.xlsx".format(domain, uuid.uuid4().hex)
_dump_xlsx_and_expose_download(filename, headers, rows, download_id, task, users_count, owner_id)
class GroupNameError(Exception):
def __init__(self, blank_groups):
self.blank_groups = blank_groups
@property
def message(self):
return "The following group ids have a blank name: %s." % (
', '.join([group.get_id for group in self.blank_groups])
)
| 36.651442
| 115
| 0.690496
|
ea2c86963330133a50d012a86f2b2e6356f949e6
| 3,163
|
py
|
Python
|
Sensor_python_code/homework_and_develop/a12_sensor_read.py
|
Eric-IoT-2019-summer/QI_D_team
|
c317c9302ee843223c5d9387db3b3ad67dc78ce2
|
[
"MIT"
] | 1
|
2021-03-28T15:54:13.000Z
|
2021-03-28T15:54:13.000Z
|
Sensor_python_code/homework_and_develop/a12_sensor_read.py
|
Eric-IoT-2019-summer/QI_D_team
|
c317c9302ee843223c5d9387db3b3ad67dc78ce2
|
[
"MIT"
] | 2
|
2021-03-28T15:49:07.000Z
|
2021-05-10T03:37:02.000Z
|
Sensor_python_code/homework_and_develop/a12_sensor_read.py
|
Eric-IoT-2019-summer/QI_D_team
|
c317c9302ee843223c5d9387db3b3ad67dc78ce2
|
[
"MIT"
] | null | null | null |
import time
WE0, AE0 = [295,391,347,345], [282, 390, 296, 255]
select_pin, Sen = [24, 25, 26, 27], [0.228, 0.399, 0.267, 0.318]
n, en_bit, mode, delay = [1.18, 0.18, 0.03, 1.15], 28, 0, 1
for i in range(0, 5):
pin_direction = open("/gpio/pin" + str(i + 24) + "/direction", 'w')
pin_direction.write("out")
pin_direction.close()
def pin_mask(bit):
if bit == 0:
return 1
if bit == 1:
return 2
if bit == 2:
return 4
if bit == 3:
return 8
def write_bit_to_gpio_pin(pin_num, value):
if value == 1:
filename = "/gpio/pin" + str(pin_num) + "/value"
file = open(filename, 'w')
file.write("1")
file.close()
#print "pin{0} is HIGH".format(pin_num)
elif value == 0:
filename = "/gpio/pin" + str(pin_num) + "/value"
file = open(filename, 'w')
file.write("0")
file.close()
#print "pin{0} is LOW".format(pin_num)
else:
return 0
def map_select_gpio_pin(bit):
if bit == 0:
return 24
if bit == 1:
return 25
if bit == 2:
return 26
if bit == 3:
return 27
def mux(channel, en=True):
    write_bit_to_gpio_pin(en_bit, 0 if en else 1)  # ~en would give -2/-1 and be ignored by write_bit_to_gpio_pin; enable is assumed active-low, as ~en intended
s = [0, 0, 0, 0]
for i in range(0, 4):
s[i] = (channel & pin_mask(i)) >> i
write_bit_to_gpio_pin(map_select_gpio_pin(i), s[i])
return s
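# Example of the decomposition mux() performs: channel 5 is 0b0101, so
# s == [1, 0, 1, 0] and select pins 24..27 are driven to 1, 0, 1, 0
# respectively (bit i of the channel number goes to select pin 24 + i).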
def input_analog_value():
raw = int(open("/sys/bus/iio/devices/iio:device0/in_voltage0_raw").read())
scale = float(open("/sys/bus/iio/devices/iio:device0/in_voltage_scale").read())
result = raw * scale
real_value = ((result - 3122.2)/1041.2) + 3
#return result
return real_value
def cal_temp36(ADC_value):
temp = (ADC_value-0.76)/0.010 + 22 # unit => V
return temp
def cal_gas(WE, AE, state):
for i in range(0,4):
if state == i:
ppb = ((WE - WE0[i]) - (n[i]*(AE - AE0[i]))) / Sen[i]
return ppb
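# Worked example for the NO2 channel (state 0) with hypothetical readings
# WE = 400 and AE = 300 (same units as WE0/AE0):
#   ppb = ((400 - 295) - 1.18 * (300 - 282)) / 0.228
#       = (105 - 21.24) / 0.228 ≈ 367 ppb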
def cal_pm(voltage):
hppcf = 50.0 + (2433.0*voltage) + 1386.0*(voltage**2)
pm = 0.518 + (0.00274*hppcf)
return pm
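# Worked example: a hypothetical sensor reading of voltage = 1.0 gives
#   hppcf = 50.0 + 2433.0 * 1.0 + 1386.0 * 1.0**2 = 3869.0
#   pm    = 0.518 + 0.00274 * 3869.0 ≈ 11.1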
def main_process():
for i in range(0,5): #alpha_sensor
mux(2*i)
sn_WE = input_analog_value()
mux(2*i+1)
sn_AE = input_analog_value()
alpha_sn = cal_gas(sn_WE, sn_AE, i)
        if i == 0: print("\nNO2 value is {0}".format(alpha_sn))  # NO2
        elif i == 1: print("O3 value is {0}".format(alpha_sn))  # O3
        elif i == 2: print("CO value is {0}".format(alpha_sn))  # CO
        elif i == 3: print("SO2 value is {0}".format(alpha_sn))  # SO2
        elif i == 4:
            temperature = cal_temp36(sn_WE)  # C8, TMP36 Sensor
            pm_value = cal_pm(sn_AE)  # C9, PM2.5 Sensor
            print("Temperature value is {0}".format(temperature))
            time.sleep(delay)
            print("PM2.5 value is {0}\n".format(pm_value))
    print("\n")
time.sleep(delay)
Running = True
while Running:
main_process()
| 27.745614
| 84
| 0.516914
|
7889b7f2c73639b128a1ae3532293e2fbe31c47f
| 2,557
|
py
|
Python
|
bot/database.py
|
mawanda-jun/tnt-village-bot
|
f1246bf97bca369ce0a196a75ef763da74c44c17
|
[
"MIT"
] | null | null | null |
bot/database.py
|
mawanda-jun/tnt-village-bot
|
f1246bf97bca369ce0a196a75ef763da74c44c17
|
[
"MIT"
] | null | null | null |
bot/database.py
|
mawanda-jun/tnt-village-bot
|
f1246bf97bca369ce0a196a75ef763da74c44c17
|
[
"MIT"
] | null | null | null |
import logging
import simplesqlitewrap as ssw
from bot import sql
from .utils import utils
logger = logging.getLogger(__name__)
CATEGORIE = {
1: 'Film TV e programmi',
2: 'Musica',
3: 'E Books',
4: 'Film',
6: 'Linux',
7: 'Anime',
8: 'Cartoni',
9: 'Macintosh',
10: 'Windows Software',
11: 'Pc Game',
12: 'Playstation',
13: 'Students Releases',
14: 'Documentari',
21: 'Video Musicali',
22: 'Sport',
23: 'Teatro',
24: 'Wrestling',
25: 'Varie',
26: 'Xbox',
27: 'Immagini sfondi',
28: 'Altri Giochi',
29: 'Serie TV',
30: 'Fumetteria',
31: 'Trash',
32: 'Nintendo',
34: 'A Book',
    35: 'Podcast',
36: 'Edicola',
37: 'Mobile'
}
DICT_FORMATTING = {
'titolo': lambda release: utils.escape(release['titolo']),
'descrizione': lambda release: utils.escape(release['descrizione']),
'dimensione': lambda release: utils.human_readable_size(release['dimensione']),
'autore': lambda release: utils.html_escape(release['autore']),
'categoria': lambda release: CATEGORIE[release['categoria']],
'magnet': lambda release: 'magnet:?xt=urn:btih:{}'.format(release['hash']),
'forum_url': lambda release: 'http://forum.tntvillage.scambioetico.org/index.php?showtopic={}'.format(release['topic']),
'data': lambda release: release['data'],
'topic': lambda release: release['topic'],
'id': lambda release: release['id'],
'hash': lambda release: release['hash'],
'webarchive_url': lambda release: 'https://web.archive.org/web/http://forum.tntvillage.scambioetico.org/index.php?showtopic={}'.format(release['topic']),
}
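# Example (derived from the lambdas above) for a hypothetical release
# {'hash': 'abc123', 'topic': 42}:
#   DICT_FORMATTING['magnet'](release)    -> 'magnet:?xt=urn:btih:abc123'
#   DICT_FORMATTING['forum_url'](release) -> 'http://forum.tntvillage.scambioetico.org/index.php?showtopic=42'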
class Database(ssw.Database):
def __init__(self, filename):
logger.debug('initing Database module')
ssw.Database.__init__(self, filename)
self._init_db()
def _init_db(self):
logger.debug('creating tables')
self._execute(sql.CREATE_TABLE_RELEASES)
def search(self, query, **kwargs):
query = query.strip('%')
query = '%{}%'.format(query)
return self._execute(sql.SELECT_RELEASE, (query, query), fetchall=True, **kwargs)
def release_by_id(self, release_id, format_release=True):
release_id = int(release_id)
release = self._execute(sql.SELECT_RELEASE_ID, (release_id,), fetchone=True, as_dict=True)
if not format_release:
return release
else:
            return {k: fmt(release) for k, fmt in DICT_FORMATTING.items()}
| 29.390805
| 157
| 0.62808
|
56a0f08f2d3f48d36de7dce577fa0e2dd11d1fad
| 815
|
py
|
Python
|
src/griddlers/cells_section.py
|
saroad2/griddlers
|
7b90b777c7a22db3511d462cefe78ada29e4512e
|
[
"Apache-2.0"
] | 1
|
2021-03-31T08:44:54.000Z
|
2021-03-31T08:44:54.000Z
|
src/griddlers/cells_section.py
|
saroad2/griddlers
|
7b90b777c7a22db3511d462cefe78ada29e4512e
|
[
"Apache-2.0"
] | null | null | null |
src/griddlers/cells_section.py
|
saroad2/griddlers
|
7b90b777c7a22db3511d462cefe78ada29e4512e
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from griddlers.cell_mark import CellMark
@dataclass
class CellsSection:
start: int
end: int
mark: CellMark
blocked_below: bool = field(default=False)
blocked_above: bool = field(default=False)
def __post_init__(self):
if self.start > self.end:
raise ValueError(
"Cannot create section with end smaller than start "
f"({self.end} < {self.start}"
)
@property
def length(self):
return self.end - self.start + 1
@property
def blocked(self):
return self.blocked_below and self.blocked_above
def __contains__(self, index):
return self.start <= index <= self.end
def __iter__(self):
return iter(range(self.start, self.end + 1))
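# Minimal usage sketch (not part of the original module): how a CellsSection
# covering cells 2..5 behaves. CellMark.FILLED is assumed here; the actual
# enum members live in griddlers.cell_mark.
if __name__ == "__main__":
    section = CellsSection(start=2, end=5, mark=CellMark.FILLED)
    assert section.length == 4
    assert 3 in section and 6 not in section
    assert list(section) == [2, 3, 4, 5]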
| 23.285714
| 68
| 0.625767
|
9a2f81d45c372b577249566a0bf3054f18692a29
| 389
|
py
|
Python
|
tests/admin_docs/views.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 2
|
2016-07-23T18:08:37.000Z
|
2016-07-24T09:54:34.000Z
|
tests/admin_docs/views.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_docs/views.py
|
fizista/django
|
16f3a6a4c7bab11644d11c2be029374e5095cb56
|
[
"BSD-3-Clause"
] | 1
|
2020-10-01T08:23:34.000Z
|
2020-10-01T08:23:34.000Z
|
from django.http import HttpResponse
from django.utils.decorators import decorator_from_middleware
from django.views.generic import View
from django.contrib.admindocs.middleware import XViewMiddleware
xview_dec = decorator_from_middleware(XViewMiddleware)
def xview(request):
return HttpResponse()
class XViewClass(View):
def get(self, request):
return HttpResponse()
| 24.3125
| 63
| 0.804627
|
e836d3af16e1f7a2ffe47c2f96d05b2bcc450457
| 16
|
py
|
Python
|
metrics/__init__.py
|
ElementAI/wise_ils
|
f0897b4aae042c2085eeb13b38c2a26b43cb8eaf
|
[
"Apache-2.0"
] | 24
|
2019-08-08T22:00:14.000Z
|
2021-01-28T01:45:51.000Z
|
metrics/__init__.py
|
JaringAu/wise_ils
|
4e8bca05d834a80a5aff5e7c6e84dd3a283b399b
|
[
"Apache-2.0"
] | 6
|
2019-08-12T03:05:23.000Z
|
2020-07-27T21:26:12.000Z
|
metrics/__init__.py
|
JaringAu/wise_ils
|
4e8bca05d834a80a5aff5e7c6e84dd3a283b399b
|
[
"Apache-2.0"
] | 8
|
2019-08-08T22:15:02.000Z
|
2021-01-28T01:45:52.000Z
|
from . import ap
| 16
| 16
| 0.75
|
c5693d0e05dc66524803afad5039c32f95313edd
| 5,994
|
py
|
Python
|
examples/characterization/active-reset/configuration.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 21
|
2021-05-21T08:23:34.000Z
|
2022-03-25T11:30:55.000Z
|
examples/characterization/active-reset/configuration.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 9
|
2021-05-13T19:56:00.000Z
|
2021-12-21T05:11:04.000Z
|
examples/characterization/active-reset/configuration.py
|
qua-platform/qua-libs
|
805a3b1a69980b939b370b3ba09434bc26dc45ec
|
[
"BSD-3-Clause"
] | 2
|
2021-06-21T10:56:40.000Z
|
2021-12-19T14:21:33.000Z
|
import numpy as np
######################
# AUXILIARY FUNCTIONS:
######################
def gauss(amplitude, mu, sigma, length):
t = np.linspace(-length / 2, length / 2, length)
gauss_wave = amplitude * np.exp(-((t - mu) ** 2) / (2 * sigma ** 2))
return [float(x) for x in gauss_wave]
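# For example, gauss(0.4, 0.0, 6.0, 60) (used for "gauss_wf" below) returns a
# 60-sample Gaussian envelope centred in the pulse window, peaking near 0.4
# with a width (sigma) of 6 samples.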
def IQ_imbalance(g, phi):
c = np.cos(phi)
s = np.sin(phi)
N = 1 / ((1 - g ** 2) * (2 * c ** 2 - 1))
return [float(N * x) for x in [(1 - g) * c, (1 + g) * s, (1 - g) * s, (1 + g) * c]]
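# Sanity check that follows directly from the formula above: with g=0 and
# phi=0 we get c=1, s=0, N=1, so IQ_imbalance(0.0, 0.0) == [1.0, 0.0, 0.0, 1.0],
# i.e. the identity correction used by the mixers section below.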
################
# CONFIGURATION:
################
long_readout_len = 3600
readout_len = 400
qubit_IF = 50e6
rr_IF = 50e6
qubit_LO = 6.345e9
rr_LO = 4.755e9
config = {
"version": 1,
"controllers": {
"con1": {
"type": "opx1",
"analog_outputs": {
1: {"offset": 0.0}, # qubit I
2: {"offset": 0.0}, # qubit Q
3: {"offset": 0.0}, # RR I
4: {"offset": 0.0}, # RR Q
},
"digital_outputs": {},
"analog_inputs": {1: {"offset": 0.0}},
}
},
"elements": {
"qubit": {
"mixInputs": {
"I": ("con1", 1),
"Q": ("con1", 2),
"lo_frequency": qubit_LO,
"mixer": "mixer_qubit",
},
"intermediate_frequency": qubit_IF,
"operations": {
"CW": "CW",
"saturation": "saturation_pulse",
"gaussian": "gaussian_pulse",
"pi": "pi_pulse",
"pi2": "pi2_pulse",
"minus_pi2": "minus_pi2_pulse",
},
},
"rr": {
"mixInputs": {
"I": ("con1", 3),
"Q": ("con1", 4),
"lo_frequency": rr_LO,
"mixer": "mixer_RR",
},
"intermediate_frequency": rr_IF,
"operations": {
"CW": "CW",
"long_readout": "long_readout_pulse",
"readout": "readout_pulse",
},
"outputs": {"out1": ("con1", 1)},
"time_of_flight": 28,
"smearing": 0,
},
},
"pulses": {
"CW": {
"operation": "control",
"length": 60000,
"waveforms": {"I": "const_wf", "Q": "zero_wf"},
},
"saturation_pulse": {
"operation": "control",
"length": 20000, # several T1s
"waveforms": {"I": "saturation_wf", "Q": "zero_wf"},
},
"gaussian_pulse": {
"operation": "control",
"length": 60,
"waveforms": {"I": "gauss_wf", "Q": "zero_wf"},
},
"pi_pulse": {
"operation": "control",
"length": 60,
"waveforms": {"I": "pi_wf", "Q": "zero_wf"},
},
"pi2_pulse": {
"operation": "control",
"length": 60,
"waveforms": {"I": "pi2_wf", "Q": "zero_wf"},
},
"minus_pi2_pulse": {
"operation": "control",
"length": 60,
"waveforms": {"I": "minus_pi2_wf", "Q": "zero_wf"},
},
"long_readout_pulse": {
"operation": "measurement",
"length": long_readout_len,
"waveforms": {"I": "long_readout_wf", "Q": "zero_wf"},
"integration_weights": {
"long_integW1": "long_integW1",
"long_integW2": "long_integW2",
},
"digital_marker": "ON",
},
"readout_pulse": {
"operation": "measurement",
"length": readout_len,
"waveforms": {"I": "readout_wf", "Q": "zero_wf"},
"integration_weights": {
"integW1": "integW1",
"integW2": "integW2",
"optW1": "optW1",
"optW2": "optW2",
},
"digital_marker": "ON",
},
},
"waveforms": {
"const_wf": {"type": "constant", "sample": 0.4},
"zero_wf": {"type": "constant", "sample": 0.0},
"saturation_wf": {"type": "constant", "sample": 0.211},
"gauss_wf": {"type": "arbitrary", "samples": gauss(0.4, 0.0, 6.0, 60)},
"pi_wf": {"type": "arbitrary", "samples": gauss(0.3, 0.0, 6.0, 60)},
"pi2_wf": {"type": "arbitrary", "samples": gauss(0.15, 0.0, 6.0, 60)},
"minus_pi2_wf": {"type": "arbitrary", "samples": gauss(-0.15, 0.0, 6.0, 60)},
"long_readout_wf": {"type": "constant", "sample": 0.32},
"readout_wf": {"type": "constant", "sample": 0.34},
},
"digital_waveforms": {"ON": {"samples": [(1, 0)]}},
"integration_weights": {
"long_integW1": {
"cosine": [1.0] * int(long_readout_len / 4),
"sine": [0.0] * int(long_readout_len / 4),
},
"long_integW2": {
"cosine": [0.0] * int(long_readout_len / 4),
"sine": [1.0] * int(long_readout_len / 4),
},
"integW1": {
"cosine": [1.0] * int(readout_len / 4),
"sine": [0.0] * int(readout_len / 4),
},
"integW2": {
"cosine": [0.0] * int(readout_len / 4),
"sine": [1.0] * int(readout_len / 4),
},
"optW1": {
"cosine": [1.0] * int(readout_len / 4),
"sine": [0.0] * int(readout_len / 4),
},
"optW2": {
"cosine": [0.0] * int(readout_len / 4),
"sine": [1.0] * int(readout_len / 4),
},
},
"mixers": {
"mixer_qubit": [
{
"intermediate_frequency": qubit_IF,
"lo_frequency": qubit_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
"mixer_RR": [
{
"intermediate_frequency": rr_IF,
"lo_frequency": rr_LO,
"correction": IQ_imbalance(0.0, 0.0),
}
],
},
}
| 30.896907
| 87
| 0.412079
|
aa8df3c54204b59f64e82bd3007dddef5e13adb6
| 2,127
|
py
|
Python
|
models.py
|
tensorway/self-supervised-vision
|
824e9e66841f98f0b84d2f89e05f72ffd115b32b
|
[
"MIT"
] | null | null | null |
models.py
|
tensorway/self-supervised-vision
|
824e9e66841f98f0b84d2f89e05f72ffd115b32b
|
[
"MIT"
] | null | null | null |
models.py
|
tensorway/self-supervised-vision
|
824e9e66841f98f0b84d2f89e05f72ffd115b32b
|
[
"MIT"
] | null | null | null |
#%%
import timm
import torch as th
import torch
from torch import nn
from mini_resnet import get_mini_resnet
import torch.nn.functional as F
class BenchmarkModel(nn.Module):
def __init__(
self,
projector_mlp_arch,
model_name='mini_resnet20',
n_classes = 10,
):
'''
simple embedding based model used as a benchmark
Args:
- projector_mlp_arch: List[int]
architecture of the mlp that will be appended to the backbone
does not include the backbone last layer
- model_name: string
the name of the pretrained model
'''
super().__init__()
th.hub._validate_not_a_forked_repo=lambda a,b,c: True
if 'mini_resnet' in model_name:
n_layers = int(model_name.split('mini_resnet')[-1])
self.backbone = get_mini_resnet(n_layers)
nfeatures = 64
else:
raise Exception('select mini resnet')
self.projector = MLP(net_arch=[nfeatures]+projector_mlp_arch)
self.classifier = nn.Linear(nfeatures, n_classes)
def embed(self, x):
x = self.backbone(x)
return self.projector(x)
def forward(self, x):
x = self.backbone(x)
x = self.classifier(x)
return th.softmax(x, dim=-1)
class MLP(nn.Module):
'''
accepts layer sizes and creates a MLP model out of it
Args:
net_arch: list of integers denoting layer sizes (input, hidden0, hidden1, ... hiddenn, out)
'''
def __init__(self, net_arch, last_activation= lambda x:x):
super().__init__()
self.layers = nn.ModuleList([nn.Linear(a, b) for a, b in zip(net_arch[:-1], net_arch[1:])])
self.last_activation = last_activation
def forward(self, x):
h = x
for lay in self.layers[:-1]:
h = F.relu(lay(h))
h = self.layers[-1](h)
return h
# %%
if __name__ == '__main__':
model = BenchmarkModel([1000, 1000], 'mini_resnet20')
batch = th.rand(3, 3, 32, 32)
print(model(batch).shape)
print(model(batch))
# %%
| 28.743243
| 99
| 0.598025
|
71f54a048a9fda118bfd535a9cd0ccad55d7dc00
| 6,696
|
py
|
Python
|
regulations/tests/layers_appliers_test.py
|
navigo/regulations-site
|
910c24e46f4e921210a40da452dff69feae692d4
|
[
"CC0-1.0"
] | 18
|
2016-09-22T05:05:16.000Z
|
2021-07-28T18:13:48.000Z
|
regulations/tests/layers_appliers_test.py
|
navigo/regulations-site
|
910c24e46f4e921210a40da452dff69feae692d4
|
[
"CC0-1.0"
] | 260
|
2016-04-05T22:06:10.000Z
|
2021-01-07T22:08:15.000Z
|
regulations/tests/layers_appliers_test.py
|
navigo/regulations-site
|
910c24e46f4e921210a40da452dff69feae692d4
|
[
"CC0-1.0"
] | 25
|
2016-04-06T03:26:42.000Z
|
2020-10-19T16:49:23.000Z
|
from unittest import TestCase
from regulations.generator.layers import layers_applier
from regulations.generator.layers import location_replace
class LayersApplierTest(TestCase):
def test_enqueue(self):
applier = layers_applier.LayersApplier()
element = ('abcd', 'ABCD', [])
applier.enqueue(element)
priority, retrieved = applier.queue.get()
self.assertEquals(priority, -4)
self.assertEquals(retrieved, element)
def test_list_enqueue(self):
applier = layers_applier.LayersApplier()
elements = [('abcd', 'ABCD', []), ('efghi', 'EFG', [12])]
applier.enqueue_from_list(elements)
priority, retrieved = applier.queue.get()
self.assertEquals(priority, -5)
self.assertEqual(elements[1], retrieved)
priority, retrieved = applier.queue.get()
self.assertEquals(priority, -4)
self.assertEqual(elements[0], retrieved)
def test_replace_all(self):
applier = layers_applier.LayersApplier()
applier.text = ('Prefix test <a \nhref="url" data="test">link '
'\ntest</a> postfix text')
applier.replace_all('test', 'linksecondword')
replaced = ('Prefix linksecondword <a \nhref="url" data="test">link '
'\nlinksecondword</a> postfix text')
self.assertEquals(applier.text, replaced)
def test_find_all_offsets(self):
pattern = 'ABCD'
text = 'The grey fox ABCD jumped over the fence ABCD'
offsets = location_replace.LocationReplace.find_all_offsets(pattern,
text)
self.assertEquals(offsets, [(13, 17), (40, 44)])
def test_find_offsets_no_pattern(self):
pattern = 'ABCD'
text = 'The grey fox jumped over the fence'
offsets = location_replace.LocationReplace.find_all_offsets(pattern,
text)
self.assertEquals(offsets, [])
def test_replace_at(self):
text = 'The grey fox ABCD jumped ABCD over the fence ABCD'
applier = layers_applier.LayersApplier()
applier.text = text
applier.replace_at('ABCD', '<a>ABCD</a>', [0, 2])
self.assertEquals(
applier.text,
'The grey fox <a>ABCD</a> jumped ABCD over the fence <a>ABCD</a>')
def test_update_offsets(self):
lr = location_replace.LocationReplace()
lr.offset_starter = 5
pattern = 'ABCD'
text = ('The grey <a href="link">ABCD</a> jumped over the ABCD fence '
'on a ABCD day')
lr.update_offsets(pattern, text)
self.assertEqual(lr.offset_starter, 5)
self.assertEqual(lr.offset_counters, [5, 6, 7])
self.assertEqual(list(lr.offsets.keys()), [5, 6, 7])
self.assertEqual(lr.offsets[5], (24, 28))
def test_update_offset_starter(self):
lr = location_replace.LocationReplace()
lr.offset_counters = [5, 6, 7]
lr.update_offset_starter()
self.assertEqual(lr.offset_starter, 8)
def test_replace_at_case_sensitive(self):
original = 'state'
replacement = '<a href="link_url">state</a>'
locations = [0, 1, 2]
applier = layers_applier.LayersApplier()
applier.text = "<em>(6)</em> <dfn> Under state law. </dfn> State law."
applier.replace_at(original, replacement, locations)
result = (u"<em>(6)</em> <dfn> Under <a href=\"link_url\">state</a> "
u"law. </dfn> State law.")
self.assertEquals(applier.text, result)
def test_replace_no_original(self):
original = 'federal'
replacement = '<a href="link_url">state</a>'
locations = [0, 1, 2]
applier = layers_applier.LayersApplier()
applier.text = "<em>(6)</em> <dfn> Under state law. </dfn> State law."
applier.replace_at(original, replacement, locations)
result = "<em>(6)</em> <dfn> Under state law. </dfn> State law."
self.assertEquals(applier.text, result)
def test_replace_skip_location(self):
original = 'state'
replacement = '<a href="link_url">state</a>'
locations = [0, 2]
applier = layers_applier.LayersApplier()
applier.text = ("<em>(6)</em> <dfn> Under state law. </dfn> state "
"law. <dfn> state liability. </dfn>")
applier.replace_at(original, replacement, locations)
result = ("<em>(6)</em> <dfn> Under <a href=\"link_url\">state</a> "
"law. </dfn> state law. <dfn> <a href=\"link_url\">state"
"</a> liability. </dfn>")
self.assertEquals(applier.text, result)
def test_apply_layers(self):
# Tests same as above but from one level out.
original = 'state'
replacement = '<a href="link_url">state</a>'
locations = [0, 2]
text = ("<em>(6)</em> <dfn> Under state law. </dfn> state "
"law. <dfn> state liability. </dfn>")
applier = layers_applier.LayersApplier()
applier.enqueue((original, replacement, locations))
applier.apply_layers(text)
result = ("<em>(6)</em> <dfn> Under <a href=\"link_url\">state</a> "
"law. </dfn> state law. <dfn> <a href=\"link_url\">state"
"</a> liability. </dfn>")
self.assertEquals(applier.text, result)
def test_apply_layers_escaping(self):
# See https://github.com/eregs/regulations-site/issues/514 and
# https://github.com/fecgov/fec-eregs/issues/382
#
# It appears that we had a holdover step of unescaping that, thanks to
# looser interpretations in Python 3.6 (specifically, ``&sec`` was
# treated as a valid escape even without a trailing semicolon) started
# breaking links that have a ``§ion`` parameter.
original = 'state'
replacement = '<a href="link_url">state</a>'
locations = [0, 2]
text = ("<em>(6)</em> <dfn> Under state law. </dfn> state "
"law. <dfn> state liability. </dfn>"
"<a href='http://example.org?one=1§ion2'>test</a>")
applier = layers_applier.LayersApplier()
applier.enqueue((original, replacement, locations))
applier.apply_layers(text)
result = ("<em>(6)</em> <dfn> Under <a href=\"link_url\">state</a> "
"law. </dfn> state law. <dfn> <a href=\"link_url\">state"
"</a> liability. </dfn>"
"<a href='http://example.org?one=1§ion2'>test</a>")
self.assertEquals(applier.text, result)
| 40.095808
| 78
| 0.586619
|
26eb9412f376c37bcb5585de67cd77a21f42d589
| 14,038
|
py
|
Python
|
mopidy/mpd/protocol/current_playlist.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | null | null | null |
mopidy/mpd/protocol/current_playlist.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | null | null | null |
mopidy/mpd/protocol/current_playlist.py
|
stekern/mopidy
|
94509cf70f2bd35ebb13e746dde135bfbe35ce0e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
from mopidy.compat import urllib
from mopidy.internal import deprecation
from mopidy.mpd import exceptions, protocol, translator
@protocol.commands.add('add')
def add(context, uri):
"""
*musicpd.org, current playlist section:*
``add {URI}``
Adds the file ``URI`` to the playlist (directories add recursively).
``URI`` can also be a single file.
*Clarifications:*
- ``add ""`` should add all tracks in the library to the current playlist.
"""
if not uri.strip('/'):
return
# If we have an URI just try and add it directly without bothering with
# jumping through browse...
if urllib.parse.urlparse(uri).scheme != '':
if context.core.tracklist.add(uris=[uri]).get():
return
try:
uris = []
for path, ref in context.browse(uri, lookup=False):
if ref:
uris.append(ref.uri)
except exceptions.MpdNoExistError as e:
e.message = 'directory or file not found'
raise
if not uris:
raise exceptions.MpdNoExistError('directory or file not found')
context.core.tracklist.add(uris=uris).get()
@protocol.commands.add('addid', songpos=protocol.UINT)
def addid(context, uri, songpos=None):
"""
*musicpd.org, current playlist section:*
``addid {URI} [POSITION]``
Adds a song to the playlist (non-recursive) and returns the song id.
``URI`` is always a single file or URL. For example::
addid "foo.mp3"
Id: 999
OK
*Clarifications:*
- ``addid ""`` should return an error.
"""
if not uri:
raise exceptions.MpdNoExistError('No such song')
length = context.core.tracklist.get_length()
if songpos is not None and songpos > length.get():
raise exceptions.MpdArgError('Bad song index')
tl_tracks = context.core.tracklist.add(
uris=[uri], at_position=songpos).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
return ('Id', tl_tracks[0].tlid)
@protocol.commands.add('delete', songrange=protocol.RANGE)
def delete(context, songrange):
"""
*musicpd.org, current playlist section:*
``delete [{POS} | {START:END}]``
Deletes a song from the playlist.
"""
start = songrange.start
end = songrange.stop
if end is None:
end = context.core.tracklist.get_length().get()
tl_tracks = context.core.tracklist.slice(start, end).get()
if not tl_tracks:
raise exceptions.MpdArgError('Bad song index', command='delete')
for (tlid, _) in tl_tracks:
context.core.tracklist.remove({'tlid': [tlid]})
@protocol.commands.add('deleteid', tlid=protocol.UINT)
def deleteid(context, tlid):
"""
*musicpd.org, current playlist section:*
``deleteid {SONGID}``
Deletes the song ``SONGID`` from the playlist
"""
tl_tracks = context.core.tracklist.remove({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
@protocol.commands.add('clear')
def clear(context):
"""
*musicpd.org, current playlist section:*
``clear``
Clears the current playlist.
"""
context.core.tracklist.clear()
@protocol.commands.add('move', songrange=protocol.RANGE, to=protocol.UINT)
def move_range(context, songrange, to):
"""
*musicpd.org, current playlist section:*
``move [{FROM} | {START:END}] {TO}``
Moves the song at ``FROM`` or range of songs at ``START:END`` to
``TO`` in the playlist.
"""
start = songrange.start
end = songrange.stop
if end is None:
end = context.core.tracklist.get_length().get()
context.core.tracklist.move(start, end, to)
@protocol.commands.add('moveid', tlid=protocol.UINT, to=protocol.UINT)
def moveid(context, tlid, to):
"""
*musicpd.org, current playlist section:*
``moveid {FROM} {TO}``
Moves the song with ``FROM`` (songid) to ``TO`` (playlist index) in
the playlist. If ``TO`` is negative, it is relative to the current
song in the playlist (if there is one).
"""
tl_tracks = context.core.tracklist.filter({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
position = context.core.tracklist.index(tl_tracks[0]).get()
context.core.tracklist.move(position, position + 1, to)
@protocol.commands.add('playlist')
def playlist(context):
"""
*musicpd.org, current playlist section:*
``playlist``
Displays the current playlist.
.. note::
Do not use this, instead use ``playlistinfo``.
"""
deprecation.warn('mpd.protocol.current_playlist.playlist')
return playlistinfo(context)
@protocol.commands.add('playlistfind')
def playlistfind(context, tag, needle):
"""
*musicpd.org, current playlist section:*
``playlistfind {TAG} {NEEDLE}``
Finds songs in the current playlist with strict matching.
"""
if tag == 'filename':
tl_tracks = context.core.tracklist.filter({'uri': [needle]}).get()
if not tl_tracks:
return None
position = context.core.tracklist.index(tl_tracks[0]).get()
return translator.track_to_mpd_format(tl_tracks[0], position=position)
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('playlistid', tlid=protocol.UINT)
def playlistid(context, tlid=None):
"""
*musicpd.org, current playlist section:*
``playlistid {SONGID}``
Displays a list of songs in the playlist. ``SONGID`` is optional
and specifies a single song to display info for.
"""
if tlid is not None:
tl_tracks = context.core.tracklist.filter({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
position = context.core.tracklist.index(tl_tracks[0]).get()
return translator.track_to_mpd_format(tl_tracks[0], position=position)
else:
return translator.tracks_to_mpd_format(
context.core.tracklist.get_tl_tracks().get())
@protocol.commands.add('playlistinfo')
def playlistinfo(context, parameter=None):
"""
*musicpd.org, current playlist section:*
``playlistinfo [[SONGPOS] | [START:END]]``
Displays a list of all songs in the playlist, or if the optional
argument is given, displays information only for the song
``SONGPOS`` or the range of songs ``START:END``.
*ncmpc and mpc:*
- uses negative indexes, like ``playlistinfo "-1"``, to request
the entire playlist
"""
if parameter is None or parameter == '-1':
start, end = 0, None
else:
tracklist_slice = protocol.RANGE(parameter)
start, end = tracklist_slice.start, tracklist_slice.stop
tl_tracks = context.core.tracklist.get_tl_tracks().get()
if start and start > len(tl_tracks):
raise exceptions.MpdArgError('Bad song index')
if end and end > len(tl_tracks):
end = None
return translator.tracks_to_mpd_format(tl_tracks, start, end)
@protocol.commands.add('playlistsearch')
def playlistsearch(context, tag, needle):
"""
*musicpd.org, current playlist section:*
``playlistsearch {TAG} {NEEDLE}``
Searches case-sensitively for partial matches in the current
playlist.
*GMPC:*
- uses ``filename`` and ``any`` as tags
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('plchanges', version=protocol.INT)
def plchanges(context, version):
"""
*musicpd.org, current playlist section:*
``plchanges {VERSION}``
Displays changed songs currently in the playlist since ``VERSION``.
To detect songs that were deleted at the end of the playlist, use
``playlistlength`` returned by status command.
*MPDroid:*
- Calls ``plchanges "-1"`` two times per second to get the entire playlist.
"""
# XXX Naive implementation that returns all tracks as changed
tracklist_version = context.core.tracklist.get_version().get()
if version < tracklist_version:
return translator.tracks_to_mpd_format(
context.core.tracklist.get_tl_tracks().get())
elif version == tracklist_version:
# A version match could indicate this is just a metadata update, so
# check for a stream ref and let the client know about the change.
stream_title = context.core.playback.get_stream_title().get()
if stream_title is None:
return None
tl_track = context.core.playback.get_current_tl_track().get()
position = context.core.tracklist.index(tl_track).get()
return translator.track_to_mpd_format(
tl_track, position=position, stream_title=stream_title)
@protocol.commands.add('plchangesposid', version=protocol.INT)
def plchangesposid(context, version):
"""
*musicpd.org, current playlist section:*
``plchangesposid {VERSION}``
Displays changed songs currently in the playlist since ``VERSION``.
This function only returns the position and the id of the changed
song, not the complete metadata. This is more bandwidth efficient.
To detect songs that were deleted at the end of the playlist, use
``playlistlength`` returned by status command.
"""
# XXX Naive implementation that returns all tracks as changed
if int(version) != context.core.tracklist.get_version().get():
result = []
for (position, (tlid, _)) in enumerate(
context.core.tracklist.get_tl_tracks().get()):
result.append(('cpos', position))
result.append(('Id', tlid))
return result
@protocol.commands.add(
'prio', priority=protocol.UINT, position=protocol.RANGE)
def prio(context, priority, position):
"""
*musicpd.org, current playlist section:*
``prio {PRIORITY} {START:END...}``
Set the priority of the specified songs. A higher priority means that
it will be played first when "random" mode is enabled.
A priority is an integer between 0 and 255. The default priority of new
songs is 0.
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('prioid')
def prioid(context, *args):
"""
*musicpd.org, current playlist section:*
``prioid {PRIORITY} {ID...}``
Same as prio, but address the songs with their id.
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('rangeid', tlid=protocol.UINT, songrange=protocol.RANGE)
def rangeid(context, tlid, songrange):
"""
*musicpd.org, current playlist section:*
``rangeid {ID} {START:END}``
Specifies the portion of the song that shall be played. START and END
are offsets in seconds (fractional seconds allowed); both are optional.
Omitting both (i.e. sending just ":") means "remove the range, play
everything". A song that is currently playing cannot be manipulated
this way.
.. versionadded:: 0.19
New in MPD protocol version 0.19
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('shuffle', songrange=protocol.RANGE)
def shuffle(context, songrange=None):
"""
*musicpd.org, current playlist section:*
``shuffle [START:END]``
Shuffles the current playlist. ``START:END`` is optional and
specifies a range of songs.
"""
if songrange is None:
start, end = None, None
else:
start, end = songrange.start, songrange.stop
context.core.tracklist.shuffle(start, end)
@protocol.commands.add('swap', songpos1=protocol.UINT, songpos2=protocol.UINT)
def swap(context, songpos1, songpos2):
"""
*musicpd.org, current playlist section:*
``swap {SONG1} {SONG2}``
Swaps the positions of ``SONG1`` and ``SONG2``.
"""
if songpos2 < songpos1:
songpos1, songpos2 = songpos2, songpos1
context.core.tracklist.move(songpos1, songpos1 + 1, songpos2)
context.core.tracklist.move(songpos2 - 1, songpos2, songpos1)
@protocol.commands.add('swapid', tlid1=protocol.UINT, tlid2=protocol.UINT)
def swapid(context, tlid1, tlid2):
"""
*musicpd.org, current playlist section:*
``swapid {SONG1} {SONG2}``
Swaps the positions of ``SONG1`` and ``SONG2`` (both song ids).
"""
tl_tracks1 = context.core.tracklist.filter({'tlid': [tlid1]}).get()
tl_tracks2 = context.core.tracklist.filter({'tlid': [tlid2]}).get()
if not tl_tracks1 or not tl_tracks2:
raise exceptions.MpdNoExistError('No such song')
position1 = context.core.tracklist.index(tl_tracks1[0]).get()
position2 = context.core.tracklist.index(tl_tracks2[0]).get()
swap(context, position1, position2)
@protocol.commands.add('addtagid', tlid=protocol.UINT)
def addtagid(context, tlid, tag, value):
"""
*musicpd.org, current playlist section:*
``addtagid {SONGID} {TAG} {VALUE}``
Adds a tag to the specified song. Editing song tags is only possible
for remote songs. This change is volatile: it may be overwritten by
tags received from the server, and the data is gone when the song gets
removed from the queue.
.. versionadded:: 0.19
New in MPD protocol version 0.19
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('cleartagid', tlid=protocol.UINT)
def cleartagid(context, tlid, tag):
"""
*musicpd.org, current playlist section:*
``cleartagid {SONGID} [TAG]``
Removes tags from the specified song. If TAG is not specified, then all
tag values will be removed. Editing song tags is only possible for
remote songs.
.. versionadded:: 0.19
New in MPD protocol version 0.19
"""
raise exceptions.MpdNotImplemented # TODO
| 31.057522
| 79
| 0.651375
|
c78072131bafa478fff617e72c364ee9f6380944
| 80,434
|
py
|
Python
|
salt/modules/state.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
salt/modules/state.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
salt/modules/state.py
|
waynegemmell/salt
|
88056db3589cccab8956c2ae4f9b733acce89461
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Control the state system on the minion.
State Caching
-------------
When a highstate is called, the minion automatically caches a copy of the last
high data. If you then run a highstate with cache=True it will use that cached
highdata and won't hit the fileserver except for ``salt://`` links in the
states themselves.
"""
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import time
import salt.config
import salt.defaults.exitcodes
import salt.payload
import salt.state
import salt.utils.args
import salt.utils.data
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.jid
import salt.utils.json
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.state
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
from salt.exceptions import CommandExecutionError, SaltInvocationError
from salt.loader import _format_cached_grains
from salt.runners.state import orchestrate as _orchestrate
from salt.utils.odict import OrderedDict
__proxyenabled__ = ["*"]
__outputter__ = {
"sls": "highstate",
"sls_id": "highstate",
"pkg": "highstate",
"top": "highstate",
"single": "highstate",
"highstate": "highstate",
"template": "highstate",
"template_str": "highstate",
"apply_": "highstate",
"test": "highstate",
"request": "highstate",
"check_request": "highstate",
"run_request": "highstate",
}
__func_alias__ = {"apply_": "apply"}
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "state"
def __virtual__():
"""
Set the virtualname
"""
# Update global namespace with functions that are cloned in this module
global _orchestrate
_orchestrate = salt.utils.functools.namespaced_function(_orchestrate, globals())
return __virtualname__
def _filter_running(runnings):
"""
Filter out the result: True + no changes data
"""
ret = {
tag: value
for tag, value in runnings.items()
if not value["result"] or value["changes"]
}
return ret
def _set_retcode(ret, highstate=None):
"""
Set the return code based on the data back from the state system
"""
# Set default retcode to 0
__context__["retcode"] = salt.defaults.exitcodes.EX_OK
if isinstance(ret, list):
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return
if not __utils__["state.check_result"](ret, highstate=highstate):
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_FAILURE
def _get_pillar_errors(kwargs, pillar=None):
"""
Checks all pillars (external and internal) for errors.
Return an error message, if anywhere or None.
:param kwargs: dictionary of options
:param pillar: external pillar
:return: None or an error message
"""
return (
None
if kwargs.get("force")
else (pillar or {}).get("_errors", __pillar__.get("_errors")) or None
)
def _wait(jid):
"""
Wait for all previously started state jobs to finish running
"""
if jid is None:
jid = salt.utils.jid.gen_jid(__opts__)
states = _prior_running_states(jid)
while states:
time.sleep(1)
states = _prior_running_states(jid)
def _snapper_pre(opts, jid):
"""
Create a snapper pre snapshot
"""
snapper_pre = None
try:
if not opts["test"] and __opts__.get("snapper_states"):
# Run the snapper pre snapshot
snapper_pre = __salt__["snapper.create_snapshot"](
config=__opts__.get("snapper_states_config", "root"),
snapshot_type="pre",
description="Salt State run for jid {}".format(jid),
__pub_jid=jid,
)
except Exception: # pylint: disable=broad-except
log.error("Failed to create snapper pre snapshot for jid: %s", jid)
return snapper_pre
def _snapper_post(opts, jid, pre_num):
"""
Create the post states snapshot
"""
try:
if not opts["test"] and __opts__.get("snapper_states") and pre_num:
# Run the snapper pre snapshot
__salt__["snapper.create_snapshot"](
config=__opts__.get("snapper_states_config", "root"),
snapshot_type="post",
pre_number=pre_num,
description="Salt State run for jid {}".format(jid),
__pub_jid=jid,
)
except Exception: # pylint: disable=broad-except
log.error("Failed to create snapper pre snapshot for jid: %s", jid)
def _get_pause(jid, state_id=None):
"""
Return the pause information for a given jid
"""
pause_dir = os.path.join(__opts__["cachedir"], "state_pause")
pause_path = os.path.join(pause_dir, jid)
if not os.path.exists(pause_dir):
try:
os.makedirs(pause_dir)
except OSError:
# File created in the gap
pass
    data = {}
    if os.path.exists(pause_path):
        with salt.utils.files.fopen(pause_path, "rb") as fp_:
            data = salt.utils.msgpack.loads(fp_.read())
    if state_id is not None:
        if state_id not in data:
            data[state_id] = {}
return data, pause_path
def get_pauses(jid=None):
"""
Get a report on all of the currently paused state runs and pause
run settings.
Optionally send in a jid if you only desire to see a single pause
data set.
"""
ret = {}
active = __salt__["saltutil.is_running"]("state.*")
pause_dir = os.path.join(__opts__["cachedir"], "state_pause")
if not os.path.exists(pause_dir):
return ret
if jid is None:
jids = os.listdir(pause_dir)
elif isinstance(jid, list):
jids = salt.utils.data.stringify(jid)
else:
jids = [str(jid)]
for scan_jid in jids:
is_active = False
for active_data in active:
if active_data["jid"] == scan_jid:
is_active = True
if not is_active:
try:
pause_path = os.path.join(pause_dir, scan_jid)
os.remove(pause_path)
except OSError:
# Already gone
pass
continue
data, pause_path = _get_pause(scan_jid)
ret[scan_jid] = data
return ret
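# CLI example (illustrative; mirrors the style of the commands documented
# below):
#
#   salt '*' state.get_pauses
#   salt '*' state.get_pauses 20171130110407769519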
def soft_kill(jid, state_id=None):
"""
Set up a state run to die before executing the given state id,
this instructs a running state to safely exit at a given
state id. This needs to pass in the jid of the running state.
If a state_id is not passed then the jid referenced will be safely exited
at the beginning of the next state run.
    The given state id is the id of a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `soft_kill` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.soft_kill 20171130110407769519
salt '*' state.soft_kill 20171130110407769519 vim
"""
jid = str(jid)
if state_id is None:
state_id = "__all__"
data, pause_path = _get_pause(jid, state_id)
data[state_id]["kill"] = True
with salt.utils.files.fopen(pause_path, "wb") as fp_:
fp_.write(salt.utils.msgpack.dumps(data))
def pause(jid, state_id=None, duration=None):
"""
Set up a state id pause, this instructs a running state to pause at a given
state id. This needs to pass in the jid of the running state and can
optionally pass in a duration in seconds. If a state_id is not passed then
the jid referenced will be paused at the beginning of the next state run.
    The given state id is the id of a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
The state_id to pass to `pause` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.pause 20171130110407769519
salt '*' state.pause 20171130110407769519 vim
salt '*' state.pause 20171130110407769519 vim 20
"""
jid = str(jid)
if state_id is None:
state_id = "__all__"
data, pause_path = _get_pause(jid, state_id)
if duration:
data[state_id]["duration"] = int(duration)
with salt.utils.files.fopen(pause_path, "wb") as fp_:
fp_.write(salt.utils.msgpack.dumps(data))
def resume(jid, state_id=None):
"""
Remove a pause from a jid, allowing it to continue. If the state_id is
    not specified then the general pause will be resumed.
    The given state_id is the id of a given state execution, so given a state
that looks like this:
.. code-block:: yaml
vim:
pkg.installed: []
    The state_id to pass to `resume` is `vim`
CLI Examples:
.. code-block:: bash
salt '*' state.resume 20171130110407769519
salt '*' state.resume 20171130110407769519 vim
"""
jid = str(jid)
if state_id is None:
state_id = "__all__"
data, pause_path = _get_pause(jid, state_id)
if state_id in data:
data.pop(state_id)
if state_id == "__all__":
data = {}
with salt.utils.files.fopen(pause_path, "wb") as fp_:
fp_.write(salt.utils.msgpack.dumps(data))
def orchestrate(
mods, saltenv="base", test=None, exclude=None, pillar=None, pillarenv=None
):
"""
.. versionadded:: 2016.11.0
Execute the orchestrate runner from a masterless minion.
.. seealso:: More Orchestrate documentation
* :ref:`Full Orchestrate Tutorial <orchestrate-runner>`
* Docs for the salt state module :py:mod:`salt.states.saltmod`
CLI Examples:
.. code-block:: bash
salt-call --local state.orchestrate webserver
salt-call --local state.orchestrate webserver saltenv=dev test=True
salt-call --local state.orchestrate webserver saltenv=dev pillarenv=aws
"""
return _orchestrate(
mods=mods,
saltenv=saltenv,
test=test,
exclude=exclude,
pillar=pillar,
pillarenv=pillarenv,
)
def running(concurrent=False):
"""
Return a list of strings that contain state return data if a state function
is already running. This function is used to prevent multiple state calls
from being run at the same time.
CLI Example:
.. code-block:: bash
salt '*' state.running
"""
ret = []
if concurrent:
return ret
active = __salt__["saltutil.is_running"]("state.*")
for data in active:
err = (
'The function "{}" is running as PID {} and was started at {} '
"with jid {}".format(
data["fun"],
data["pid"],
salt.utils.jid.jid_to_time(data["jid"]),
data["jid"],
)
)
ret.append(err)
return ret
def _prior_running_states(jid):
"""
Return a list of dicts of prior calls to state functions. This function is
used to queue state calls so only one is run at a time.
"""
ret = []
active = __salt__["saltutil.is_running"]("state.*")
for data in active:
try:
data_jid = int(data["jid"])
except ValueError:
continue
if data_jid < int(jid):
ret.append(data)
return ret
def _check_queue(queue, kwargs):
"""
Utility function to queue the state run if requested
and to check for conflicts in currently running states
"""
if queue:
_wait(kwargs.get("__pub_jid"))
else:
conflict = running(concurrent=kwargs.get("concurrent", False))
if conflict:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return conflict
def _get_initial_pillar(opts):
return (
__pillar__.value()
if __opts__.get("__cli", None) == "salt-call"
and opts["pillarenv"] == __opts__["pillarenv"]
else None
)
def low(data, queue=False, **kwargs):
"""
Execute a single low data call
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
try:
st_ = salt.state.State(__opts__, proxy=__proxy__)
except NameError:
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return err
ret = st_.call(data)
if isinstance(ret, list):
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
if __utils__["state.check_result"](ret):
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_FAILURE
return ret
def _get_test_value(test=None, **kwargs):
"""
Determine the correct value for the test flag.
"""
ret = True
if test is None:
if salt.utils.args.test_mode(test=test, **kwargs):
ret = True
elif __salt__["config.get"]("test", omit_opts=True) is True:
ret = True
else:
ret = __opts__.get("test", None)
else:
ret = test
return ret
def high(data, test=None, queue=False, **kwargs):
"""
Execute the compound calls stored in a single set of high data
This function is mostly intended for testing the state system and is not
likely to be needed in everyday usage.
CLI Example:
.. code-block:: bash
salt '*' state.high '{"vim": {"pkg": ["installed"]}}'
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.State(
opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=dict(__proxy__),
context=dict(__context__),
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.State(
opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts),
)
ret = st_.call_high(data)
_set_retcode(ret, highstate=data)
return ret
def template(tem, queue=False, **kwargs):
"""
Execute the information stored in a template file on the minion.
This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.
CLI Example:
.. code-block:: bash
salt '*' state.template '<Path to template on the minion>'
"""
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(
opts,
context=dict(__context__),
proxy=dict(__proxy__),
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts, context=dict(__context__), initial_pillar=_get_initial_pillar(opts)
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
if not tem.endswith(".sls"):
tem = "{sls}.sls".format(sls=tem)
high_state, errors = st_.render_state(
tem, kwargs.get("saltenv", ""), "", None, local=True
)
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.call_high(high_state)
_set_retcode(ret, highstate=high_state)
return ret
def template_str(tem, queue=False, **kwargs):
"""
Execute the information stored in a string from an sls template
CLI Example:
.. code-block:: bash
salt '*' state.template_str '<Template String>'
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.State(
opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)
)
except NameError:
st_ = salt.state.State(opts, initial_pillar=_get_initial_pillar(opts))
ret = st_.call_template_str(tem)
_set_retcode(ret)
return ret
def apply_(mods=None, **kwargs):
"""
.. versionadded:: 2015.5.0
This function will call :mod:`state.highstate
<salt.modules.state.highstate>` or :mod:`state.sls
<salt.modules.state.sls>` based on the arguments passed to this function.
It exists as a more intuitive way of applying states.
.. rubric:: APPLYING ALL STATES CONFIGURED IN TOP.SLS (A.K.A. :ref:`HIGHSTATE <running-highstate>`)
To apply all configured states, simply run ``state.apply``:
.. code-block:: bash
salt '*' state.apply
The following additional arguments are also accepted when applying all
states configured in top.sls:
test
Run states in test-only (dry-run) mode
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.apply exclude=bar,baz
salt '*' state.apply exclude=foo*
salt '*' state.apply exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
.. code-block:: bash
salt '*' state.apply localconfig=/path/to/minion.yml
.. rubric:: APPLYING INDIVIDUAL SLS FILES (A.K.A. :py:func:`STATE.SLS <salt.modules.state.sls>`)
To apply individual SLS files, pass them as a comma-separated list:
.. code-block:: bash
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
salt '*' state.apply stuff
# Run the states configured in salt://stuff.sls (or salt://stuff/init.sls)
# and salt://pkgs.sls (or salt://pkgs/init.sls).
salt '*' state.apply stuff,pkgs
# Run the states configured in a more deeply nested directory such as salt://my/organized/stuff.sls (or salt://my/organized/stuff/init.sls)
salt '*' state.apply my.organized.stuff
The following additional arguments are also accepted when applying
individual SLS files:
test
Run states in test-only (dry-run) mode
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.apply stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
concurrent : False
Execute state runs concurrently instead of serially
.. warning::
This flag is potentially dangerous. It is designed for use when
multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization.
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for an
``environment`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
.. code-block:: bash
salt '*' state.apply stuff localconfig=/path/to/minion.yml
sync_mods
If specified, the desired custom module types will be synced prior to
running the SLS files:
.. code-block:: bash
salt '*' state.apply stuff sync_mods=states,modules
salt '*' state.apply stuff sync_mods=all
.. note::
This option is ignored when no SLS files are specified, as a
:ref:`highstate <running-highstate>` automatically syncs all custom
module types.
.. versionadded:: 2017.7.8,2018.3.3,2019.2.0
"""
if mods:
return sls(mods, **kwargs)
return highstate(**kwargs)
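# Editor's note: illustrative sketch (not part of the upstream module) showing
# how apply_() dispatches. With ``mods`` it behaves like state.sls, without
# ``mods`` it behaves like state.highstate. "webserver" is a hypothetical SLS.
def _example_apply_dispatch():
    targeted = apply_("webserver", test=True)   # routed to sls("webserver", ...)
    everything = apply_(test=True)              # routed to highstate(...)
    return targeted, everything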
def test(*args, **kwargs):
"""
.. versionadded:: 3001
Alias for `state.apply` with the kwarg `test` forced to `True`.
This is a nicety to avoid the need to type out `test=True` and the possibility of
a typo causing changes you do not intend.
"""
kwargs["test"] = True
ret = apply_(*args, **kwargs)
return ret
def request(mods=None, **kwargs):
"""
.. versionadded:: 2015.5.0
Request that the local admin execute a state run via
`salt-call state.run_request`.
All arguments match those of state.apply.
CLI Example:
.. code-block:: bash
salt '*' state.request
salt '*' state.request stuff
salt '*' state.request stuff,pkgs
"""
kwargs["test"] = True
ret = apply_(mods, **kwargs)
notify_path = os.path.join(__opts__["cachedir"], "req_state.p")
req = check_request()
req.update(
{
kwargs.get("name", "default"): {
"test_run": ret,
"mods": mods,
"kwargs": kwargs,
}
}
)
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__["cmd.run"]('attrib -R "{}"'.format(notify_path))
with salt.utils.files.fopen(notify_path, "w+b") as fp_:
salt.payload.dump(req, fp_)
except OSError:
log.error(
"Unable to write state request file %s. Check permission.", notify_path
)
return ret
def check_request(name=None):
"""
.. versionadded:: 2015.5.0
Return the state request information, if any
CLI Example:
.. code-block:: bash
salt '*' state.check_request
"""
notify_path = os.path.join(__opts__["cachedir"], "req_state.p")
if os.path.isfile(notify_path):
with salt.utils.files.fopen(notify_path, "rb") as fp_:
req = salt.payload.load(fp_)
if name:
return req[name]
return req
return {}
def clear_request(name=None):
"""
.. versionadded:: 2015.5.0
Clear out the state execution request without executing it
CLI Example:
.. code-block:: bash
salt '*' state.clear_request
"""
notify_path = os.path.join(__opts__["cachedir"], "req_state.p")
if not os.path.isfile(notify_path):
return True
if not name:
try:
os.remove(notify_path)
except OSError:
pass
else:
req = check_request()
if name in req:
req.pop(name)
else:
return False
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__["cmd.run"]('attrib -R "{}"'.format(notify_path))
with salt.utils.files.fopen(notify_path, "w+b") as fp_:
salt.payload.dump(req, fp_)
except OSError:
log.error(
"Unable to write state request file %s. Check permission.",
notify_path,
)
return True
def run_request(name="default", **kwargs):
"""
.. versionadded:: 2015.5.0
Execute the pending state request
CLI Example:
.. code-block:: bash
salt '*' state.run_request
"""
req = check_request()
if name not in req:
return {}
n_req = req[name]
if "mods" not in n_req or "kwargs" not in n_req:
return {}
req[name]["kwargs"].update(kwargs)
if "test" in n_req["kwargs"]:
n_req["kwargs"].pop("test")
if req:
ret = apply_(n_req["mods"], **n_req["kwargs"])
try:
os.remove(os.path.join(__opts__["cachedir"], "req_state.p"))
except OSError:
pass
return ret
return {}
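# Editor's note: illustrative sketch (not part of the upstream module) tying the
# request/check_request/run_request helpers together. "stuff" is a hypothetical
# SLS name.
def _example_request_workflow():
    request("stuff")                 # record a dry run under the "default" name
    pending = check_request()        # inspect the cached mods/kwargs/test_run
    result = run_request("default")  # execute for real and remove the cached request
    return pending, result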
def highstate(test=None, queue=False, **kwargs):
"""
Retrieve the state data from the salt master for this minion and execute it
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.highstate stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.highstate exclude=bar,baz
salt '*' state.highstate exclude=foo*
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``.
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for a
``saltenv`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
CLI Examples:
.. code-block:: bash
salt '*' state.highstate
salt '*' state.highstate whitelist=sls1_to_run,sls2_to_run
salt '*' state.highstate exclude=sls_to_exclude
salt '*' state.highstate exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.highstate pillar="{foo: 'Foo!', bar: 'Bar!'}"
"""
if _disabled(["highstate"]):
log.debug(
"Salt highstate run is disabled. To re-enable, run state.enable highstate"
)
ret = {
"name": (
"Salt highstate run is disabled. To re-enable, run state.enable"
" highstate"
),
"result": "False",
"comment": "Disabled",
}
return ret
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
if "saltenv" in kwargs:
opts["saltenv"] = kwargs["saltenv"]
if "pillarenv" in kwargs:
opts["pillarenv"] = kwargs["pillarenv"]
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
kwargs.get("__pub_jid"),
pillar_enc=pillar_enc,
proxy=dict(__proxy__),
context=dict(__context__),
mocked=kwargs.get("mock", False),
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
kwargs.get("__pub_jid"),
pillar_enc=pillar_enc,
mocked=kwargs.get("mock", False),
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ["Pillar failed to render with the following messages:"] + errors
st_.push_active()
orchestration_jid = kwargs.get("orchestration_jid")
        snapper_pre = _snapper_pre(opts, kwargs.get("__pub_jid", "called locally"))
try:
ret = st_.call_highstate(
exclude=kwargs.get("exclude", []),
cache=kwargs.get("cache", None),
cache_name=kwargs.get("cache_name", "highstate"),
force=kwargs.get("force", False),
whitelist=kwargs.get("whitelist"),
orchestration_jid=orchestration_jid,
)
finally:
st_.pop_active()
if isinstance(ret, dict) and (
__salt__["config.option"]("state_data", "") == "terse"
or kwargs.get("terse")
):
ret = _filter_running(ret)
_set_retcode(ret, highstate=st_.building_highstate)
        _snapper_post(opts, kwargs.get("__pub_jid", "called locally"), snapper_pre)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__["test"] = orig_test
return ret
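# Editor's note: illustrative sketch (not part of the upstream module). A dry
# run of the highstate with a hypothetical pillar override; individual results
# come back keyed as "<state>_|-<id>_|-<name>_|-<fun>" and failures are
# reflected in __context__["retcode"].
def _example_highstate_dry_run():
    ret = highstate(test=True, pillar={"role": "web"})
    failed = __context__.get("retcode") not in (None, salt.defaults.exitcodes.EX_OK)
    return ret, failed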
def sls(mods, test=None, exclude=None, queue=False, sync_mods=None, **kwargs):
"""
Execute the states in one or more SLS files
test
Run states in test-only (dry-run) mode
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.sls stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.
.. versionchanged:: 2016.3.0
GPG-encrypted CLI Pillar data is now supported via the GPG
renderer. See :ref:`here <encrypted-cli-pillar-data>` for details.
pillar_enc
Specify which renderer to use to decrypt encrypted data located within
the ``pillar`` value. Currently, only ``gpg`` is supported.
.. versionadded:: 2016.3.0
exclude
Exclude specific states from execution. Accepts a list of sls names, a
comma-separated string of sls names, or a list of dictionaries
containing ``sls`` or ``id`` keys. Glob-patterns may be used to match
multiple states.
.. code-block:: bash
salt '*' state.sls foo,bar,baz exclude=bar,baz
salt '*' state.sls foo,bar,baz exclude=ba*
salt '*' state.sls foo,bar,baz exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
concurrent : False
Execute state runs concurrently instead of serially
.. warning::
This flag is potentially dangerous. It is designed for use when
multiple state runs can safely be run at the same time. Do *not*
use this flag for performance optimization.
saltenv
Specify a salt fileserver environment to be used when applying states
.. versionchanged:: 0.17.0
Argument name changed from ``env`` to ``saltenv``.
.. versionchanged:: 2014.7.0
If no saltenv is specified, the minion config will be checked for an
``environment`` parameter and if found, it will be used. If none is
found, ``base`` will be used. In prior releases, the minion config
was not checked and ``base`` would always be assumed when the
saltenv was not explicitly set.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
localconfig
Optionally, instead of using the minion config, load minion opts from
the file specified by this argument, and then merge them with the
options from the minion config. This functionality allows for specific
states to be run with their own custom minion configuration, including
different pillars, file_roots, etc.
mock
The mock option allows for the state run to execute without actually
calling any states. This then returns a mocked return which will show
the requisite ordering as well as fully validate the state run.
.. versionadded:: 2015.8.4
sync_mods
If specified, the desired custom module types will be synced prior to
running the SLS files:
.. code-block:: bash
salt '*' state.sls stuff sync_mods=states,modules
salt '*' state.sls stuff sync_mods=all
.. versionadded:: 2017.7.8,2018.3.3,2019.2.0
CLI Example:
.. code-block:: bash
# Run the states configured in salt://example.sls (or salt://example/init.sls)
salt '*' state.apply example
# Run the states configured in salt://core.sls (or salt://core/init.sls)
# and salt://edit/vim.sls (or salt://edit/vim/init.sls)
salt '*' state.sls core,edit.vim
# Run the states configured in a more deeply nested directory such as salt://my/nested/state.sls (or salt://my/nested/state/init.sls)
salt '*' state.sls my.nested.state
salt '*' state.sls core exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
salt '*' state.sls myslsfile pillar="{foo: 'Foo!', bar: 'Bar!'}"
"""
concurrent = kwargs.get("concurrent", False)
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
# Modification to __opts__ lost after this if-else
if queue:
_wait(kwargs.get("__pub_jid"))
else:
conflict = running(concurrent)
if conflict:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return conflict
if isinstance(mods, list):
disabled = _disabled(mods)
else:
disabled = _disabled([mods])
if disabled:
for state in disabled:
log.debug(
"Salt state %s is disabled. To re-enable, run state.enable %s",
state,
state,
)
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return disabled
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
# Since this is running a specific SLS file (or files), fall back to the
# 'base' saltenv if none is configured and none was passed.
if opts["saltenv"] is None:
opts["saltenv"] = "base"
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
cfn = os.path.join(
__opts__["cachedir"],
"{}.cache.p".format(kwargs.get("cache_name", "highstate")),
)
if sync_mods is True:
sync_mods = ["all"]
if sync_mods is not None:
sync_mods = salt.utils.args.split_input(sync_mods)
else:
sync_mods = []
if "all" in sync_mods and sync_mods != ["all"]:
# Prevent unnecessary extra syncing
sync_mods = ["all"]
for module_type in sync_mods:
try:
__salt__["saltutil.sync_{}".format(module_type)](saltenv=opts["saltenv"])
except KeyError:
log.warning("Invalid custom module type '%s', ignoring", module_type)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
kwargs.get("__pub_jid"),
pillar_enc=pillar_enc,
proxy=dict(__proxy__),
context=dict(__context__),
mocked=kwargs.get("mock", False),
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
kwargs.get("__pub_jid"),
pillar_enc=pillar_enc,
mocked=kwargs.get("mock", False),
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ["Pillar failed to render with the following messages:"] + errors
orchestration_jid = kwargs.get("orchestration_jid")
with salt.utils.files.set_umask(0o077):
if kwargs.get("cache"):
if os.path.isfile(cfn):
with salt.utils.files.fopen(cfn, "rb") as fp_:
high_ = salt.payload.load(fp_)
return st_.state.call_high(high_, orchestration_jid)
# If the state file is an integer, convert to a string then to unicode
if isinstance(mods, int):
mods = salt.utils.stringutils.to_unicode(str(mods))
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts["saltenv"]: mods})
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
if exclude:
exclude = salt.utils.args.split_input(exclude)
if "__exclude__" in high_:
high_["__exclude__"].extend(exclude)
else:
high_["__exclude__"] = exclude
            snapper_pre = _snapper_pre(opts, kwargs.get("__pub_jid", "called locally"))
ret = st_.state.call_high(high_, orchestration_jid)
finally:
st_.pop_active()
if __salt__["config.option"]("state_data", "") == "terse" or kwargs.get("terse"):
ret = _filter_running(ret)
cache_file = os.path.join(__opts__["cachedir"], "sls.p")
with salt.utils.files.set_umask(0o077):
try:
if salt.utils.platform.is_windows():
# Make sure cache file isn't read-only
__salt__["cmd.run"](["attrib", "-R", cache_file], python_shell=False)
with salt.utils.files.fopen(cache_file, "w+b") as fp_:
salt.payload.dump(ret, fp_)
except OSError:
log.error(
"Unable to write to SLS cache file %s. Check permission.", cache_file
)
_set_retcode(ret, high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__["test"] = orig_test
try:
with salt.utils.files.fopen(cfn, "w+b") as fp_:
try:
salt.payload.dump(high_, fp_)
except TypeError:
# Can't serialize pydsl
pass
except OSError:
log.error(
"Unable to write to highstate cache file %s. Do you have permissions?",
cfn,
)
        _snapper_post(opts, kwargs.get("__pub_jid", "called locally"), snapper_pre)
return ret
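# Editor's note: illustrative sketch (not part of the upstream module) mirroring
# the CLI examples above from Python: run two SLS files, skip a glob of states,
# and pass an ad-hoc pillar override. All SLS and pillar names are hypothetical.
def _example_sls_call():
    return sls(
        "core,edit.vim",
        test=True,
        exclude="core.debug*",
        pillar={"editor": "vim"},
    )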
def top(topfn, test=None, queue=False, **kwargs):
"""
Execute a specific top file instead of the default. This is useful to apply
configurations from a different environment (for example, dev or prod), without
modifying the default top file.
queue : False
Instead of failing immediately when another state run is in progress,
queue the new state run to begin running once the other has finished.
This option starts a new thread for each queued state run, so use this
option sparingly.
saltenv
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' state.top reverse_top.sls
salt '*' state.top prod_top.sls exclude=sls_to_exclude
salt '*' state.top dev_top.sls exclude="[{'id': 'id_to_exclude'}, {'sls': 'sls_to_exclude'}]"
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
context=dict(__context__),
proxy=dict(__proxy__),
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
context=dict(__context__),
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ["Pillar failed to render with the following messages:"] + errors
st_.push_active()
st_.opts["state_top"] = salt.utils.url.create(topfn)
ret = {}
orchestration_jid = kwargs.get("orchestration_jid")
if "saltenv" in kwargs:
st_.opts["state_top_saltenv"] = kwargs["saltenv"]
try:
            snapper_pre = _snapper_pre(opts, kwargs.get("__pub_jid", "called locally"))
ret = st_.call_highstate(
exclude=kwargs.get("exclude", []),
cache=kwargs.get("cache", None),
cache_name=kwargs.get("cache_name", "highstate"),
orchestration_jid=orchestration_jid,
)
finally:
st_.pop_active()
_set_retcode(ret, highstate=st_.building_highstate)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
        _snapper_post(opts, kwargs.get("__pub_jid", "called locally"), snapper_pre)
__opts__["test"] = orig_test
return ret
def show_highstate(queue=False, **kwargs):
"""
Retrieve the highstate data from the salt master and display it
Custom Pillar data can be passed with the ``pillar`` kwarg.
CLI Example:
.. code-block:: bash
salt '*' state.show_highstate
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
st_.push_active()
try:
ret = st_.compile_highstate()
finally:
st_.pop_active()
_set_retcode(ret)
return ret
def show_lowstate(queue=False, **kwargs):
"""
List out the low data that will be applied to this minion
CLI Example:
.. code-block:: bash
salt '*' state.show_lowstate
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(
opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)
)
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
st_.push_active()
try:
ret = st_.compile_low_chunks()
finally:
st_.pop_active()
return ret
def show_state_usage(queue=False, **kwargs):
"""
Retrieve the highstate data from the salt master to analyse used and unused states
Custom Pillar data can be passed with the ``pillar`` kwarg.
CLI Example:
.. code-block:: bash
salt '*' state.show_state_usage
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
pillar = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if pillar_enc is None and pillar is not None and not isinstance(pillar, dict):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
with salt.state.HighState(__opts__, pillar, pillar_enc=pillar_enc) as st_:
st_.push_active()
try:
ret = st_.compile_state_usage()
finally:
st_.pop_active()
_set_retcode(ret)
return ret
def show_states(queue=False, **kwargs):
"""
Returns the list of states that will be applied on highstate.
CLI Example:
.. code-block:: bash
salt '*' state.show_states
.. versionadded:: 2019.2.0
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(
opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)
)
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
st_.push_active()
states = OrderedDict()
try:
result = st_.compile_low_chunks()
if not isinstance(result, list):
raise Exception(result)
for s in result:
if not isinstance(s, dict):
_set_retcode(result)
return result
states[s["__sls__"]] = True
finally:
st_.pop_active()
return list(states.keys())
def sls_id(id_, mods, test=None, queue=False, **kwargs):
"""
Call a single ID from the named module(s) and handle all requisites
The state ID comes *before* the module ID(s) on the command line.
id
ID to call
mods
Comma-delimited list of modules to search for given id and its requisites
.. versionadded:: 2014.7.0
saltenv : base
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.sls_id my_state my_module pillar='{"foo": "bar"}'
.. note::
Values passed this way will override existing Pillar values set via
``pillar_roots`` or an external Pillar source. Pillar values that
are not included in the kwarg will not be overwritten.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' state.sls_id my_state my_module
salt '*' state.sls_id my_state my_module,a_common_module
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
# Since this is running a specific ID within a specific SLS file, fall back
# to the 'base' saltenv if none is configured and none was passed.
if opts["saltenv"] is None:
opts["saltenv"] = "base"
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
return ["Pillar failed to render with the following messages:"] + errors
split_mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts["saltenv"]: split_mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Apply requisites to high data
high_, req_in_errors = st_.state.requisite_in(high_)
if req_in_errors:
# This if statement should not be necessary if there were no errors,
# but it is required to get the unit tests to pass.
errors.extend(req_in_errors)
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
chunks = st_.state.compile_high_data(high_)
ret = {}
for chunk in chunks:
if chunk.get("__id__", "") == id_:
ret.update(st_.state.call_chunk(chunk, {}, chunks))
        _set_retcode(ret, highstate=high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__["test"] = orig_test
if not ret:
raise SaltInvocationError(
"No matches for ID '{}' found in SLS '{}' within saltenv '{}'".format(
id_, mods, opts["saltenv"]
)
)
return ret
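# Editor's note: illustrative sketch (not part of the upstream module). Run a
# single ID (and its requisites) from one SLS file; the ID comes first, then
# the module(s) to search. "vim" and "edit.vim" are hypothetical names.
def _example_sls_id_call():
    return sls_id("vim", "edit.vim", test=True)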
def show_low_sls(mods, test=None, queue=False, **kwargs):
"""
Display the low data from a specific sls. The default environment is
``base``, use ``saltenv`` to specify a different environment.
saltenv
Specify a salt fileserver environment to be used when applying states
pillar
Custom Pillar values, passed as a dictionary of key-value pairs
.. code-block:: bash
salt '*' state.show_low_sls stuff pillar='{"foo": "bar"}'
.. note::
Values passed this way will override Pillar values set via
``pillar_roots`` or an external Pillar source.
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.show_low_sls foo
salt '*' state.show_low_sls foo saltenv=dev
"""
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts["saltenv"] is None:
opts["saltenv"] = "base"
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts, pillar_override, initial_pillar=_get_initial_pillar(opts)
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts["saltenv"]: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
ret = st_.state.compile_high_data(high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__["test"] = orig_test
return ret
def show_sls(mods, test=None, queue=False, **kwargs):
"""
Display the state data from a specific sls or list of sls files on the
master. The default environment is ``base``, use ``saltenv`` to specify a
different environment.
This function does not support topfiles. For ``top.sls`` please use
``show_top`` instead.
Custom Pillar data can be passed with the ``pillar`` kwarg.
saltenv
Specify a salt fileserver environment to be used when applying states
pillarenv
Specify a Pillar environment to be used when applying states. This
can also be set in the minion config file using the
:conf_minion:`pillarenv` option. When neither the
:conf_minion:`pillarenv` minion config option nor this CLI argument is
used, all Pillar environments will be merged together.
CLI Example:
.. code-block:: bash
salt '*' state.show_sls core,edit.vim saltenv=dev
"""
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
# Since this is dealing with a specific SLS file (or files), fall back to
# the 'base' saltenv if none is configured and none was passed.
if opts["saltenv"] is None:
opts["saltenv"] = "base"
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.HighState(
opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts),
)
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
mods = salt.utils.args.split_input(mods)
st_.push_active()
try:
high_, errors = st_.render_highstate({opts["saltenv"]: mods})
finally:
st_.pop_active()
errors += st_.state.verify_high(high_)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
__opts__["test"] = orig_test
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
return high_
def sls_exists(mods, test=None, queue=False, **kwargs):
"""
    Tests for the existence of a specific SLS or list of SLS files on the
master. Similar to :py:func:`state.show_sls <salt.modules.state.show_sls>`,
rather than returning state details, returns True or False. The default
environment is ``base``, use ``saltenv`` to specify a different environment.
.. versionadded:: 2019.2.0
saltenv
Specify a salt fileserver environment from which to look for the SLS files
specified in the ``mods`` argument
CLI Example:
.. code-block:: bash
salt '*' state.sls_exists core,edit.vim saltenv=dev
"""
return isinstance(show_sls(mods, test=test, queue=queue, **kwargs), dict)
def id_exists(ids, mods, test=None, queue=False, **kwargs):
"""
Tests for the existence of a specific ID or list of IDs within the
specified SLS file(s). Similar to :py:func:`state.sls_exists
<salt.modules.state.sls_exists>`, returns True or False. The default
    environment is ``base``, use ``saltenv`` to specify a different environment.
.. versionadded:: 2019.2.0
saltenv
Specify a salt fileserver environment from which to look for the SLS files
specified in the ``mods`` argument
CLI Example:
.. code-block:: bash
salt '*' state.id_exists create_myfile,update_template filestate saltenv=dev
"""
ids = salt.utils.args.split_input(ids)
ids = set(ids)
sls_ids = {
x["__id__"] for x in show_low_sls(mods, test=test, queue=queue, **kwargs)
}
return ids.issubset(sls_ids)
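# Editor's note: illustrative sketch (not part of the upstream module) using the
# existence checks as a guard before targeting a single ID. The SLS and ID
# names are hypothetical.
def _example_guarded_run():
    if sls_exists("edit.vim") and id_exists("vim", "edit.vim"):
        return sls_id("vim", "edit.vim")
    return {}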
def show_top(queue=False, **kwargs):
"""
Return the top data that the minion will use for a highstate
CLI Example:
.. code-block:: bash
salt '*' state.show_top
"""
if "env" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("env")
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
try:
st_ = salt.state.HighState(
opts, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)
)
except NameError:
st_ = salt.state.HighState(opts, initial_pillar=_get_initial_pillar(opts))
with st_:
errors = _get_pillar_errors(kwargs, pillar=st_.opts["pillar"])
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
raise CommandExecutionError("Pillar failed to render", info=errors)
errors = []
top_ = st_.get_top()
errors += st_.verify_tops(top_)
if errors:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return errors
matches = st_.top_matches(top_)
return matches
def single(fun, name, test=None, queue=False, **kwargs):
"""
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
    specify list values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
"""
conflict = _check_queue(queue, kwargs)
if conflict is not None:
return conflict
comps = fun.split(".")
if len(comps) < 2:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return "Invalid function passed"
kwargs.update({"state": comps[0], "fun": comps[1], "__id__": name, "name": name})
orig_test = __opts__.get("test", None)
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
opts["test"] = _get_test_value(test, **kwargs)
pillar_override = kwargs.get("pillar")
pillar_enc = kwargs.get("pillar_enc")
if (
pillar_enc is None
and pillar_override is not None
and not isinstance(pillar_override, dict)
):
raise SaltInvocationError(
"Pillar data must be formatted as a dictionary, unless pillar_enc "
"is specified."
)
try:
st_ = salt.state.State(
opts,
pillar_override,
pillar_enc=pillar_enc,
proxy=__proxy__,
initial_pillar=_get_initial_pillar(opts),
)
except NameError:
st_ = salt.state.State(
opts,
pillar_override,
pillar_enc=pillar_enc,
initial_pillar=_get_initial_pillar(opts),
)
err = st_.verify_data(kwargs)
if err:
__context__["retcode"] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
return err
st_._mod_init(kwargs)
    snapper_pre = _snapper_pre(opts, kwargs.get("__pub_jid", "called locally"))
ret = {
"{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(kwargs): st_.call(
kwargs
)
}
_set_retcode(ret)
# Work around Windows multiprocessing bug, set __opts__['test'] back to
# value from before this function was run.
    _snapper_post(opts, kwargs.get("__pub_jid", "called locally"), snapper_pre)
__opts__["test"] = orig_test
return ret
def clear_cache():
"""
Clear out cached state files, forcing even cache runs to refresh the cache
on the next state execution.
    Remember that the state cache is completely disabled by default; this
    execution only applies if ``cache=True`` is used in states.
CLI Example:
.. code-block:: bash
salt '*' state.clear_cache
"""
ret = []
for fn_ in os.listdir(__opts__["cachedir"]):
if fn_.endswith(".cache.p"):
path = os.path.join(__opts__["cachedir"], fn_)
if not os.path.isfile(path):
continue
os.remove(path)
ret.append(fn_)
return ret
def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
"""
Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state
can be generated using salt-ssh.
CLI Example:
.. code-block:: bash
salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5
"""
# TODO - Add ability to download from salt master or other source
popts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
if not os.path.isfile(pkg_path):
return {}
if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum:
return {}
root = tempfile.mkdtemp()
s_pkg = tarfile.open(pkg_path, "r:gz")
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if salt.utils.stringutils.to_unicode(member.path).startswith(
(os.sep, "..{}".format(os.sep))
):
return {}
elif "..{}".format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()
lowstate_json = os.path.join(root, "lowstate.json")
with salt.utils.files.fopen(lowstate_json, "r") as fp_:
lowstate = salt.utils.json.load(fp_)
# Check for errors in the lowstate
for chunk in lowstate:
if not isinstance(chunk, dict):
return lowstate
pillar_json = os.path.join(root, "pillar.json")
if os.path.isfile(pillar_json):
with salt.utils.files.fopen(pillar_json, "r") as fp_:
pillar_override = salt.utils.json.load(fp_)
else:
pillar_override = None
roster_grains_json = os.path.join(root, "roster_grains.json")
if os.path.isfile(roster_grains_json):
with salt.utils.files.fopen(roster_grains_json, "r") as fp_:
roster_grains = _format_cached_grains(salt.utils.json.load(fp_))
if os.path.isfile(roster_grains_json):
popts["grains"] = roster_grains
popts["fileclient"] = "local"
popts["file_roots"] = {}
popts["test"] = _get_test_value(test, **kwargs)
envs = os.listdir(root)
for fn_ in envs:
full = os.path.join(root, fn_)
if not os.path.isdir(full):
continue
popts["file_roots"][fn_] = [full]
st_ = salt.state.State(popts, pillar_override=pillar_override)
    snapper_pre = _snapper_pre(popts, kwargs.get("__pub_jid", "called locally"))
ret = st_.call_chunks(lowstate)
ret = st_.call_listen(lowstate, ret)
try:
shutil.rmtree(root)
except OSError:
pass
_set_retcode(ret)
    _snapper_post(popts, kwargs.get("__pub_jid", "called locally"), snapper_pre)
return ret
def disable(states):
"""
Disable state runs.
CLI Example:
.. code-block:: bash
salt '*' state.disable highstate
salt '*' state.disable highstate,test.succeed_without_changes
.. note::
To disable a state file from running provide the same name that would
be passed in a state.sls call.
salt '*' state.disable bind.config
"""
ret = {"res": True, "msg": ""}
states = salt.utils.args.split_input(states)
msg = []
_disabled_state_runs = __salt__["grains.get"]("state_runs_disabled")
if not isinstance(_disabled_state_runs, list):
_disabled_state_runs = []
_changed = False
for _state in states:
if _state in _disabled_state_runs:
msg.append("Info: {} state already disabled.".format(_state))
else:
msg.append("Info: {} state disabled.".format(_state))
_disabled_state_runs.append(_state)
_changed = True
if _changed:
__salt__["grains.setval"]("state_runs_disabled", _disabled_state_runs)
ret["msg"] = "\n".join(msg)
# refresh the grains
__salt__["saltutil.refresh_modules"]()
return ret
def enable(states):
"""
Enable state function or sls run
CLI Example:
.. code-block:: bash
salt '*' state.enable highstate
salt '*' state.enable test.succeed_without_changes
.. note::
To enable a state file from running provide the same name that would
be passed in a state.sls call.
        salt '*' state.enable bind.config
"""
ret = {"res": True, "msg": ""}
states = salt.utils.args.split_input(states)
log.debug("states %s", states)
msg = []
_disabled_state_runs = __salt__["grains.get"]("state_runs_disabled")
if not isinstance(_disabled_state_runs, list):
_disabled_state_runs = []
_changed = False
for _state in states:
log.debug("_state %s", _state)
if _state not in _disabled_state_runs:
msg.append("Info: {} state already enabled.".format(_state))
else:
msg.append("Info: {} state enabled.".format(_state))
_disabled_state_runs.remove(_state)
_changed = True
if _changed:
__salt__["grains.setval"]("state_runs_disabled", _disabled_state_runs)
ret["msg"] = "\n".join(msg)
# refresh the grains
__salt__["saltutil.refresh_modules"]()
return ret
def list_disabled():
"""
List the states which are currently disabled
CLI Example:
.. code-block:: bash
salt '*' state.list_disabled
"""
return __salt__["grains.get"]("state_runs_disabled")
def _disabled(funs):
"""
Return messages for disabled states
that match state functions in funs.
"""
ret = []
_disabled_state_runs = __salt__["grains.get"]("state_runs_disabled")
if not isinstance(_disabled_state_runs, list):
_disabled_state_runs = []
for state in funs:
for _state in _disabled_state_runs:
if ".*" in _state:
target_state = _state.split(".")[0]
target_state = (
target_state + "."
if not target_state.endswith(".")
else target_state
)
if state.startswith(target_state):
err = (
'The state file "{0}" is currently disabled by "{1}", '
"to re-enable, run state.enable {1}.".format(
state,
_state,
)
)
ret.append(err)
continue
else:
if _state == state:
err = (
'The state file "{0}" is currently disabled, '
"to re-enable, run state.enable {0}.".format(
_state,
)
)
ret.append(err)
continue
return ret
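# Editor's note: illustrative sketch (not part of the upstream module). A
# disabled entry of the form ``bind.*`` matches every SLS under that prefix,
# while a plain entry only matches an exact name. The SLS names below are
# hypothetical.
def _example_disabled_matching():
    # With ["bind.*"] in the grain, bind.config and bind.named are reported as
    # disabled; web.nginx is not.
    return _disabled(["bind.config", "bind.named", "web.nginx"])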
def event(
tagmatch="*", count=-1, quiet=False, sock_dir=None, pretty=False, node="minion"
):
r"""
Watch Salt's event bus and block until the given tag is matched
.. versionadded:: 2016.3.0
.. versionchanged:: 2019.2.0
``tagmatch`` can now be either a glob or regular expression.
This is useful for utilizing Salt's event bus from shell scripts or for
taking simple actions directly from the CLI.
Enable debug logging to see ignored events.
:param tagmatch: the event is written to stdout for each tag that matches
this glob or regular expression.
:param count: this number is decremented for each event that matches the
``tagmatch`` parameter; pass ``-1`` to listen forever.
:param quiet: do not print to stdout; just block
:param sock_dir: path to the Salt master's event socket file.
:param pretty: Output the JSON all on a single line if ``False`` (useful
for shell tools); pretty-print the JSON output if ``True``.
:param node: Watch the minion-side or master-side event bus.
CLI Example:
.. code-block:: bash
salt-call --local state.event pretty=True
"""
with salt.utils.event.get_event(
node,
sock_dir or __opts__["sock_dir"],
__opts__["transport"],
opts=__opts__,
listen=True,
) as sevent:
while True:
ret = sevent.get_event(full=True, auto_reconnect=True)
if ret is None:
continue
if salt.utils.stringutils.expr_match(ret["tag"], tagmatch):
if not quiet:
salt.utils.stringutils.print_cli(
"{}\t{}".format(
salt.utils.stringutils.to_str(ret["tag"]),
salt.utils.json.dumps(
salt.utils.data.decode(ret["data"]),
sort_keys=pretty,
indent=None if not pretty else 4,
),
)
)
sys.stdout.flush()
if count > 0:
count -= 1
log.debug("Remaining event matches: %s", count)
if count == 0:
break
else:
log.debug("Skipping event tag: %s", ret["tag"])
continue
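# Editor's note: illustrative sketch (not part of the upstream module). Block
# until one matching job event is seen on the minion bus without printing it;
# event() returns None once ``count`` reaches zero.
def _example_wait_for_one_event():
    return event(tagmatch="salt/job/*", count=1, quiet=True)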

avg_line_length: 31.994431
max_line_length: 147
alphanum_fraction: 0.613808

hexsha: d669123a9292d3bf1da68d1ae90f3abe98538e95
size: 6891
ext: py
lang: Python
max_stars_repo_path: register/forms.py
max_stars_repo_name: cds-snc/covid-alert-portal
max_stars_repo_head_hexsha: e7d56fa9fa4a2ad2d60f056eae063713661bd260
max_stars_repo_licenses: ["MIT"]
max_stars_count: 43
max_stars_repo_stars_event_min_datetime: 2020-07-31T14:38:06.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-07T11:28:28.000Z
max_issues_repo_path: register/forms.py
max_issues_repo_name: cds-snc/covid-alert-portal
max_issues_repo_head_hexsha: e7d56fa9fa4a2ad2d60f056eae063713661bd260
max_issues_repo_licenses: ["MIT"]
max_issues_count: 322
max_issues_repo_issues_event_min_datetime: 2020-07-23T19:38:26.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T19:15:45.000Z
max_forks_repo_path: register/forms.py
max_forks_repo_name: cds-snc/covid-alert-portal
max_forks_repo_head_hexsha: e7d56fa9fa4a2ad2d60f056eae063713661bd260
max_forks_repo_licenses: ["MIT"]
max_forks_count: 6
max_forks_repo_forks_event_min_datetime: 2020-11-28T19:30:20.000Z
max_forks_repo_forks_event_max_datetime: 2021-07-29T18:06:55.000Z

content:
import re
from django import forms
from django.core.validators import RegexValidator
from portal.forms import HealthcareBaseForm
from django.utils.translation import gettext_lazy as _
from portal.widgets import CDSRadioWidget
from phonenumber_field.formfields import PhoneNumberField
from dependency_injector.wiring import inject, Provide
from portal.containers import Container
from portal.services import NotifyService
from .widgets import AutocompleteWidget
from localflavor.ca.forms import CAPostalCodeField
from django.core.exceptions import ValidationError
type_event = 1
type_place = 2
type_event_or_place = 3
location_restaurant_bar_coffee = "restaurant_bar_coffee"
location_fitness_recreation = "fitness_recreation"
location_arts_entertainment = "arts_entertainment"
location_grooming_wellness = "grooming_wellness"
location_religious_space = "religious_space"
location_events = "events"
location_retail = "retail"
location_medical = "medical"
location_centres = "centres"
location_other = "other"
location_category_type_map = {
location_restaurant_bar_coffee: type_place,
location_fitness_recreation: type_place,
location_arts_entertainment: type_place,
location_grooming_wellness: type_place,
location_religious_space: type_place,
location_events: type_event,
location_retail: type_place,
location_medical: type_place,
location_centres: type_place,
location_other: type_event_or_place,
}
location_choices = [
("", _("Select a type of place or event")),
(location_restaurant_bar_coffee, _("Restaurant, bar, coffee shop")),
(location_fitness_recreation, _("Fitness, sports, recreation")),
(location_arts_entertainment, _("Arts, entertainment")),
(location_grooming_wellness, _("Grooming and wellness")),
(location_religious_space, _("Places of worship")),
(location_events, _("Events such as festivals, weddings, conferences")),
(location_retail, _("Retail such as grocery stores, liquor stores, pharmacies")),
(location_medical, _("Medical centres, such as doctor or dentist offices")),
(location_centres, _("Community centres, libraries, government service centres")),
(location_other, _("Other")),
]
@inject
def send_email(
to_email,
payload,
template_id,
notify_service: NotifyService = Provide[Container.notify_service],
):
notify_service.send_email(
address=to_email, template_id=template_id, details=payload
)
class EmailForm(HealthcareBaseForm, forms.Form):
email = forms.EmailField(label=_("Email address"), max_length=255)
class LocationCategoryForm(HealthcareBaseForm, forms.Form):
category = forms.ChoiceField(
label="",
choices=location_choices,
)
category_description = forms.CharField(
label=_("Tell us the type of place."), required=False, max_length=200
)
def clean(self):
cleaned_data = super().clean()
other_selected = cleaned_data.get("category") == "other"
category_description = cleaned_data.get("category_description")
if other_selected and not category_description:
raise forms.ValidationError(_("Tell us the type of place."))
alphanum_validator = RegexValidator(
r'^[0-9A-zÀ-ÿ-_\s,.!?"\'\(\):;«»@$&]*$', _("Only enter letters or numbers.")
)
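# Editor's note: illustrative sketch (not part of the original form code). The
# validator accepts letters (including accented Latin characters), digits,
# whitespace and the listed punctuation; anything else raises ValidationError.
def _example_validator_usage():
    alphanum_validator("Café Déjà Vu, 2nd floor!")  # passes silently
    try:
        alphanum_validator("Drop <script>")         # "<" is rejected
    except ValidationError:
        pass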
class LocationNameForm(HealthcareBaseForm, forms.Form):
name = forms.CharField(
label="",
max_length=65,
validators=[alphanum_validator],
error_messages={
"max_length": _(
"Your name is longer than the %(limit_value)d character limit."
)
},
)
provinces = [
("", _("Select a province or territory")),
("AB", _("Alberta")),
("BC", _("British Columbia")),
("MB", _("Manitoba")),
("NB", _("New Brunswick")),
("NL", _("Newfoundland and Labrador")),
("NS", _("Nova Scotia")),
("NT", _("Northwest Territories")),
("NU", _("Nunavut")),
("ON", _("Ontario")),
("PE", _("Prince Edward Island")),
("QC", _("Quebec")),
("SK", _("Saskatchewan")),
("YT", _("Yukon")),
]
class LocationAddressForm(HealthcareBaseForm, forms.Form):
address = forms.CharField(
label=_("Address line 1"),
widget=AutocompleteWidget(),
max_length=200,
validators=[alphanum_validator],
)
address_2 = forms.CharField(
label=_("Address line 2"),
required=False,
max_length=200,
validators=[alphanum_validator],
)
city = forms.CharField(
label=_("City"),
max_length=100,
validators=[alphanum_validator],
)
province = forms.ChoiceField(label=_("Province or territory"), choices=provinces)
postal_code = CAPostalCodeField(
label=_("Postal code"),
max_length=10,
error_messages={"invalid": _("Enter a valid Canadian postal code.")},
)
class LocationContactForm(HealthcareBaseForm, forms.Form):
contact_name = forms.CharField(
label=_("Name of contact"),
max_length=200,
validators=[alphanum_validator],
)
contact_email = forms.EmailField(
label=_("Contact email"),
max_length=255,
)
invalid_phone_error = _("Your phone number must be valid.")
contact_phone = PhoneNumberField(
label=_("Contact phone number"),
error_messages={"invalid": invalid_phone_error},
)
contact_phone_ext = forms.CharField(
label=_("Extension"),
required=False,
max_length=20,
validators=[alphanum_validator],
)
def clean_contact_phone(self):
phone_number = self.data["contact-contact_phone"]
cleaned_phone_number = self.cleaned_data["contact_phone"]
        # Search for any alpha characters
m = re.search("[A-Za-z]+", phone_number)
# By default the PhoneNumberField will convert chars to #s
# Raise a validation error if alpha chars found
if m:
raise ValidationError(self.invalid_phone_error)
return cleaned_phone_number
class RegisterSummaryForm(HealthcareBaseForm, forms.Form):
"""
A form to show an information panel.
"""
pass
class ContactUsForm(HealthcareBaseForm, forms.Form):
help_category = forms.ChoiceField(
label="",
choices=[
("get_help", _("Get help.")),
("give_feedback", _("Give feedback.")),
("something_else", _("Something else.")),
],
widget=CDSRadioWidget(attrs={"class": "multichoice-radio"}),
)
more_info = forms.CharField(
label=_("Tell us more about the issue"),
widget=forms.Textarea,
)
contact_email = forms.EmailField(
label=_("Your email address"),
help_text=_(
"We'll use this if we need to contact you. We will not use your email address for anything else."
),
)

avg_line_length: 31.040541
max_line_length: 109
alphanum_fraction: 0.674358